xref: /xnu-8019.80.24/iokit/Kernel/IOMemoryDescriptor.cpp (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #define IOKIT_ENABLE_SHARED_PTR
29 
30 #include <sys/cdefs.h>
31 
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39 
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43 
44 #include <IOKit/IOKitDebug.h>
45 #include <IOKit/IOTimeStamp.h>
46 #include <libkern/OSDebug.h>
47 #include <libkern/OSKextLibPrivate.h>
48 
49 #include "IOKitKernelInternal.h"
50 
51 #include <libkern/c++/OSAllocation.h>
52 #include <libkern/c++/OSContainers.h>
53 #include <libkern/c++/OSDictionary.h>
54 #include <libkern/c++/OSArray.h>
55 #include <libkern/c++/OSSymbol.h>
56 #include <libkern/c++/OSNumber.h>
57 #include <os/overflow.h>
58 #include <os/cpp_util.h>
59 #include <os/base_private.h>
60 
61 #include <sys/uio.h>
62 
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68 
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <mach/memory_entry.h>
72 #include <vm/vm_fault.h>
73 #include <vm/vm_protos.h>
74 
75 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76 extern void ipc_port_release_send(ipc_port_t port);
77 
78 __END_DECLS
79 
80 #define kIOMapperWaitSystem     ((IOMapper *) 1)
81 
82 static IOMapper * gIOSystemMapper = NULL;
83 
84 ppnum_t           gIOLastPage;
85 
86 enum {
87 	kIOMapGuardSizeLarge = 65536
88 };
89 
90 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
91 
92 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
93 
94 #define super IOMemoryDescriptor
95 
96 OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
97     IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
98 
99 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
100 
101 static IORecursiveLock * gIOMemoryLock;
102 
103 #define LOCK    IORecursiveLockLock( gIOMemoryLock)
104 #define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
105 #define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
106 #define WAKEUP  \
107     IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
108 
109 #if 0
110 #define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
111 #else
112 #define DEBG(fmt, args...)      {}
113 #endif
114 
115 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
116 
117 // Some data structures and accessor macros used by the initWithOptions
118 // Function
119 
120 enum ioPLBlockFlags {
121 	kIOPLOnDevice  = 0x00000001,
122 	kIOPLExternUPL = 0x00000002,
123 };
124 
125 struct IOMDPersistentInitData {
126 	const IOGeneralMemoryDescriptor * fMD;
127 	IOMemoryReference               * fMemRef;
128 };
129 
130 struct ioPLBlock {
131 	upl_t fIOPL;
132 	vm_address_t fPageInfo; // Pointer to page list or index into it
133 	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
134 	ppnum_t fMappedPage;        // Page number of first page in this iopl
135 	unsigned int fPageOffset;   // Offset within first page of iopl
136 	unsigned int fFlags;        // Flags
137 };
138 
139 enum { kMaxWireTags = 6 };
140 
141 struct ioGMDData {
142 	IOMapper *  fMapper;
143 	uint64_t    fDMAMapAlignment;
144 	uint64_t    fMappedBase;
145 	uint64_t    fMappedLength;
146 	uint64_t    fPreparationID;
147 #if IOTRACKING
148 	IOTracking  fWireTracking;
149 #endif /* IOTRACKING */
150 	unsigned int      fPageCnt;
151 	uint8_t           fDMAMapNumAddressBits;
152 	unsigned char     fCompletionError:1;
153 	unsigned char     fMappedBaseValid:1;
154 	unsigned char     _resv:4;
155 	unsigned char     fDMAAccess:2;
156 
157 	/* variable length arrays */
158 	upl_page_info_t fPageList[1]
159 #if __LP64__
160 	// align fPageList as for ioPLBlock
161 	__attribute__((aligned(sizeof(upl_t))))
162 #endif
163 	;
164 	//ioPLBlock fBlocks[1];
165 };
166 
167 #pragma GCC visibility push(hidden)
168 
169 class _IOMemoryDescriptorMixedData : public OSObject
170 {
171 	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);
172 
173 public:
174 	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
175 	virtual bool initWithCapacity(size_t capacity);
176 	virtual void free() APPLE_KEXT_OVERRIDE;
177 
178 	virtual bool appendBytes(const void * bytes, size_t length);
179 	virtual void setLength(size_t length);
180 
181 	virtual const void * getBytes() const;
182 	virtual size_t getLength() const;
183 
184 private:
185 	void freeMemory();
186 
187 	void *  _data = nullptr;
188 	size_t  _length = 0;
189 	size_t  _capacity = 0;
190 };
191 
192 #pragma GCC visibility pop
193 
194 #define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
195 #define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
196 #define getNumIOPL(osd, d)      \
197     ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
198 #define getPageList(d)  (&(d->fPageList[0]))
199 #define computeDataSize(p, u) \
200     (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
201 
202 enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
203 
204 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
205 
206 extern "C" {
207 kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)208 device_data_action(
209 	uintptr_t               device_handle,
210 	ipc_port_t              device_pager,
211 	vm_prot_t               protection,
212 	vm_object_offset_t      offset,
213 	vm_size_t               size)
214 {
215 	kern_return_t        kr;
216 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
217 	OSSharedPtr<IOMemoryDescriptor> memDesc;
218 
219 	LOCK;
220 	if (ref->dp.memory) {
221 		memDesc.reset(ref->dp.memory, OSRetain);
222 		kr = memDesc->handleFault(device_pager, offset, size);
223 		memDesc.reset();
224 	} else {
225 		kr = KERN_ABORTED;
226 	}
227 	UNLOCK;
228 
229 	return kr;
230 }
231 
232 kern_return_t
device_close(uintptr_t device_handle)233 device_close(
234 	uintptr_t     device_handle)
235 {
236 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
237 
238 	IOFreeType( ref, IOMemoryDescriptorReserved );
239 
240 	return kIOReturnSuccess;
241 }
242 };      // end extern "C"
243 
244 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
245 
246 // Note this inline function uses C++ reference arguments to return values
247 // This means that pointers are not passed and NULLs don't have to be
248 // checked for as a NULL reference is illegal.
249 static inline void
getAddrLenForInd(mach_vm_address_t & addr,mach_vm_size_t & len,UInt32 type,IOGeneralMemoryDescriptor::Ranges r,UInt32 ind)250 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
251     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
252 {
253 	assert(kIOMemoryTypeUIO == type
254 	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
255 	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
256 	if (kIOMemoryTypeUIO == type) {
257 		user_size_t us;
258 		user_addr_t ad;
259 		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
260 	}
261 #ifndef __LP64__
262 	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
263 		IOAddressRange cur = r.v64[ind];
264 		addr = cur.address;
265 		len  = cur.length;
266 	}
267 #endif /* !__LP64__ */
268 	else {
269 		IOVirtualRange cur = r.v[ind];
270 		addr = cur.address;
271 		len  = cur.length;
272 	}
273 }
274 
275 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
276 
277 static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)278 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
279 {
280 	IOReturn err = kIOReturnSuccess;
281 
282 	*control = VM_PURGABLE_SET_STATE;
283 
284 	enum { kIOMemoryPurgeableControlMask = 15 };
285 
286 	switch (kIOMemoryPurgeableControlMask & newState) {
287 	case kIOMemoryPurgeableKeepCurrent:
288 		*control = VM_PURGABLE_GET_STATE;
289 		break;
290 
291 	case kIOMemoryPurgeableNonVolatile:
292 		*state = VM_PURGABLE_NONVOLATILE;
293 		break;
294 	case kIOMemoryPurgeableVolatile:
295 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
296 		break;
297 	case kIOMemoryPurgeableEmpty:
298 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
299 		break;
300 	default:
301 		err = kIOReturnBadArgument;
302 		break;
303 	}
304 
305 	if (*control == VM_PURGABLE_SET_STATE) {
306 		// let VM know this call is from the kernel and is allowed to alter
307 		// the volatility of the memory entry even if it was created with
308 		// MAP_MEM_PURGABLE_KERNEL_ONLY
309 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
310 	}
311 
312 	return err;
313 }
314 
315 static IOReturn
purgeableStateBits(int * state)316 purgeableStateBits(int * state)
317 {
318 	IOReturn err = kIOReturnSuccess;
319 
320 	switch (VM_PURGABLE_STATE_MASK & *state) {
321 	case VM_PURGABLE_NONVOLATILE:
322 		*state = kIOMemoryPurgeableNonVolatile;
323 		break;
324 	case VM_PURGABLE_VOLATILE:
325 		*state = kIOMemoryPurgeableVolatile;
326 		break;
327 	case VM_PURGABLE_EMPTY:
328 		*state = kIOMemoryPurgeableEmpty;
329 		break;
330 	default:
331 		*state = kIOMemoryPurgeableNonVolatile;
332 		err = kIOReturnNotReady;
333 		break;
334 	}
335 	return err;
336 }
337 
338 typedef struct {
339 	unsigned int wimg;
340 	unsigned int object_type;
341 } iokit_memtype_entry;
342 
343 static const iokit_memtype_entry iomd_mem_types[] = {
344 	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
345 	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
346 	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
347 	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
348 	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
349 	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
350 	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
351 	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
352 	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
353 	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
354 };
355 
356 static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)357 vmProtForCacheMode(IOOptionBits cacheMode)
358 {
359 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
360 	vm_prot_t prot = 0;
361 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
362 	return prot;
363 }
364 
365 static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)366 pagerFlagsForCacheMode(IOOptionBits cacheMode)
367 {
368 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
369 	if (cacheMode == kIODefaultCache) {
370 		return -1U;
371 	}
372 	return iomd_mem_types[cacheMode].wimg;
373 }
374 
375 static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)376 cacheModeForPagerFlags(unsigned int pagerFlags)
377 {
378 	pagerFlags &= VM_WIMG_MASK;
379 	IOOptionBits cacheMode = kIODefaultCache;
380 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
381 		if (iomd_mem_types[i].wimg == pagerFlags) {
382 			cacheMode = i;
383 			break;
384 		}
385 	}
386 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
387 }
388 
389 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391 
392 struct IOMemoryEntry {
393 	ipc_port_t entry;
394 	int64_t    offset;
395 	uint64_t   size;
396 	uint64_t   start;
397 };
398 
399 struct IOMemoryReference {
400 	volatile SInt32             refCount;
401 	vm_prot_t                   prot;
402 	uint32_t                    capacity;
403 	uint32_t                    count;
404 	struct IOMemoryReference  * mapRef;
405 	IOMemoryEntry               entries[0];
406 };
407 
408 enum{
409 	kIOMemoryReferenceReuse = 0x00000001,
410 	kIOMemoryReferenceWrite = 0x00000002,
411 	kIOMemoryReferenceCOW   = 0x00000004,
412 };
413 
414 SInt32 gIOMemoryReferenceCount;
415 
416 IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)417 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
418 {
419 	IOMemoryReference * ref;
420 	size_t              newSize, oldSize, copySize;
421 
422 	newSize = (sizeof(IOMemoryReference)
423 	    - sizeof(ref->entries)
424 	    + capacity * sizeof(ref->entries[0]));
425 	ref = (typeof(ref))IOMalloc(newSize);
426 	if (realloc) {
427 		oldSize = (sizeof(IOMemoryReference)
428 		    - sizeof(realloc->entries)
429 		    + realloc->capacity * sizeof(realloc->entries[0]));
430 		copySize = oldSize;
431 		if (copySize > newSize) {
432 			copySize = newSize;
433 		}
434 		if (ref) {
435 			bcopy(realloc, ref, copySize);
436 		}
437 		IOFree(realloc, oldSize);
438 	} else if (ref) {
439 		bzero(ref, sizeof(*ref));
440 		ref->refCount = 1;
441 		OSIncrementAtomic(&gIOMemoryReferenceCount);
442 	}
443 	if (!ref) {
444 		return NULL;
445 	}
446 	ref->capacity = capacity;
447 	return ref;
448 }
449 
450 void
memoryReferenceFree(IOMemoryReference * ref)451 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
452 {
453 	IOMemoryEntry * entries;
454 	size_t          size;
455 
456 	if (ref->mapRef) {
457 		memoryReferenceFree(ref->mapRef);
458 		ref->mapRef = NULL;
459 	}
460 
461 	entries = ref->entries + ref->count;
462 	while (entries > &ref->entries[0]) {
463 		entries--;
464 		ipc_port_release_send(entries->entry);
465 	}
466 	size = (sizeof(IOMemoryReference)
467 	    - sizeof(ref->entries)
468 	    + ref->capacity * sizeof(ref->entries[0]));
469 	IOFree(ref, size);
470 
471 	OSDecrementAtomic(&gIOMemoryReferenceCount);
472 }
473 
474 void
memoryReferenceRelease(IOMemoryReference * ref)475 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
476 {
477 	if (1 == OSDecrementAtomic(&ref->refCount)) {
478 		memoryReferenceFree(ref);
479 	}
480 }
481 
482 
483 IOReturn
memoryReferenceCreate(IOOptionBits options,IOMemoryReference ** reference)484 IOGeneralMemoryDescriptor::memoryReferenceCreate(
485 	IOOptionBits         options,
486 	IOMemoryReference ** reference)
487 {
488 	enum { kCapacity = 4, kCapacityInc = 4 };
489 
490 	kern_return_t        err;
491 	IOMemoryReference *  ref;
492 	IOMemoryEntry *      entries;
493 	IOMemoryEntry *      cloneEntries;
494 	vm_map_t             map;
495 	ipc_port_t           entry, cloneEntry;
496 	vm_prot_t            prot;
497 	memory_object_size_t actualSize;
498 	uint32_t             rangeIdx;
499 	uint32_t             count;
500 	mach_vm_address_t    entryAddr, endAddr, entrySize;
501 	mach_vm_size_t       srcAddr, srcLen;
502 	mach_vm_size_t       nextAddr, nextLen;
503 	mach_vm_size_t       offset, remain;
504 	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
505 	int                  misaligned_start = 0, misaligned_end = 0;
506 	IOByteCount          physLen;
507 	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
508 	IOOptionBits         cacheMode;
509 	unsigned int         pagerFlags;
510 	vm_tag_t             tag;
511 	vm_named_entry_kernel_flags_t vmne_kflags;
512 
513 	ref = memoryReferenceAlloc(kCapacity, NULL);
514 	if (!ref) {
515 		return kIOReturnNoMemory;
516 	}
517 
518 	tag = (vm_tag_t) getVMTag(kernel_map);
519 	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
520 	entries = &ref->entries[0];
521 	count = 0;
522 	err = KERN_SUCCESS;
523 
524 	offset = 0;
525 	rangeIdx = 0;
526 	remain = _length;
527 	if (_task) {
528 		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
529 
530 		// account for IOBMD setLength(), use its capacity as length
531 		IOBufferMemoryDescriptor * bmd;
532 		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
533 			nextLen = bmd->getCapacity();
534 			remain  = nextLen;
535 		}
536 	} else {
537 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
538 		nextLen = physLen;
539 
540 		// default cache mode for physical
541 		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
542 			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
543 			_flags |= (mode << kIOMemoryBufferCacheShift);
544 		}
545 	}
546 
547 	// cache mode & vm_prot
548 	prot = VM_PROT_READ;
549 	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
550 	prot |= vmProtForCacheMode(cacheMode);
551 	// VM system requires write access to change cache mode
552 	if (kIODefaultCache != cacheMode) {
553 		prot |= VM_PROT_WRITE;
554 	}
555 	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
556 		prot |= VM_PROT_WRITE;
557 	}
558 	if (kIOMemoryReferenceWrite & options) {
559 		prot |= VM_PROT_WRITE;
560 	}
561 	if (kIOMemoryReferenceCOW   & options) {
562 		prot |= MAP_MEM_VM_COPY;
563 	}
564 
565 	if (kIOMemoryUseReserve & _flags) {
566 		prot |= MAP_MEM_GRAB_SECLUDED;
567 	}
568 
569 	if ((kIOMemoryReferenceReuse & options) && _memRef) {
570 		cloneEntries = &_memRef->entries[0];
571 		prot |= MAP_MEM_NAMED_REUSE;
572 	}
573 
574 	if (_task) {
575 		// virtual ranges
576 
577 		if (kIOMemoryBufferPageable & _flags) {
578 			int ledger_tag, ledger_no_footprint;
579 
580 			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
581 			prot |= MAP_MEM_NAMED_CREATE;
582 
583 			// default accounting settings:
584 			//   + "none" ledger tag
585 			//   + include in footprint
586 			// can be changed later with ::setOwnership()
587 			ledger_tag = VM_LEDGER_TAG_NONE;
588 			ledger_no_footprint = 0;
589 
590 			if (kIOMemoryBufferPurgeable & _flags) {
591 				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
592 				if (VM_KERN_MEMORY_SKYWALK == tag) {
593 					// Skywalk purgeable memory accounting:
594 					//    + "network" ledger tag
595 					//    + not included in footprint
596 					ledger_tag = VM_LEDGER_TAG_NETWORK;
597 					ledger_no_footprint = 1;
598 				} else {
599 					// regular purgeable memory accounting:
600 					//    + no ledger tag
601 					//    + included in footprint
602 					ledger_tag = VM_LEDGER_TAG_NONE;
603 					ledger_no_footprint = 0;
604 				}
605 			}
606 			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
607 			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
608 			if (kIOMemoryUseReserve & _flags) {
609 				prot |= MAP_MEM_GRAB_SECLUDED;
610 			}
611 
612 			prot |= VM_PROT_WRITE;
613 			map = NULL;
614 		} else {
615 			prot |= MAP_MEM_USE_DATA_ADDR;
616 			map = get_task_map(_task);
617 		}
618 		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
619 
620 		while (remain) {
621 			srcAddr  = nextAddr;
622 			srcLen   = nextLen;
623 			nextAddr = 0;
624 			nextLen  = 0;
625 			// coalesce addr range
626 			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
627 				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
628 				if ((srcAddr + srcLen) != nextAddr) {
629 					break;
630 				}
631 				srcLen += nextLen;
632 			}
633 
634 			if (MAP_MEM_USE_DATA_ADDR & prot) {
635 				entryAddr = srcAddr;
636 				endAddr   = srcAddr + srcLen;
637 			} else {
638 				entryAddr = trunc_page_64(srcAddr);
639 				endAddr   = round_page_64(srcAddr + srcLen);
640 			}
641 			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
642 				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
643 			}
644 
645 			do{
646 				entrySize = (endAddr - entryAddr);
647 				if (!entrySize) {
648 					break;
649 				}
650 				actualSize = entrySize;
651 
652 				cloneEntry = MACH_PORT_NULL;
653 				if (MAP_MEM_NAMED_REUSE & prot) {
654 					if (cloneEntries < &_memRef->entries[_memRef->count]) {
655 						cloneEntry = cloneEntries->entry;
656 					} else {
657 						prot &= ~MAP_MEM_NAMED_REUSE;
658 					}
659 				}
660 
661 				err = mach_make_memory_entry_internal(map,
662 				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
663 
664 				if (KERN_SUCCESS != err) {
665 					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
666 					break;
667 				}
668 				if (MAP_MEM_USE_DATA_ADDR & prot) {
669 					if (actualSize > entrySize) {
670 						actualSize = entrySize;
671 					}
672 				} else if (actualSize > entrySize) {
673 					panic("mach_make_memory_entry_64 actualSize");
674 				}
675 
676 				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
677 
678 				if (count && overmap_start) {
679 					/*
680 					 * Track misaligned start for all
681 					 * except the first entry.
682 					 */
683 					misaligned_start++;
684 				}
685 
686 				if (overmap_end) {
687 					/*
688 					 * Ignore misaligned end for the
689 					 * last entry.
690 					 */
691 					if ((entryAddr + actualSize) != endAddr) {
692 						misaligned_end++;
693 					}
694 				}
695 
696 				if (count) {
697 					/* Middle entries */
698 					if (misaligned_start || misaligned_end) {
699 						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
700 						ipc_port_release_send(entry);
701 						err = KERN_NOT_SUPPORTED;
702 						break;
703 					}
704 				}
705 
706 				if (count >= ref->capacity) {
707 					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
708 					entries = &ref->entries[count];
709 				}
710 				entries->entry  = entry;
711 				entries->size   = actualSize;
712 				entries->offset = offset + (entryAddr - srcAddr);
713 				entries->start = entryAddr;
714 				entryAddr += actualSize;
715 				if (MAP_MEM_NAMED_REUSE & prot) {
716 					if ((cloneEntries->entry == entries->entry)
717 					    && (cloneEntries->size == entries->size)
718 					    && (cloneEntries->offset == entries->offset)) {
719 						cloneEntries++;
720 					} else {
721 						prot &= ~MAP_MEM_NAMED_REUSE;
722 					}
723 				}
724 				entries++;
725 				count++;
726 			}while (true);
727 			offset += srcLen;
728 			remain -= srcLen;
729 		}
730 	} else {
731 		// _task == 0, physical or kIOMemoryTypeUPL
732 		memory_object_t pager;
733 		vm_size_t       size = ptoa_64(_pages);
734 
735 		if (!getKernelReserved()) {
736 			panic("getKernelReserved");
737 		}
738 
739 		reserved->dp.pagerContig = (1 == _rangesCount);
740 		reserved->dp.memory      = this;
741 
742 		pagerFlags = pagerFlagsForCacheMode(cacheMode);
743 		if (-1U == pagerFlags) {
744 			panic("phys is kIODefaultCache");
745 		}
746 		if (reserved->dp.pagerContig) {
747 			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
748 		}
749 
750 		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
751 		    size, pagerFlags);
752 		assert(pager);
753 		if (!pager) {
754 			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
755 			err = kIOReturnVMError;
756 		} else {
757 			srcAddr  = nextAddr;
758 			entryAddr = trunc_page_64(srcAddr);
759 			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
760 			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
761 			assert(KERN_SUCCESS == err);
762 			if (KERN_SUCCESS != err) {
763 				device_pager_deallocate(pager);
764 			} else {
765 				reserved->dp.devicePager = pager;
766 				entries->entry  = entry;
767 				entries->size   = size;
768 				entries->offset = offset + (entryAddr - srcAddr);
769 				entries++;
770 				count++;
771 			}
772 		}
773 	}
774 
775 	ref->count = count;
776 	ref->prot  = prot;
777 
778 	if (_task && (KERN_SUCCESS == err)
779 	    && (kIOMemoryMapCopyOnWrite & _flags)
780 	    && !(kIOMemoryReferenceCOW & options)) {
781 		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
782 		if (KERN_SUCCESS != err) {
783 			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
784 		}
785 	}
786 
787 	if (KERN_SUCCESS == err) {
788 		if (MAP_MEM_NAMED_REUSE & prot) {
789 			memoryReferenceFree(ref);
790 			OSIncrementAtomic(&_memRef->refCount);
791 			ref = _memRef;
792 		}
793 	} else {
794 		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
795 		memoryReferenceFree(ref);
796 		ref = NULL;
797 	}
798 
799 	*reference = ref;
800 
801 	return err;
802 }
803 
804 static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)805 IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
806 {
807 	switch (kIOMapGuardedMask & options) {
808 	default:
809 	case kIOMapGuardedSmall:
810 		return vm_map_page_size(map);
811 	case kIOMapGuardedLarge:
812 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
813 		return kIOMapGuardSizeLarge;
814 	}
815 	;
816 }
817 
818 static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)819 IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
820     vm_map_offset_t addr, mach_vm_size_t size)
821 {
822 	kern_return_t   kr;
823 	vm_map_offset_t actualAddr;
824 	mach_vm_size_t  actualSize;
825 
826 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
827 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
828 
829 	if (kIOMapGuardedMask & options) {
830 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
831 		actualAddr -= guardSize;
832 		actualSize += 2 * guardSize;
833 	}
834 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
835 
836 	return kr;
837 }
838 
839 kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map,void * _ref)840 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
841 {
842 	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
843 	IOReturn                        err;
844 	vm_map_offset_t                 addr;
845 	mach_vm_size_t                  size;
846 	mach_vm_size_t                  guardSize;
847 
848 	addr = ref->mapped;
849 	size = ref->size;
850 	guardSize = 0;
851 
852 	if (kIOMapGuardedMask & ref->options) {
853 		if (!(kIOMapAnywhere & ref->options)) {
854 			return kIOReturnBadArgument;
855 		}
856 		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
857 		size += 2 * guardSize;
858 	}
859 
860 	err = vm_map_enter_mem_object(map, &addr, size,
861 #if __ARM_MIXED_PAGE_SIZE__
862 	    // TODO4K this should not be necessary...
863 	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
864 #else /* __ARM_MIXED_PAGE_SIZE__ */
865 	    (vm_map_offset_t) 0,
866 #endif /* __ARM_MIXED_PAGE_SIZE__ */
867 	    (((ref->options & kIOMapAnywhere)
868 	    ? VM_FLAGS_ANYWHERE
869 	    : VM_FLAGS_FIXED)),
870 	    VM_MAP_KERNEL_FLAGS_NONE,
871 	    ref->tag,
872 	    IPC_PORT_NULL,
873 	    (memory_object_offset_t) 0,
874 	    false,                       /* copy */
875 	    ref->prot,
876 	    ref->prot,
877 	    VM_INHERIT_NONE);
878 	if (KERN_SUCCESS == err) {
879 		ref->mapped = (mach_vm_address_t) addr;
880 		ref->map = map;
881 		if (kIOMapGuardedMask & ref->options) {
882 			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
883 
884 			err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
885 			assert(KERN_SUCCESS == err);
886 			err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
887 			assert(KERN_SUCCESS == err);
888 			ref->mapped += guardSize;
889 		}
890 	}
891 
892 	return err;
893 }
894 
895 IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)896 IOGeneralMemoryDescriptor::memoryReferenceMap(
897 	IOMemoryReference * ref,
898 	vm_map_t            map,
899 	mach_vm_size_t      inoffset,
900 	mach_vm_size_t      size,
901 	IOOptionBits        options,
902 	mach_vm_address_t * inaddr)
903 {
904 	IOReturn        err;
905 	int64_t         offset = inoffset;
906 	uint32_t        rangeIdx, entryIdx;
907 	vm_map_offset_t addr, mapAddr;
908 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
909 
910 	mach_vm_address_t nextAddr;
911 	mach_vm_size_t    nextLen;
912 	IOByteCount       physLen;
913 	IOMemoryEntry   * entry;
914 	vm_prot_t         prot, memEntryCacheMode;
915 	IOOptionBits      type;
916 	IOOptionBits      cacheMode;
917 	vm_tag_t          tag;
918 	// for the kIOMapPrefault option.
919 	upl_page_info_t * pageList = NULL;
920 	UInt              currentPageIndex = 0;
921 	bool              didAlloc;
922 
923 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
924 
925 	if (ref->mapRef) {
926 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
927 		return err;
928 	}
929 
930 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
931 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
932 		return err;
933 	}
934 
935 	type = _flags & kIOMemoryTypeMask;
936 
937 	prot = VM_PROT_READ;
938 	if (!(kIOMapReadOnly & options)) {
939 		prot |= VM_PROT_WRITE;
940 	}
941 	prot &= ref->prot;
942 
943 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
944 	if (kIODefaultCache != cacheMode) {
945 		// VM system requires write access to update named entry cache mode
946 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
947 	}
948 
949 	tag = (typeof(tag))getVMTag(map);
950 
951 	if (_task) {
952 		// Find first range for offset
953 		if (!_rangesCount) {
954 			return kIOReturnBadArgument;
955 		}
956 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
957 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
958 			if (remain < nextLen) {
959 				break;
960 			}
961 			remain -= nextLen;
962 		}
963 	} else {
964 		rangeIdx = 0;
965 		remain   = 0;
966 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
967 		nextLen  = size;
968 	}
969 
970 	assert(remain < nextLen);
971 	if (remain >= nextLen) {
972 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
973 		return kIOReturnBadArgument;
974 	}
975 
976 	nextAddr  += remain;
977 	nextLen   -= remain;
978 #if __ARM_MIXED_PAGE_SIZE__
979 	pageOffset = (vm_map_page_mask(map) & nextAddr);
980 #else /* __ARM_MIXED_PAGE_SIZE__ */
981 	pageOffset = (page_mask & nextAddr);
982 #endif /* __ARM_MIXED_PAGE_SIZE__ */
983 	addr       = 0;
984 	didAlloc   = false;
985 
986 	if (!(options & kIOMapAnywhere)) {
987 		addr = *inaddr;
988 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
989 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
990 		}
991 		addr -= pageOffset;
992 	}
993 
994 	// find first entry for offset
995 	for (entryIdx = 0;
996 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
997 	    entryIdx++) {
998 	}
999 	entryIdx--;
1000 	entry = &ref->entries[entryIdx];
1001 
1002 	// allocate VM
1003 #if __ARM_MIXED_PAGE_SIZE__
1004 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1005 #else
1006 	size = round_page_64(size + pageOffset);
1007 #endif
1008 	if (kIOMapOverwrite & options) {
1009 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1010 			map = IOPageableMapForAddress(addr);
1011 		}
1012 		err = KERN_SUCCESS;
1013 	} else {
1014 		IOMemoryDescriptorMapAllocRef ref;
1015 		ref.map     = map;
1016 		ref.tag     = tag;
1017 		ref.options = options;
1018 		ref.size    = size;
1019 		ref.prot    = prot;
1020 		if (options & kIOMapAnywhere) {
1021 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1022 			ref.mapped = 0;
1023 		} else {
1024 			ref.mapped = addr;
1025 		}
1026 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1027 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1028 		} else {
1029 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1030 		}
1031 		if (KERN_SUCCESS == err) {
1032 			addr     = ref.mapped;
1033 			map      = ref.map;
1034 			didAlloc = true;
1035 		}
1036 	}
1037 
1038 	/*
1039 	 * If the memory is associated with a device pager but doesn't have a UPL,
1040 	 * it will be immediately faulted in through the pager via populateDevicePager().
1041 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1042 	 * operations.
1043 	 */
1044 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1045 		options &= ~kIOMapPrefault;
1046 	}
1047 
1048 	/*
1049 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1050 	 * memory type, and the underlying data.
1051 	 */
1052 	if (options & kIOMapPrefault) {
1053 		/*
1054 		 * The memory must have been wired by calling ::prepare(), otherwise
1055 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1056 		 */
1057 		assert(_wireCount != 0);
1058 		assert(_memoryEntries != NULL);
1059 		if ((_wireCount == 0) ||
1060 		    (_memoryEntries == NULL)) {
1061 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1062 			return kIOReturnBadArgument;
1063 		}
1064 
1065 		// Get the page list.
1066 		ioGMDData* dataP = getDataP(_memoryEntries);
1067 		ioPLBlock const* ioplList = getIOPLList(dataP);
1068 		pageList = getPageList(dataP);
1069 
1070 		// Get the number of IOPLs.
1071 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1072 
1073 		/*
1074 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1075 		 * the offset. The research will go past it, so we'll need to go back to the
1076 		 * right range at the end.
1077 		 */
1078 		UInt ioplIndex = 0;
1079 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1080 			ioplIndex++;
1081 		}
1082 		ioplIndex--;
1083 
1084 		// Retrieve the IOPL info block.
1085 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1086 
1087 		/*
1088 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1089 		 * array.
1090 		 */
1091 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1092 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1093 		} else {
1094 			pageList = &pageList[ioplInfo.fPageInfo];
1095 		}
1096 
1097 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1098 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1099 
1100 		// Retrieve the index of the first page corresponding to the offset.
1101 		currentPageIndex = atop_32(offsetInIOPL);
1102 	}
1103 
1104 	// enter mappings
1105 	remain  = size;
1106 	mapAddr = addr;
1107 	addr    += pageOffset;
1108 
1109 	while (remain && (KERN_SUCCESS == err)) {
1110 		entryOffset = offset - entry->offset;
1111 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1112 			err = kIOReturnNotAligned;
1113 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1114 			break;
1115 		}
1116 
1117 		if (kIODefaultCache != cacheMode) {
1118 			vm_size_t unused = 0;
1119 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1120 			    memEntryCacheMode, NULL, entry->entry);
1121 			assert(KERN_SUCCESS == err);
1122 		}
1123 
1124 		entryOffset -= pageOffset;
1125 		if (entryOffset >= entry->size) {
1126 			panic("entryOffset");
1127 		}
1128 		chunk = entry->size - entryOffset;
1129 		if (chunk) {
1130 			vm_map_kernel_flags_t vmk_flags;
1131 
1132 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1133 			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1134 
1135 			if (chunk > remain) {
1136 				chunk = remain;
1137 			}
1138 			if (options & kIOMapPrefault) {
1139 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1140 
1141 				err = vm_map_enter_mem_object_prefault(map,
1142 				    &mapAddr,
1143 				    chunk, 0 /* mask */,
1144 				    (VM_FLAGS_FIXED
1145 				    | VM_FLAGS_OVERWRITE),
1146 				    vmk_flags,
1147 				    tag,
1148 				    entry->entry,
1149 				    entryOffset,
1150 				    prot,                        // cur
1151 				    prot,                        // max
1152 				    &pageList[currentPageIndex],
1153 				    nb_pages);
1154 
1155 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1156 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1157 				}
1158 				// Compute the next index in the page list.
1159 				currentPageIndex += nb_pages;
1160 				assert(currentPageIndex <= _pages);
1161 			} else {
1162 				err = vm_map_enter_mem_object(map,
1163 				    &mapAddr,
1164 				    chunk, 0 /* mask */,
1165 				    (VM_FLAGS_FIXED
1166 				    | VM_FLAGS_OVERWRITE),
1167 				    vmk_flags,
1168 				    tag,
1169 				    entry->entry,
1170 				    entryOffset,
1171 				    false,               // copy
1172 				    prot,               // cur
1173 				    prot,               // max
1174 				    VM_INHERIT_NONE);
1175 			}
1176 			if (KERN_SUCCESS != err) {
1177 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1178 				break;
1179 			}
1180 			remain -= chunk;
1181 			if (!remain) {
1182 				break;
1183 			}
1184 			mapAddr  += chunk;
1185 			offset   += chunk - pageOffset;
1186 		}
1187 		pageOffset = 0;
1188 		entry++;
1189 		entryIdx++;
1190 		if (entryIdx >= ref->count) {
1191 			err = kIOReturnOverrun;
1192 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1193 			break;
1194 		}
1195 	}
1196 
1197 	if ((KERN_SUCCESS != err) && didAlloc) {
1198 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1199 		addr = 0;
1200 	}
1201 	*inaddr = addr;
1202 
1203 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1204 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1205 	}
1206 	return err;
1207 }
1208 
1209 #define LOGUNALIGN 0
1210 IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1211 IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1212 	IOMemoryReference * ref,
1213 	vm_map_t            map,
1214 	mach_vm_size_t      inoffset,
1215 	mach_vm_size_t      size,
1216 	IOOptionBits        options,
1217 	mach_vm_address_t * inaddr)
1218 {
1219 	IOReturn            err;
1220 	int64_t             offset = inoffset;
1221 	uint32_t            entryIdx, firstEntryIdx;
1222 	vm_map_offset_t     addr, mapAddr, mapAddrOut;
1223 	vm_map_offset_t     entryOffset, remain, chunk;
1224 
1225 	IOMemoryEntry    * entry;
1226 	vm_prot_t          prot, memEntryCacheMode;
1227 	IOOptionBits       type;
1228 	IOOptionBits       cacheMode;
1229 	vm_tag_t           tag;
1230 	// for the kIOMapPrefault option.
1231 	upl_page_info_t  * pageList = NULL;
1232 	UInt               currentPageIndex = 0;
1233 	bool               didAlloc;
1234 
1235 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1236 
1237 	if (ref->mapRef) {
1238 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1239 		return err;
1240 	}
1241 
1242 #if LOGUNALIGN
1243 	printf("MAP offset %qx, %qx\n", inoffset, size);
1244 #endif
1245 
1246 	type = _flags & kIOMemoryTypeMask;
1247 
1248 	prot = VM_PROT_READ;
1249 	if (!(kIOMapReadOnly & options)) {
1250 		prot |= VM_PROT_WRITE;
1251 	}
1252 	prot &= ref->prot;
1253 
1254 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1255 	if (kIODefaultCache != cacheMode) {
1256 		// VM system requires write access to update named entry cache mode
1257 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1258 	}
1259 
1260 	tag = (vm_tag_t) getVMTag(map);
1261 
1262 	addr       = 0;
1263 	didAlloc   = false;
1264 
1265 	if (!(options & kIOMapAnywhere)) {
1266 		addr = *inaddr;
1267 	}
1268 
1269 	// find first entry for offset
1270 	for (firstEntryIdx = 0;
1271 	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1272 	    firstEntryIdx++) {
1273 	}
1274 	firstEntryIdx--;
1275 
1276 	// calculate required VM space
1277 
1278 	entryIdx = firstEntryIdx;
1279 	entry = &ref->entries[entryIdx];
1280 
1281 	remain  = size;
1282 	int64_t iteroffset = offset;
1283 	uint64_t mapSize = 0;
1284 	while (remain) {
1285 		entryOffset = iteroffset - entry->offset;
1286 		if (entryOffset >= entry->size) {
1287 			panic("entryOffset");
1288 		}
1289 
1290 #if LOGUNALIGN
1291 		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1292 		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1293 #endif
1294 
1295 		chunk = entry->size - entryOffset;
1296 		if (chunk) {
1297 			if (chunk > remain) {
1298 				chunk = remain;
1299 			}
1300 			mach_vm_size_t entrySize;
1301 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1302 			assert(KERN_SUCCESS == err);
1303 			mapSize += entrySize;
1304 
1305 			remain -= chunk;
1306 			if (!remain) {
1307 				break;
1308 			}
1309 			iteroffset   += chunk; // - pageOffset;
1310 		}
1311 		entry++;
1312 		entryIdx++;
1313 		if (entryIdx >= ref->count) {
1314 			panic("overrun");
1315 			err = kIOReturnOverrun;
1316 			break;
1317 		}
1318 	}
1319 
1320 	if (kIOMapOverwrite & options) {
1321 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1322 			map = IOPageableMapForAddress(addr);
1323 		}
1324 		err = KERN_SUCCESS;
1325 	} else {
1326 		IOMemoryDescriptorMapAllocRef ref;
1327 		ref.map     = map;
1328 		ref.tag     = tag;
1329 		ref.options = options;
1330 		ref.size    = mapSize;
1331 		ref.prot    = prot;
1332 		if (options & kIOMapAnywhere) {
1333 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1334 			ref.mapped = 0;
1335 		} else {
1336 			ref.mapped = addr;
1337 		}
1338 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1340 		} else {
1341 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1342 		}
1343 
1344 		if (KERN_SUCCESS == err) {
1345 			addr     = ref.mapped;
1346 			map      = ref.map;
1347 			didAlloc = true;
1348 		}
1349 #if LOGUNALIGN
1350 		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1351 #endif
1352 	}
1353 
1354 	/*
1355 	 * If the memory is associated with a device pager but doesn't have a UPL,
1356 	 * it will be immediately faulted in through the pager via populateDevicePager().
1357 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1358 	 * operations.
1359 	 */
1360 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1361 		options &= ~kIOMapPrefault;
1362 	}
1363 
1364 	/*
1365 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1366 	 * memory type, and the underlying data.
1367 	 */
1368 	if (options & kIOMapPrefault) {
1369 		/*
1370 		 * The memory must have been wired by calling ::prepare(), otherwise
1371 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1372 		 */
1373 		assert(_wireCount != 0);
1374 		assert(_memoryEntries != NULL);
1375 		if ((_wireCount == 0) ||
1376 		    (_memoryEntries == NULL)) {
1377 			return kIOReturnBadArgument;
1378 		}
1379 
1380 		// Get the page list.
1381 		ioGMDData* dataP = getDataP(_memoryEntries);
1382 		ioPLBlock const* ioplList = getIOPLList(dataP);
1383 		pageList = getPageList(dataP);
1384 
1385 		// Get the number of IOPLs.
1386 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1387 
1388 		/*
1389 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1390 		 * the offset. The research will go past it, so we'll need to go back to the
1391 		 * right range at the end.
1392 		 */
1393 		UInt ioplIndex = 0;
1394 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1395 			ioplIndex++;
1396 		}
1397 		ioplIndex--;
1398 
1399 		// Retrieve the IOPL info block.
1400 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1401 
1402 		/*
1403 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1404 		 * array.
1405 		 */
1406 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1407 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1408 		} else {
1409 			pageList = &pageList[ioplInfo.fPageInfo];
1410 		}
1411 
1412 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1413 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1414 
1415 		// Retrieve the index of the first page corresponding to the offset.
1416 		currentPageIndex = atop_32(offsetInIOPL);
1417 	}
1418 
1419 	// enter mappings
1420 	remain   = size;
1421 	mapAddr  = addr;
1422 	entryIdx = firstEntryIdx;
1423 	entry = &ref->entries[entryIdx];
1424 
1425 	while (remain && (KERN_SUCCESS == err)) {
1426 #if LOGUNALIGN
1427 		printf("offset %qx, %qx\n", offset, entry->offset);
1428 #endif
1429 		if (kIODefaultCache != cacheMode) {
1430 			vm_size_t unused = 0;
1431 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1432 			    memEntryCacheMode, NULL, entry->entry);
1433 			assert(KERN_SUCCESS == err);
1434 		}
1435 		entryOffset = offset - entry->offset;
1436 		if (entryOffset >= entry->size) {
1437 			panic("entryOffset");
1438 		}
1439 		chunk = entry->size - entryOffset;
1440 #if LOGUNALIGN
1441 		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1442 #endif
1443 		if (chunk) {
1444 			vm_map_kernel_flags_t vmk_flags;
1445 
1446 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1447 			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1448 
1449 			if (chunk > remain) {
1450 				chunk = remain;
1451 			}
1452 			mapAddrOut = mapAddr;
1453 			if (options & kIOMapPrefault) {
1454 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1455 
1456 				err = vm_map_enter_mem_object_prefault(map,
1457 				    &mapAddrOut,
1458 				    chunk, 0 /* mask */,
1459 				    (VM_FLAGS_FIXED
1460 				    | VM_FLAGS_OVERWRITE
1461 				    | VM_FLAGS_RETURN_DATA_ADDR),
1462 				    vmk_flags,
1463 				    tag,
1464 				    entry->entry,
1465 				    entryOffset,
1466 				    prot,                        // cur
1467 				    prot,                        // max
1468 				    &pageList[currentPageIndex],
1469 				    nb_pages);
1470 
1471 				// Compute the next index in the page list.
1472 				currentPageIndex += nb_pages;
1473 				assert(currentPageIndex <= _pages);
1474 			} else {
1475 #if LOGUNALIGN
1476 				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1477 #endif
1478 				err = vm_map_enter_mem_object(map,
1479 				    &mapAddrOut,
1480 				    chunk, 0 /* mask */,
1481 				    (VM_FLAGS_FIXED
1482 				    | VM_FLAGS_OVERWRITE
1483 				    | VM_FLAGS_RETURN_DATA_ADDR),
1484 				    vmk_flags,
1485 				    tag,
1486 				    entry->entry,
1487 				    entryOffset,
1488 				    false,               // copy
1489 				    prot,               // cur
1490 				    prot,               // max
1491 				    VM_INHERIT_NONE);
1492 			}
1493 			if (KERN_SUCCESS != err) {
1494 				panic("map enter err %x", err);
1495 				break;
1496 			}
1497 #if LOGUNALIGN
1498 			printf("mapAddr o %qx\n", mapAddrOut);
1499 #endif
1500 			if (entryIdx == firstEntryIdx) {
1501 				addr = mapAddrOut;
1502 			}
1503 			remain -= chunk;
1504 			if (!remain) {
1505 				break;
1506 			}
1507 			mach_vm_size_t entrySize;
1508 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1509 			assert(KERN_SUCCESS == err);
1510 			mapAddr += entrySize;
1511 			offset  += chunk;
1512 		}
1513 
1514 		entry++;
1515 		entryIdx++;
1516 		if (entryIdx >= ref->count) {
1517 			err = kIOReturnOverrun;
1518 			break;
1519 		}
1520 	}
1521 
1522 	if (KERN_SUCCESS != err) {
1523 		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1524 	}
1525 
1526 	if ((KERN_SUCCESS != err) && didAlloc) {
1527 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1528 		addr = 0;
1529 	}
1530 	*inaddr = addr;
1531 
1532 	return err;
1533 }
1534 
1535 uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1536 IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1537 	IOMemoryReference * ref,
1538 	uint64_t          * offset)
1539 {
1540 	kern_return_t kr;
1541 	vm_object_offset_t data_offset = 0;
1542 	uint64_t total;
1543 	uint32_t idx;
1544 
1545 	assert(ref->count);
1546 	if (offset) {
1547 		*offset = (uint64_t) data_offset;
1548 	}
1549 	total = 0;
1550 	for (idx = 0; idx < ref->count; idx++) {
1551 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1552 		    &data_offset);
1553 		if (KERN_SUCCESS != kr) {
1554 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1555 		} else if (0 != data_offset) {
1556 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1557 		}
1558 		if (offset && !idx) {
1559 			*offset = (uint64_t) data_offset;
1560 		}
1561 		total += round_page(data_offset + ref->entries[idx].size);
1562 	}
1563 
1564 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1565 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1566 
1567 	return total;
1568 }
1569 
1570 
1571 IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1572 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1573 	IOMemoryReference * ref,
1574 	IOByteCount       * residentPageCount,
1575 	IOByteCount       * dirtyPageCount)
1576 {
1577 	IOReturn        err;
1578 	IOMemoryEntry * entries;
1579 	unsigned int resident, dirty;
1580 	unsigned int totalResident, totalDirty;
1581 
1582 	totalResident = totalDirty = 0;
1583 	err = kIOReturnSuccess;
1584 	entries = ref->entries + ref->count;
1585 	while (entries > &ref->entries[0]) {
1586 		entries--;
1587 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1588 		if (KERN_SUCCESS != err) {
1589 			break;
1590 		}
1591 		totalResident += resident;
1592 		totalDirty    += dirty;
1593 	}
1594 
1595 	if (residentPageCount) {
1596 		*residentPageCount = totalResident;
1597 	}
1598 	if (dirtyPageCount) {
1599 		*dirtyPageCount    = totalDirty;
1600 	}
1601 	return err;
1602 }
1603 
1604 IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1605 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1606 	IOMemoryReference * ref,
1607 	IOOptionBits        newState,
1608 	IOOptionBits      * oldState)
1609 {
1610 	IOReturn        err;
1611 	IOMemoryEntry * entries;
1612 	vm_purgable_t   control;
1613 	int             totalState, state;
1614 
1615 	totalState = kIOMemoryPurgeableNonVolatile;
1616 	err = kIOReturnSuccess;
1617 	entries = ref->entries + ref->count;
1618 	while (entries > &ref->entries[0]) {
1619 		entries--;
1620 
1621 		err = purgeableControlBits(newState, &control, &state);
1622 		if (KERN_SUCCESS != err) {
1623 			break;
1624 		}
1625 		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1626 		if (KERN_SUCCESS != err) {
1627 			break;
1628 		}
1629 		err = purgeableStateBits(&state);
1630 		if (KERN_SUCCESS != err) {
1631 			break;
1632 		}
1633 
1634 		if (kIOMemoryPurgeableEmpty == state) {
1635 			totalState = kIOMemoryPurgeableEmpty;
1636 		} else if (kIOMemoryPurgeableEmpty == totalState) {
1637 			continue;
1638 		} else if (kIOMemoryPurgeableVolatile == totalState) {
1639 			continue;
1640 		} else if (kIOMemoryPurgeableVolatile == state) {
1641 			totalState = kIOMemoryPurgeableVolatile;
1642 		} else {
1643 			totalState = kIOMemoryPurgeableNonVolatile;
1644 		}
1645 	}
1646 
1647 	if (oldState) {
1648 		*oldState = totalState;
1649 	}
1650 	return err;
1651 }
1652 
1653 IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1654 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1655 	IOMemoryReference * ref,
1656 	task_t              newOwner,
1657 	int                 newLedgerTag,
1658 	IOOptionBits        newLedgerOptions)
1659 {
1660 	IOReturn        err, totalErr;
1661 	IOMemoryEntry * entries;
1662 
1663 	totalErr = kIOReturnSuccess;
1664 	entries = ref->entries + ref->count;
1665 	while (entries > &ref->entries[0]) {
1666 		entries--;
1667 
1668 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1669 		if (KERN_SUCCESS != err) {
1670 			totalErr = err;
1671 		}
1672 	}
1673 
1674 	return totalErr;
1675 }
1676 
1677 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1678 
1679 OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1680 IOMemoryDescriptor::withAddress(void *      address,
1681     IOByteCount   length,
1682     IODirection direction)
1683 {
1684 	return IOMemoryDescriptor::
1685 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1686 }
1687 
1688 #ifndef __LP64__
1689 OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1690 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1691     IOByteCount  length,
1692     IODirection  direction,
1693     task_t       task)
1694 {
1695 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1696 	if (that) {
1697 		if (that->initWithAddress(address, length, direction, task)) {
1698 			return os::move(that);
1699 		}
1700 	}
1701 	return nullptr;
1702 }
1703 #endif /* !__LP64__ */
1704 
1705 OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1706 IOMemoryDescriptor::withPhysicalAddress(
1707 	IOPhysicalAddress       address,
1708 	IOByteCount             length,
1709 	IODirection             direction )
1710 {
1711 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1712 }
1713 
1714 #ifndef __LP64__
1715 OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1716 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1717     UInt32           withCount,
1718     IODirection      direction,
1719     task_t           task,
1720     bool             asReference)
1721 {
1722 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1723 	if (that) {
1724 		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1725 			return os::move(that);
1726 		}
1727 	}
1728 	return nullptr;
1729 }
1730 #endif /* !__LP64__ */
1731 
1732 OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1733 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1734     mach_vm_size_t length,
1735     IOOptionBits   options,
1736     task_t         task)
1737 {
1738 	IOAddressRange range = { address, length };
1739 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1740 }
1741 
1742 OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1743 IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1744     UInt32           rangeCount,
1745     IOOptionBits     options,
1746     task_t           task)
1747 {
1748 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1749 	if (that) {
1750 		if (task) {
1751 			options |= kIOMemoryTypeVirtual64;
1752 		} else {
1753 			options |= kIOMemoryTypePhysical64;
1754 		}
1755 
1756 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1757 			return os::move(that);
1758 		}
1759 	}
1760 
1761 	return nullptr;
1762 }
1763 
1764 
1765 /*
1766  * withOptions:
1767  *
1768  * Create a new IOMemoryDescriptor. The buffer is made up of several
1769  * virtual address ranges, from a given task.
1770  *
1771  * Passing the ranges as a reference will avoid an extra allocation.
1772  */
1773 OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1774 IOMemoryDescriptor::withOptions(void *          buffers,
1775     UInt32          count,
1776     UInt32          offset,
1777     task_t          task,
1778     IOOptionBits    opts,
1779     IOMapper *      mapper)
1780 {
1781 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1782 
1783 	if (self
1784 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1785 		return nullptr;
1786 	}
1787 
1788 	return os::move(self);
1789 }
1790 
1791 bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1792 IOMemoryDescriptor::initWithOptions(void *         buffers,
1793     UInt32         count,
1794     UInt32         offset,
1795     task_t         task,
1796     IOOptionBits   options,
1797     IOMapper *     mapper)
1798 {
1799 	return false;
1800 }
1801 
1802 #ifndef __LP64__
1803 OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1804 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1805     UInt32          withCount,
1806     IODirection     direction,
1807     bool            asReference)
1808 {
1809 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1810 	if (that) {
1811 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1812 			return os::move(that);
1813 		}
1814 	}
1815 	return nullptr;
1816 }
1817 
1818 OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1819 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
1820     IOByteCount             offset,
1821     IOByteCount             length,
1822     IODirection             direction)
1823 {
1824 	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1825 }
1826 #endif /* !__LP64__ */
1827 
1828 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1829 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1830 {
1831 	IOGeneralMemoryDescriptor *origGenMD =
1832 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1833 
1834 	if (origGenMD) {
1835 		return IOGeneralMemoryDescriptor::
1836 		       withPersistentMemoryDescriptor(origGenMD);
1837 	} else {
1838 		return nullptr;
1839 	}
1840 }
1841 
1842 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor * originalMD)1843 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1844 {
1845 	IOMemoryReference * memRef;
1846 	OSSharedPtr<IOGeneralMemoryDescriptor> self;
1847 
1848 	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1849 		return nullptr;
1850 	}
1851 
1852 	if (memRef == originalMD->_memRef) {
1853 		self.reset(originalMD, OSRetain);
1854 		originalMD->memoryReferenceRelease(memRef);
1855 		return os::move(self);
1856 	}
1857 
1858 	self = OSMakeShared<IOGeneralMemoryDescriptor>();
1859 	IOMDPersistentInitData initData = { originalMD, memRef };
1860 
1861 	if (self
1862 	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1863 		return nullptr;
1864 	}
1865 	return os::move(self);
1866 }
1867 
1868 #ifndef __LP64__
1869 bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1870 IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1871     IOByteCount   withLength,
1872     IODirection withDirection)
1873 {
1874 	_singleRange.v.address = (vm_offset_t) address;
1875 	_singleRange.v.length  = withLength;
1876 
1877 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1878 }
1879 
1880 bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1881 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1882     IOByteCount    withLength,
1883     IODirection  withDirection,
1884     task_t       withTask)
1885 {
1886 	_singleRange.v.address = address;
1887 	_singleRange.v.length  = withLength;
1888 
1889 	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1890 }
1891 
1892 bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1893 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1894 	IOPhysicalAddress      address,
1895 	IOByteCount            withLength,
1896 	IODirection            withDirection )
1897 {
1898 	_singleRange.p.address = address;
1899 	_singleRange.p.length  = withLength;
1900 
1901 	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1902 }
1903 
1904 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1905 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1906 	IOPhysicalRange * ranges,
1907 	UInt32            count,
1908 	IODirection       direction,
1909 	bool              reference)
1910 {
1911 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1912 
1913 	if (reference) {
1914 		mdOpts |= kIOMemoryAsReference;
1915 	}
1916 
1917 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1918 }
1919 
1920 bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1921 IOGeneralMemoryDescriptor::initWithRanges(
1922 	IOVirtualRange * ranges,
1923 	UInt32           count,
1924 	IODirection      direction,
1925 	task_t           task,
1926 	bool             reference)
1927 {
1928 	IOOptionBits mdOpts = direction;
1929 
1930 	if (reference) {
1931 		mdOpts |= kIOMemoryAsReference;
1932 	}
1933 
1934 	if (task) {
1935 		mdOpts |= kIOMemoryTypeVirtual;
1936 
1937 		// Auto-prepare if this is a kernel memory descriptor as very few
1938 		// clients bother to prepare() kernel memory.
1939 		// But it was not enforced so what are you going to do?
1940 		if (task == kernel_task) {
1941 			mdOpts |= kIOMemoryAutoPrepare;
1942 		}
1943 	} else {
1944 		mdOpts |= kIOMemoryTypePhysical;
1945 	}
1946 
1947 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1948 }
1949 #endif /* !__LP64__ */
1950 
1951 /*
1952  * initWithOptions:
1953  *
1954  *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1955  * from a given task, several physical ranges, an UPL from the ubc
1956  * system or a uio (may be 64bit) from the BSD subsystem.
1957  *
1958  * Passing the ranges as a reference will avoid an extra allocation.
1959  *
1960  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1961  * existing instance -- note this behavior is not commonly supported in other
1962  * I/O Kit classes, although it is supported here.
1963  */
1964 
1965 bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1966 IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
1967     UInt32       count,
1968     UInt32       offset,
1969     task_t       task,
1970     IOOptionBits options,
1971     IOMapper *   mapper)
1972 {
1973 	IOOptionBits type = options & kIOMemoryTypeMask;
1974 
1975 #ifndef __LP64__
1976 	if (task
1977 	    && (kIOMemoryTypeVirtual == type)
1978 	    && vm_map_is_64bit(get_task_map(task))
1979 	    && ((IOVirtualRange *) buffers)->address) {
1980 		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1981 		return false;
1982 	}
1983 #endif /* !__LP64__ */
1984 
1985 	// Grab the original MD's configuation data to initialse the
1986 	// arguments to this function.
1987 	if (kIOMemoryTypePersistentMD == type) {
1988 		IOMDPersistentInitData *initData = (typeof(initData))buffers;
1989 		const IOGeneralMemoryDescriptor *orig = initData->fMD;
1990 		ioGMDData *dataP = getDataP(orig->_memoryEntries);
1991 
1992 		// Only accept persistent memory descriptors with valid dataP data.
1993 		assert(orig->_rangesCount == 1);
1994 		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
1995 			return false;
1996 		}
1997 
1998 		_memRef = initData->fMemRef; // Grab the new named entry
1999 		options = orig->_flags & ~kIOMemoryAsReference;
2000 		type = options & kIOMemoryTypeMask;
2001 		buffers = orig->_ranges.v;
2002 		count = orig->_rangesCount;
2003 
2004 		// Now grab the original task and whatever mapper was previously used
2005 		task = orig->_task;
2006 		mapper = dataP->fMapper;
2007 
2008 		// We are ready to go through the original initialisation now
2009 	}
2010 
2011 	switch (type) {
2012 	case kIOMemoryTypeUIO:
2013 	case kIOMemoryTypeVirtual:
2014 #ifndef __LP64__
2015 	case kIOMemoryTypeVirtual64:
2016 #endif /* !__LP64__ */
2017 		assert(task);
2018 		if (!task) {
2019 			return false;
2020 		}
2021 		break;
2022 
2023 	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
2024 #ifndef __LP64__
2025 	case kIOMemoryTypePhysical64:
2026 #endif /* !__LP64__ */
2027 	case kIOMemoryTypeUPL:
2028 		assert(!task);
2029 		break;
2030 	default:
2031 		return false; /* bad argument */
2032 	}
2033 
2034 	assert(buffers);
2035 	assert(count);
2036 
2037 	/*
2038 	 * We can check the _initialized  instance variable before having ever set
2039 	 * it to an initial value because I/O Kit guarantees that all our instance
2040 	 * variables are zeroed on an object's allocation.
2041 	 */
2042 
2043 	if (_initialized) {
2044 		/*
2045 		 * An existing memory descriptor is being retargeted to point to
2046 		 * somewhere else.  Clean up our present state.
2047 		 */
2048 		IOOptionBits type = _flags & kIOMemoryTypeMask;
2049 		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2050 			while (_wireCount) {
2051 				complete();
2052 			}
2053 		}
2054 		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2055 			if (kIOMemoryTypeUIO == type) {
2056 				uio_free((uio_t) _ranges.v);
2057 			}
2058 #ifndef __LP64__
2059 			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2060 				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2061 			}
2062 #endif /* !__LP64__ */
2063 			else {
2064 				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2065 			}
2066 		}
2067 
2068 		options |= (kIOMemoryRedirected & _flags);
2069 		if (!(kIOMemoryRedirected & options)) {
2070 			if (_memRef) {
2071 				memoryReferenceRelease(_memRef);
2072 				_memRef = NULL;
2073 			}
2074 			if (_mappings) {
2075 				_mappings->flushCollection();
2076 			}
2077 		}
2078 	} else {
2079 		if (!super::init()) {
2080 			return false;
2081 		}
2082 		_initialized = true;
2083 	}
2084 
2085 	// Grab the appropriate mapper
2086 	if (kIOMemoryHostOrRemote & options) {
2087 		options |= kIOMemoryMapperNone;
2088 	}
2089 	if (kIOMemoryMapperNone & options) {
2090 		mapper = NULL; // No Mapper
2091 	} else if (mapper == kIOMapperSystem) {
2092 		IOMapper::checkForSystemMapper();
2093 		gIOSystemMapper = mapper = IOMapper::gSystem;
2094 	}
2095 
2096 	// Remove the dynamic internal use flags from the initial setting
2097 	options               &= ~(kIOMemoryPreparedReadOnly);
2098 	_flags                 = options;
2099 	_task                  = task;
2100 
2101 #ifndef __LP64__
2102 	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
2103 #endif /* !__LP64__ */
2104 
2105 	_dmaReferences = 0;
2106 	__iomd_reservedA = 0;
2107 	__iomd_reservedB = 0;
2108 	_highestPage = 0;
2109 
2110 	if (kIOMemoryThreadSafe & options) {
2111 		if (!_prepareLock) {
2112 			_prepareLock = IOLockAlloc();
2113 		}
2114 	} else if (_prepareLock) {
2115 		IOLockFree(_prepareLock);
2116 		_prepareLock = NULL;
2117 	}
2118 
2119 	if (kIOMemoryTypeUPL == type) {
2120 		ioGMDData *dataP;
2121 		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2122 
2123 		if (!initMemoryEntries(dataSize, mapper)) {
2124 			return false;
2125 		}
2126 		dataP = getDataP(_memoryEntries);
2127 		dataP->fPageCnt = 0;
2128 		switch (kIOMemoryDirectionMask & options) {
2129 		case kIODirectionOut:
2130 			dataP->fDMAAccess = kIODMAMapReadAccess;
2131 			break;
2132 		case kIODirectionIn:
2133 			dataP->fDMAAccess = kIODMAMapWriteAccess;
2134 			break;
2135 		case kIODirectionNone:
2136 		case kIODirectionOutIn:
2137 		default:
2138 			panic("bad dir for upl 0x%x", (int) options);
2139 			break;
2140 		}
2141 		//       _wireCount++;	// UPLs start out life wired
2142 
2143 		_length    = count;
2144 		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2145 
2146 		ioPLBlock iopl;
2147 		iopl.fIOPL = (upl_t) buffers;
2148 		upl_set_referenced(iopl.fIOPL, true);
2149 		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2150 
2151 		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2152 			panic("short external upl");
2153 		}
2154 
2155 		_highestPage = upl_get_highest_page(iopl.fIOPL);
2156 		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2157 
2158 		// Set the flag kIOPLOnDevice convieniently equal to 1
2159 		iopl.fFlags  = pageList->device | kIOPLExternUPL;
2160 		if (!pageList->device) {
2161 			// Pre-compute the offset into the UPL's page list
2162 			pageList = &pageList[atop_32(offset)];
2163 			offset &= PAGE_MASK;
2164 		}
2165 		iopl.fIOMDOffset = 0;
2166 		iopl.fMappedPage = 0;
2167 		iopl.fPageInfo = (vm_address_t) pageList;
2168 		iopl.fPageOffset = offset;
2169 		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
2170 	} else {
2171 		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2172 		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2173 
2174 		// Initialize the memory descriptor
2175 		if (options & kIOMemoryAsReference) {
2176 #ifndef __LP64__
2177 			_rangesIsAllocated = false;
2178 #endif /* !__LP64__ */
2179 
2180 			// Hack assignment to get the buffer arg into _ranges.
2181 			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2182 			// work, C++ sigh.
2183 			// This also initialises the uio & physical ranges.
2184 			_ranges.v = (IOVirtualRange *) buffers;
2185 		} else {
2186 #ifndef __LP64__
2187 			_rangesIsAllocated = true;
2188 #endif /* !__LP64__ */
2189 			switch (type) {
2190 			case kIOMemoryTypeUIO:
2191 				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2192 				break;
2193 
2194 #ifndef __LP64__
2195 			case kIOMemoryTypeVirtual64:
2196 			case kIOMemoryTypePhysical64:
2197 				if (count == 1
2198 #ifndef __arm__
2199 				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2200 #endif
2201 				    ) {
2202 					if (kIOMemoryTypeVirtual64 == type) {
2203 						type = kIOMemoryTypeVirtual;
2204 					} else {
2205 						type = kIOMemoryTypePhysical;
2206 					}
2207 					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2208 					_rangesIsAllocated = false;
2209 					_ranges.v = &_singleRange.v;
2210 					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
2211 					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
2212 					break;
2213 				}
2214 				_ranges.v64 = IONew(IOAddressRange, count);
2215 				if (!_ranges.v64) {
2216 					return false;
2217 				}
2218 				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2219 				break;
2220 #endif /* !__LP64__ */
2221 			case kIOMemoryTypeVirtual:
2222 			case kIOMemoryTypePhysical:
2223 				if (count == 1) {
2224 					_flags |= kIOMemoryAsReference;
2225 #ifndef __LP64__
2226 					_rangesIsAllocated = false;
2227 #endif /* !__LP64__ */
2228 					_ranges.v = &_singleRange.v;
2229 				} else {
2230 					_ranges.v = IONew(IOVirtualRange, count);
2231 					if (!_ranges.v) {
2232 						return false;
2233 					}
2234 				}
2235 				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2236 				break;
2237 			}
2238 		}
2239 		_rangesCount = count;
2240 
2241 		// Find starting address within the vector of ranges
2242 		Ranges vec = _ranges;
2243 		mach_vm_size_t totalLength = 0;
2244 		unsigned int ind, pages = 0;
2245 		for (ind = 0; ind < count; ind++) {
2246 			mach_vm_address_t addr;
2247 			mach_vm_address_t endAddr;
2248 			mach_vm_size_t    len;
2249 
2250 			// addr & len are returned by this function
2251 			getAddrLenForInd(addr, len, type, vec, ind);
2252 			if (_task) {
2253 				mach_vm_size_t phys_size;
2254 				kern_return_t kret;
2255 				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2256 				if (KERN_SUCCESS != kret) {
2257 					break;
2258 				}
2259 				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2260 					break;
2261 				}
2262 			} else {
2263 				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2264 					break;
2265 				}
2266 				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2267 					break;
2268 				}
2269 				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2270 					break;
2271 				}
2272 			}
2273 			if (os_add_overflow(totalLength, len, &totalLength)) {
2274 				break;
2275 			}
2276 			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2277 				uint64_t highPage = atop_64(addr + len - 1);
2278 				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2279 					_highestPage = (ppnum_t) highPage;
2280 					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2281 				}
2282 			}
2283 		}
2284 		if ((ind < count)
2285 		    || (totalLength != ((IOByteCount) totalLength))) {
2286 			return false;                                   /* overflow */
2287 		}
2288 		_length      = totalLength;
2289 		_pages       = pages;
2290 
2291 		// Auto-prepare memory at creation time.
2292 		// Implied completion when descriptor is free-ed
2293 
2294 
2295 		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2296 			_wireCount++; // Physical MDs are, by definition, wired
2297 		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2298 			ioGMDData *dataP;
2299 			unsigned dataSize;
2300 
2301 			if (_pages > atop_64(max_mem)) {
2302 				return false;
2303 			}
2304 
2305 			dataSize = computeDataSize(_pages, /* upls */ count * 2);
2306 			if (!initMemoryEntries(dataSize, mapper)) {
2307 				return false;
2308 			}
2309 			dataP = getDataP(_memoryEntries);
2310 			dataP->fPageCnt = _pages;
2311 
2312 			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2313 			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2314 				_kernelTag = IOMemoryTag(kernel_map);
2315 				if (_kernelTag == gIOSurfaceTag) {
2316 					_userTag = VM_MEMORY_IOSURFACE;
2317 				}
2318 			}
2319 
2320 			if ((kIOMemoryPersistent & _flags) && !_memRef) {
2321 				IOReturn
2322 				    err = memoryReferenceCreate(0, &_memRef);
2323 				if (kIOReturnSuccess != err) {
2324 					return false;
2325 				}
2326 			}
2327 
2328 			if ((_flags & kIOMemoryAutoPrepare)
2329 			    && prepare() != kIOReturnSuccess) {
2330 				return false;
2331 			}
2332 		}
2333 	}
2334 
2335 	return true;
2336 }
2337 
2338 /*
2339  * free
2340  *
2341  * Free resources.
2342  */
2343 void
free()2344 IOGeneralMemoryDescriptor::free()
2345 {
2346 	IOOptionBits type = _flags & kIOMemoryTypeMask;
2347 
2348 	if (reserved && reserved->dp.memory) {
2349 		LOCK;
2350 		reserved->dp.memory = NULL;
2351 		UNLOCK;
2352 	}
2353 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2354 		ioGMDData * dataP;
2355 		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
2356 			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
2357 			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
2358 		}
2359 	} else {
2360 		while (_wireCount) {
2361 			complete();
2362 		}
2363 	}
2364 
2365 	if (_memoryEntries) {
2366 		_memoryEntries.reset();
2367 	}
2368 
2369 	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2370 		if (kIOMemoryTypeUIO == type) {
2371 			uio_free((uio_t) _ranges.v);
2372 		}
2373 #ifndef __LP64__
2374 		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2375 			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2376 		}
2377 #endif /* !__LP64__ */
2378 		else {
2379 			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2380 		}
2381 
2382 		_ranges.v = NULL;
2383 	}
2384 
2385 	if (reserved) {
2386 		cleanKernelReserved(reserved);
2387 		if (reserved->dp.devicePager) {
2388 			// memEntry holds a ref on the device pager which owns reserved
2389 			// (IOMemoryDescriptorReserved) so no reserved access after this point
2390 			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
2391 		} else {
2392 			IOFreeType(reserved, IOMemoryDescriptorReserved);
2393 		}
2394 		reserved = NULL;
2395 	}
2396 
2397 	if (_memRef) {
2398 		memoryReferenceRelease(_memRef);
2399 	}
2400 	if (_prepareLock) {
2401 		IOLockFree(_prepareLock);
2402 	}
2403 
2404 	super::free();
2405 }
2406 
2407 #ifndef __LP64__
2408 void
unmapFromKernel()2409 IOGeneralMemoryDescriptor::unmapFromKernel()
2410 {
2411 	panic("IOGMD::unmapFromKernel deprecated");
2412 }
2413 
2414 void
mapIntoKernel(unsigned rangeIndex)2415 IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
2416 {
2417 	panic("IOGMD::mapIntoKernel deprecated");
2418 }
2419 #endif /* !__LP64__ */
2420 
2421 /*
2422  * getDirection:
2423  *
2424  * Get the direction of the transfer.
2425  */
2426 IODirection
getDirection() const2427 IOMemoryDescriptor::getDirection() const
2428 {
2429 #ifndef __LP64__
2430 	if (_direction) {
2431 		return _direction;
2432 	}
2433 #endif /* !__LP64__ */
2434 	return (IODirection) (_flags & kIOMemoryDirectionMask);
2435 }
2436 
2437 /*
2438  * getLength:
2439  *
2440  * Get the length of the transfer (over all ranges).
2441  */
2442 IOByteCount
getLength() const2443 IOMemoryDescriptor::getLength() const
2444 {
2445 	return _length;
2446 }
2447 
2448 void
setTag(IOOptionBits tag)2449 IOMemoryDescriptor::setTag( IOOptionBits tag )
2450 {
2451 	_tag = tag;
2452 }
2453 
2454 IOOptionBits
getTag(void)2455 IOMemoryDescriptor::getTag( void )
2456 {
2457 	return _tag;
2458 }
2459 
2460 uint64_t
getFlags(void)2461 IOMemoryDescriptor::getFlags(void)
2462 {
2463 	return _flags;
2464 }
2465 
2466 OSObject *
copyContext(void) const2467 IOMemoryDescriptor::copyContext(void) const
2468 {
2469 	if (reserved) {
2470 		OSObject * context = reserved->contextObject;
2471 		if (context) {
2472 			context->retain();
2473 		}
2474 		return context;
2475 	} else {
2476 		return NULL;
2477 	}
2478 }
2479 
2480 void
setContext(OSObject * obj)2481 IOMemoryDescriptor::setContext(OSObject * obj)
2482 {
2483 	if (this->reserved == NULL && obj == NULL) {
2484 		// No existing object, and no object to set
2485 		return;
2486 	}
2487 
2488 	IOMemoryDescriptorReserved * reserved = getKernelReserved();
2489 	if (reserved) {
2490 		OSObject * oldObject = reserved->contextObject;
2491 		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
2492 			oldObject->release();
2493 		}
2494 		if (obj != NULL) {
2495 			obj->retain();
2496 			reserved->contextObject = obj;
2497 		}
2498 	}
2499 }
2500 
2501 #ifndef __LP64__
2502 #pragma clang diagnostic push
2503 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2504 
2505 // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
2506 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2507 IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
2508 {
2509 	addr64_t physAddr = 0;
2510 
2511 	if (prepare() == kIOReturnSuccess) {
2512 		physAddr = getPhysicalSegment64( offset, length );
2513 		complete();
2514 	}
2515 
2516 	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2517 }
2518 
2519 #pragma clang diagnostic pop
2520 
2521 #endif /* !__LP64__ */
2522 
2523 IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)2524 IOMemoryDescriptor::readBytes
2525 (IOByteCount offset, void *bytes, IOByteCount length)
2526 {
2527 	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2528 	IOByteCount endoffset;
2529 	IOByteCount remaining;
2530 
2531 
2532 	// Check that this entire I/O is within the available range
2533 	if ((offset > _length)
2534 	    || os_add_overflow(length, offset, &endoffset)
2535 	    || (endoffset > _length)) {
2536 		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2537 		return 0;
2538 	}
2539 	if (offset >= _length) {
2540 		return 0;
2541 	}
2542 
2543 	assert(!(kIOMemoryRemote & _flags));
2544 	if (kIOMemoryRemote & _flags) {
2545 		return 0;
2546 	}
2547 
2548 	if (kIOMemoryThreadSafe & _flags) {
2549 		LOCK;
2550 	}
2551 
2552 	remaining = length = min(length, _length - offset);
2553 	while (remaining) { // (process another target segment?)
2554 		addr64_t        srcAddr64;
2555 		IOByteCount     srcLen;
2556 
2557 		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2558 		if (!srcAddr64) {
2559 			break;
2560 		}
2561 
2562 		// Clip segment length to remaining
2563 		if (srcLen > remaining) {
2564 			srcLen = remaining;
2565 		}
2566 
2567 		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2568 			srcLen = (UINT_MAX - PAGE_SIZE + 1);
2569 		}
2570 		copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2571 		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2572 
2573 		dstAddr   += srcLen;
2574 		offset    += srcLen;
2575 		remaining -= srcLen;
2576 	}
2577 
2578 	if (kIOMemoryThreadSafe & _flags) {
2579 		UNLOCK;
2580 	}
2581 
2582 	assert(!remaining);
2583 
2584 	return length - remaining;
2585 }
2586 
2587 IOByteCount
writeBytes(IOByteCount inoffset,const void * bytes,IOByteCount length)2588 IOMemoryDescriptor::writeBytes
2589 (IOByteCount inoffset, const void *bytes, IOByteCount length)
2590 {
2591 	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2592 	IOByteCount remaining;
2593 	IOByteCount endoffset;
2594 	IOByteCount offset = inoffset;
2595 
2596 	assert( !(kIOMemoryPreparedReadOnly & _flags));
2597 
2598 	// Check that this entire I/O is within the available range
2599 	if ((offset > _length)
2600 	    || os_add_overflow(length, offset, &endoffset)
2601 	    || (endoffset > _length)) {
2602 		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2603 		return 0;
2604 	}
2605 	if (kIOMemoryPreparedReadOnly & _flags) {
2606 		return 0;
2607 	}
2608 	if (offset >= _length) {
2609 		return 0;
2610 	}
2611 
2612 	assert(!(kIOMemoryRemote & _flags));
2613 	if (kIOMemoryRemote & _flags) {
2614 		return 0;
2615 	}
2616 
2617 	if (kIOMemoryThreadSafe & _flags) {
2618 		LOCK;
2619 	}
2620 
2621 	remaining = length = min(length, _length - offset);
2622 	while (remaining) { // (process another target segment?)
2623 		addr64_t    dstAddr64;
2624 		IOByteCount dstLen;
2625 
2626 		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2627 		if (!dstAddr64) {
2628 			break;
2629 		}
2630 
2631 		// Clip segment length to remaining
2632 		if (dstLen > remaining) {
2633 			dstLen = remaining;
2634 		}
2635 
2636 		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2637 			dstLen = (UINT_MAX - PAGE_SIZE + 1);
2638 		}
2639 		if (!srcAddr) {
2640 			bzero_phys(dstAddr64, (unsigned int) dstLen);
2641 		} else {
2642 			copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2643 			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2644 			srcAddr   += dstLen;
2645 		}
2646 		offset    += dstLen;
2647 		remaining -= dstLen;
2648 	}
2649 
2650 	if (kIOMemoryThreadSafe & _flags) {
2651 		UNLOCK;
2652 	}
2653 
2654 	assert(!remaining);
2655 
2656 #if defined(__x86_64__)
2657 	// copypv does not cppvFsnk on intel
2658 #else
2659 	if (!srcAddr) {
2660 		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2661 	}
2662 #endif
2663 
2664 	return length - remaining;
2665 }
2666 
2667 #ifndef __LP64__
2668 void
setPosition(IOByteCount position)2669 IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2670 {
2671 	panic("IOGMD::setPosition deprecated");
2672 }
2673 #endif /* !__LP64__ */
2674 
2675 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2676 static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2677 
2678 uint64_t
getPreparationID(void)2679 IOGeneralMemoryDescriptor::getPreparationID( void )
2680 {
2681 	ioGMDData *dataP;
2682 
2683 	if (!_wireCount) {
2684 		return kIOPreparationIDUnprepared;
2685 	}
2686 
2687 	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2688 	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2689 		IOMemoryDescriptor::setPreparationID();
2690 		return IOMemoryDescriptor::getPreparationID();
2691 	}
2692 
2693 	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2694 		return kIOPreparationIDUnprepared;
2695 	}
2696 
2697 	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2698 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2699 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2700 	}
2701 	return dataP->fPreparationID;
2702 }
2703 
2704 void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2705 IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2706 {
2707 	if (reserved->creator) {
2708 		task_deallocate(reserved->creator);
2709 		reserved->creator = NULL;
2710 	}
2711 
2712 	if (reserved->contextObject) {
2713 		reserved->contextObject->release();
2714 		reserved->contextObject = NULL;
2715 	}
2716 }
2717 
2718 IOMemoryDescriptorReserved *
getKernelReserved(void)2719 IOMemoryDescriptor::getKernelReserved( void )
2720 {
2721 	if (!reserved) {
2722 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2723 	}
2724 	return reserved;
2725 }
2726 
2727 void
setPreparationID(void)2728 IOMemoryDescriptor::setPreparationID( void )
2729 {
2730 	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2731 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2732 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2733 	}
2734 }
2735 
2736 uint64_t
getPreparationID(void)2737 IOMemoryDescriptor::getPreparationID( void )
2738 {
2739 	if (reserved) {
2740 		return reserved->preparationID;
2741 	} else {
2742 		return kIOPreparationIDUnsupported;
2743 	}
2744 }
2745 
2746 void
setDescriptorID(void)2747 IOMemoryDescriptor::setDescriptorID( void )
2748 {
2749 	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2750 		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2751 		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2752 	}
2753 }
2754 
2755 uint64_t
getDescriptorID(void)2756 IOMemoryDescriptor::getDescriptorID( void )
2757 {
2758 	setDescriptorID();
2759 
2760 	if (reserved) {
2761 		return reserved->descriptorID;
2762 	} else {
2763 		return kIODescriptorIDInvalid;
2764 	}
2765 }
2766 
2767 IOReturn
ktraceEmitPhysicalSegments(void)2768 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2769 {
2770 	if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2771 		return kIOReturnSuccess;
2772 	}
2773 
2774 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2775 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2776 		return kIOReturnBadArgument;
2777 	}
2778 
2779 	uint64_t descriptorID = getDescriptorID();
2780 	assert(descriptorID != kIODescriptorIDInvalid);
2781 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2782 		return kIOReturnBadArgument;
2783 	}
2784 
2785 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2786 
2787 #if __LP64__
2788 	static const uint8_t num_segments_page = 8;
2789 #else
2790 	static const uint8_t num_segments_page = 4;
2791 #endif
2792 	static const uint8_t num_segments_long = 2;
2793 
2794 	IOPhysicalAddress segments_page[num_segments_page];
2795 	IOPhysicalRange   segments_long[num_segments_long];
2796 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2797 	memset(segments_long, 0, sizeof(segments_long));
2798 
2799 	uint8_t segment_page_idx = 0;
2800 	uint8_t segment_long_idx = 0;
2801 
2802 	IOPhysicalRange physical_segment;
2803 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2804 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2805 
2806 		if (physical_segment.length == 0) {
2807 			break;
2808 		}
2809 
2810 		/**
2811 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2812 		 * buffer memory, pack segment events according to the following.
2813 		 *
2814 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2815 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2816 		 *
2817 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2818 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2819 		 * - unmapped pages will have a ppn of MAX_INT_32
2820 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2821 		 * - address_0, length_0, address_0, length_1
2822 		 * - unmapped pages will have an address of 0
2823 		 *
2824 		 * During each iteration do the following depending on the length of the mapping:
2825 		 * 1. add the current segment to the appropriate queue of pending segments
2826 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2827 		 * 1a. if FALSE emit and reset all events in the previous queue
2828 		 * 2. check if we have filled up the current queue of pending events
2829 		 * 2a. if TRUE emit and reset all events in the pending queue
2830 		 * 3. after completing all iterations emit events in the current queue
2831 		 */
2832 
2833 		bool emit_page = false;
2834 		bool emit_long = false;
2835 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2836 			segments_page[segment_page_idx] = physical_segment.address;
2837 			segment_page_idx++;
2838 
2839 			emit_long = segment_long_idx != 0;
2840 			emit_page = segment_page_idx == num_segments_page;
2841 
2842 			if (os_unlikely(emit_long)) {
2843 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2844 				    segments_long[0].address, segments_long[0].length,
2845 				    segments_long[1].address, segments_long[1].length);
2846 			}
2847 
2848 			if (os_unlikely(emit_page)) {
2849 #if __LP64__
2850 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2851 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2852 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2853 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2854 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2855 #else
2856 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2857 				    (ppnum_t) atop_32(segments_page[1]),
2858 				    (ppnum_t) atop_32(segments_page[2]),
2859 				    (ppnum_t) atop_32(segments_page[3]),
2860 				    (ppnum_t) atop_32(segments_page[4]));
2861 #endif
2862 			}
2863 		} else {
2864 			segments_long[segment_long_idx] = physical_segment;
2865 			segment_long_idx++;
2866 
2867 			emit_page = segment_page_idx != 0;
2868 			emit_long = segment_long_idx == num_segments_long;
2869 
2870 			if (os_unlikely(emit_page)) {
2871 #if __LP64__
2872 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2873 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2874 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2875 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2876 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2877 #else
2878 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2879 				    (ppnum_t) atop_32(segments_page[1]),
2880 				    (ppnum_t) atop_32(segments_page[2]),
2881 				    (ppnum_t) atop_32(segments_page[3]),
2882 				    (ppnum_t) atop_32(segments_page[4]));
2883 #endif
2884 			}
2885 
2886 			if (emit_long) {
2887 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2888 				    segments_long[0].address, segments_long[0].length,
2889 				    segments_long[1].address, segments_long[1].length);
2890 			}
2891 		}
2892 
2893 		if (os_unlikely(emit_page)) {
2894 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2895 			segment_page_idx = 0;
2896 		}
2897 
2898 		if (os_unlikely(emit_long)) {
2899 			memset(segments_long, 0, sizeof(segments_long));
2900 			segment_long_idx = 0;
2901 		}
2902 	}
2903 
2904 	if (segment_page_idx != 0) {
2905 		assert(segment_long_idx == 0);
2906 #if __LP64__
2907 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2908 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2909 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2910 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2911 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2912 #else
2913 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2914 		    (ppnum_t) atop_32(segments_page[1]),
2915 		    (ppnum_t) atop_32(segments_page[2]),
2916 		    (ppnum_t) atop_32(segments_page[3]),
2917 		    (ppnum_t) atop_32(segments_page[4]));
2918 #endif
2919 	} else if (segment_long_idx != 0) {
2920 		assert(segment_page_idx == 0);
2921 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2922 		    segments_long[0].address, segments_long[0].length,
2923 		    segments_long[1].address, segments_long[1].length);
2924 	}
2925 
2926 	return kIOReturnSuccess;
2927 }
2928 
2929 void
setVMTags(uint32_t kernelTag,uint32_t userTag)2930 IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2931 {
2932 	_kernelTag = (vm_tag_t) kernelTag;
2933 	_userTag   = (vm_tag_t) userTag;
2934 }
2935 
2936 uint32_t
getVMTag(vm_map_t map)2937 IOMemoryDescriptor::getVMTag(vm_map_t map)
2938 {
2939 	if (vm_kernel_map_is_kernel(map)) {
2940 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2941 			return (uint32_t) _kernelTag;
2942 		}
2943 	} else {
2944 		if (VM_KERN_MEMORY_NONE != _userTag) {
2945 			return (uint32_t) _userTag;
2946 		}
2947 	}
2948 	return IOMemoryTag(map);
2949 }
2950 
2951 IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2952 IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2953 {
2954 	IOReturn err = kIOReturnSuccess;
2955 	DMACommandOps params;
2956 	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2957 	ioGMDData *dataP;
2958 
2959 	params = (op & ~kIOMDDMACommandOperationMask & op);
2960 	op &= kIOMDDMACommandOperationMask;
2961 
2962 	if (kIOMDDMAMap == op) {
2963 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
2964 			return kIOReturnUnderrun;
2965 		}
2966 
2967 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2968 
2969 		if (!_memoryEntries
2970 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2971 			return kIOReturnNoMemory;
2972 		}
2973 
2974 		if (_memoryEntries && data->fMapper) {
2975 			bool remap, keepMap;
2976 			dataP = getDataP(_memoryEntries);
2977 
2978 			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2979 				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2980 			}
2981 			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2982 				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
2983 			}
2984 
2985 			keepMap = (data->fMapper == gIOSystemMapper);
2986 			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2987 
2988 			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2989 				IOLockLock(_prepareLock);
2990 			}
2991 
2992 			remap = (!keepMap);
2993 			remap |= (dataP->fDMAMapNumAddressBits < 64)
2994 			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2995 			remap |= (dataP->fDMAMapAlignment > page_size);
2996 
2997 			if (remap || !dataP->fMappedBaseValid) {
2998 				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2999 				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3000 					dataP->fMappedBase      = data->fAlloc;
3001 					dataP->fMappedBaseValid = true;
3002 					dataP->fMappedLength    = data->fAllocLength;
3003 					data->fAllocLength      = 0;    // IOMD owns the alloc now
3004 				}
3005 			} else {
3006 				data->fAlloc = dataP->fMappedBase;
3007 				data->fAllocLength = 0;         // give out IOMD map
3008 				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3009 			}
3010 
3011 			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3012 				IOLockUnlock(_prepareLock);
3013 			}
3014 		}
3015 		return err;
3016 	}
3017 	if (kIOMDDMAUnmap == op) {
3018 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
3019 			return kIOReturnUnderrun;
3020 		}
3021 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3022 
3023 		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3024 
3025 		return kIOReturnSuccess;
3026 	}
3027 
3028 	if (kIOMDAddDMAMapSpec == op) {
3029 		if (dataSize < sizeof(IODMAMapSpecification)) {
3030 			return kIOReturnUnderrun;
3031 		}
3032 
3033 		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3034 
3035 		if (!_memoryEntries
3036 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3037 			return kIOReturnNoMemory;
3038 		}
3039 
3040 		if (_memoryEntries) {
3041 			dataP = getDataP(_memoryEntries);
3042 			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3043 				dataP->fDMAMapNumAddressBits = data->numAddressBits;
3044 			}
3045 			if (data->alignment > dataP->fDMAMapAlignment) {
3046 				dataP->fDMAMapAlignment = data->alignment;
3047 			}
3048 		}
3049 		return kIOReturnSuccess;
3050 	}
3051 
3052 	if (kIOMDGetCharacteristics == op) {
3053 		if (dataSize < sizeof(IOMDDMACharacteristics)) {
3054 			return kIOReturnUnderrun;
3055 		}
3056 
3057 		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3058 		data->fLength = _length;
3059 		data->fSGCount = _rangesCount;
3060 		data->fPages = _pages;
3061 		data->fDirection = getDirection();
3062 		if (!_wireCount) {
3063 			data->fIsPrepared = false;
3064 		} else {
3065 			data->fIsPrepared = true;
3066 			data->fHighestPage = _highestPage;
3067 			if (_memoryEntries) {
3068 				dataP = getDataP(_memoryEntries);
3069 				ioPLBlock *ioplList = getIOPLList(dataP);
3070 				UInt count = getNumIOPL(_memoryEntries, dataP);
3071 				if (count == 1) {
3072 					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3073 				}
3074 			}
3075 		}
3076 
3077 		return kIOReturnSuccess;
3078 	} else if (kIOMDDMAActive == op) {
3079 		if (params) {
3080 			int16_t prior;
3081 			prior = OSAddAtomic16(1, &md->_dmaReferences);
3082 			if (!prior) {
3083 				md->_mapName = NULL;
3084 			}
3085 		} else {
3086 			if (md->_dmaReferences) {
3087 				OSAddAtomic16(-1, &md->_dmaReferences);
3088 			} else {
3089 				panic("_dmaReferences underflow");
3090 			}
3091 		}
3092 	} else if (kIOMDWalkSegments != op) {
3093 		return kIOReturnBadArgument;
3094 	}
3095 
3096 	// Get the next segment
3097 	struct InternalState {
3098 		IOMDDMAWalkSegmentArgs fIO;
3099 		mach_vm_size_t fOffset2Index;
3100 		mach_vm_size_t fNextOffset;
3101 		UInt fIndex;
3102 	} *isP;
3103 
3104 	// Find the next segment
3105 	if (dataSize < sizeof(*isP)) {
3106 		return kIOReturnUnderrun;
3107 	}
3108 
3109 	isP = (InternalState *) vData;
3110 	uint64_t offset = isP->fIO.fOffset;
3111 	uint8_t mapped = isP->fIO.fMapped;
3112 	uint64_t mappedBase;
3113 
3114 	if (mapped && (kIOMemoryRemote & _flags)) {
3115 		return kIOReturnNotAttached;
3116 	}
3117 
3118 	if (IOMapper::gSystem && mapped
3119 	    && (!(kIOMemoryHostOnly & _flags))
3120 	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3121 //	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3122 		if (!_memoryEntries
3123 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3124 			return kIOReturnNoMemory;
3125 		}
3126 
3127 		dataP = getDataP(_memoryEntries);
3128 		if (dataP->fMapper) {
3129 			IODMAMapSpecification mapSpec;
3130 			bzero(&mapSpec, sizeof(mapSpec));
3131 			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3132 			mapSpec.alignment = dataP->fDMAMapAlignment;
3133 			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3134 			if (kIOReturnSuccess != err) {
3135 				return err;
3136 			}
3137 			dataP->fMappedBaseValid = true;
3138 		}
3139 	}
3140 
3141 	if (mapped) {
3142 		if (IOMapper::gSystem
3143 		    && (!(kIOMemoryHostOnly & _flags))
3144 		    && _memoryEntries
3145 		    && (dataP = getDataP(_memoryEntries))
3146 		    && dataP->fMappedBaseValid) {
3147 			mappedBase = dataP->fMappedBase;
3148 		} else {
3149 			mapped = 0;
3150 		}
3151 	}
3152 
3153 	if (offset >= _length) {
3154 		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3155 	}
3156 
3157 	// Validate the previous offset
3158 	UInt ind;
3159 	mach_vm_size_t off2Ind = isP->fOffset2Index;
3160 	if (!params
3161 	    && offset
3162 	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
3163 		ind = isP->fIndex;
3164 	} else {
3165 		ind = off2Ind = 0; // Start from beginning
3166 	}
3167 	mach_vm_size_t length;
3168 	UInt64 address;
3169 
3170 	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3171 		// Physical address based memory descriptor
3172 		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3173 
3174 		// Find the range after the one that contains the offset
3175 		mach_vm_size_t len;
3176 		for (len = 0; off2Ind <= offset; ind++) {
3177 			len = physP[ind].length;
3178 			off2Ind += len;
3179 		}
3180 
3181 		// Calculate length within range and starting address
3182 		length   = off2Ind - offset;
3183 		address  = physP[ind - 1].address + len - length;
3184 
3185 		if (true && mapped) {
3186 			address = mappedBase + offset;
3187 		} else {
3188 			// see how far we can coalesce ranges
3189 			while (ind < _rangesCount && address + length == physP[ind].address) {
3190 				len = physP[ind].length;
3191 				length += len;
3192 				off2Ind += len;
3193 				ind++;
3194 			}
3195 		}
3196 
3197 		// correct contiguous check overshoot
3198 		ind--;
3199 		off2Ind -= len;
3200 	}
3201 #ifndef __LP64__
3202 	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3203 		// Physical address based memory descriptor
3204 		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3205 
3206 		// Find the range after the one that contains the offset
3207 		mach_vm_size_t len;
3208 		for (len = 0; off2Ind <= offset; ind++) {
3209 			len = physP[ind].length;
3210 			off2Ind += len;
3211 		}
3212 
3213 		// Calculate length within range and starting address
3214 		length   = off2Ind - offset;
3215 		address  = physP[ind - 1].address + len - length;
3216 
3217 		if (true && mapped) {
3218 			address = mappedBase + offset;
3219 		} else {
3220 			// see how far we can coalesce ranges
3221 			while (ind < _rangesCount && address + length == physP[ind].address) {
3222 				len = physP[ind].length;
3223 				length += len;
3224 				off2Ind += len;
3225 				ind++;
3226 			}
3227 		}
3228 		// correct contiguous check overshoot
3229 		ind--;
3230 		off2Ind -= len;
3231 	}
3232 #endif /* !__LP64__ */
3233 	else {
3234 		do {
3235 			if (!_wireCount) {
3236 				panic("IOGMD: not wired for the IODMACommand");
3237 			}
3238 
3239 			assert(_memoryEntries);
3240 
3241 			dataP = getDataP(_memoryEntries);
3242 			const ioPLBlock *ioplList = getIOPLList(dataP);
3243 			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3244 			upl_page_info_t *pageList = getPageList(dataP);
3245 
3246 			assert(numIOPLs > 0);
3247 
3248 			// Scan through iopl info blocks looking for block containing offset
3249 			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3250 				ind++;
3251 			}
3252 
3253 			// Go back to actual range as search goes past it
3254 			ioPLBlock ioplInfo = ioplList[ind - 1];
3255 			off2Ind = ioplInfo.fIOMDOffset;
3256 
3257 			if (ind < numIOPLs) {
3258 				length = ioplList[ind].fIOMDOffset;
3259 			} else {
3260 				length = _length;
3261 			}
3262 			length -= offset;       // Remainder within iopl
3263 
3264 			// Subtract offset till this iopl in total list
3265 			offset -= off2Ind;
3266 
3267 			// If a mapped address is requested and this is a pre-mapped IOPL
3268 			// then just need to compute an offset relative to the mapped base.
3269 			if (mapped) {
3270 				offset += (ioplInfo.fPageOffset & PAGE_MASK);
3271 				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3272 				continue; // Done leave do/while(false) now
3273 			}
3274 
3275 			// The offset is rebased into the current iopl.
3276 			// Now add the iopl 1st page offset.
3277 			offset += ioplInfo.fPageOffset;
3278 
3279 			// For external UPLs the fPageInfo field points directly to
3280 			// the upl's upl_page_info_t array.
3281 			if (ioplInfo.fFlags & kIOPLExternUPL) {
3282 				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3283 			} else {
3284 				pageList = &pageList[ioplInfo.fPageInfo];
3285 			}
3286 
3287 			// Check for direct device non-paged memory
3288 			if (ioplInfo.fFlags & kIOPLOnDevice) {
3289 				address = ptoa_64(pageList->phys_addr) + offset;
3290 				continue; // Done leave do/while(false) now
3291 			}
3292 
3293 			// Now we need compute the index into the pageList
3294 			UInt pageInd = atop_32(offset);
3295 			offset &= PAGE_MASK;
3296 
3297 			// Compute the starting address of this segment
3298 			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3299 			if (!pageAddr) {
3300 				panic("!pageList phys_addr");
3301 			}
3302 
3303 			address = ptoa_64(pageAddr) + offset;
3304 
3305 			// length is currently set to the length of the remainider of the iopl.
3306 			// We need to check that the remainder of the iopl is contiguous.
3307 			// This is indicated by pageList[ind].phys_addr being sequential.
3308 			IOByteCount contigLength = PAGE_SIZE - offset;
3309 			while (contigLength < length
3310 			    && ++pageAddr == pageList[++pageInd].phys_addr) {
3311 				contigLength += PAGE_SIZE;
3312 			}
3313 
3314 			if (contigLength < length) {
3315 				length = contigLength;
3316 			}
3317 
3318 
3319 			assert(address);
3320 			assert(length);
3321 		} while (false);
3322 	}
3323 
3324 	// Update return values and state
3325 	isP->fIO.fIOVMAddr = address;
3326 	isP->fIO.fLength   = length;
3327 	isP->fIndex        = ind;
3328 	isP->fOffset2Index = off2Ind;
3329 	isP->fNextOffset   = isP->fIO.fOffset + length;
3330 
3331 	return kIOReturnSuccess;
3332 }
3333 
3334 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3335 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3336 {
3337 	IOReturn          ret;
3338 	mach_vm_address_t address = 0;
3339 	mach_vm_size_t    length  = 0;
3340 	IOMapper *        mapper  = gIOSystemMapper;
3341 	IOOptionBits      type    = _flags & kIOMemoryTypeMask;
3342 
3343 	if (lengthOfSegment) {
3344 		*lengthOfSegment = 0;
3345 	}
3346 
3347 	if (offset >= _length) {
3348 		return 0;
3349 	}
3350 
3351 	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3352 	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3353 	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3354 	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3355 
3356 	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3357 		unsigned rangesIndex = 0;
3358 		Ranges vec = _ranges;
3359 		mach_vm_address_t addr;
3360 
3361 		// Find starting address within the vector of ranges
3362 		for (;;) {
3363 			getAddrLenForInd(addr, length, type, vec, rangesIndex);
3364 			if (offset < length) {
3365 				break;
3366 			}
3367 			offset -= length; // (make offset relative)
3368 			rangesIndex++;
3369 		}
3370 
3371 		// Now that we have the starting range,
3372 		// lets find the last contiguous range
3373 		addr   += offset;
3374 		length -= offset;
3375 
3376 		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3377 			mach_vm_address_t newAddr;
3378 			mach_vm_size_t    newLen;
3379 
3380 			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
3381 			if (addr + length != newAddr) {
3382 				break;
3383 			}
3384 			length += newLen;
3385 		}
3386 		if (addr) {
3387 			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3388 		}
3389 	} else {
3390 		IOMDDMAWalkSegmentState _state;
3391 		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3392 
3393 		state->fOffset = offset;
3394 		state->fLength = _length - offset;
3395 		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3396 
3397 		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3398 
3399 		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3400 			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3401 			    ret, this, state->fOffset,
3402 			    state->fIOVMAddr, state->fLength);
3403 		}
3404 		if (kIOReturnSuccess == ret) {
3405 			address = state->fIOVMAddr;
3406 			length  = state->fLength;
3407 		}
3408 
3409 		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3410 		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3411 
3412 		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3413 			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3414 				addr64_t    origAddr = address;
3415 				IOByteCount origLen  = length;
3416 
3417 				address = mapper->mapToPhysicalAddress(origAddr);
3418 				length = page_size - (address & (page_size - 1));
3419 				while ((length < origLen)
3420 				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3421 					length += page_size;
3422 				}
3423 				if (length > origLen) {
3424 					length = origLen;
3425 				}
3426 			}
3427 		}
3428 	}
3429 
3430 	if (!address) {
3431 		length = 0;
3432 	}
3433 
3434 	if (lengthOfSegment) {
3435 		*lengthOfSegment = length;
3436 	}
3437 
3438 	return address;
3439 }
3440 
3441 #ifndef __LP64__
3442 #pragma clang diagnostic push
3443 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3444 
3445 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3446 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3447 {
3448 	addr64_t address = 0;
3449 
3450 	if (options & _kIOMemorySourceSegment) {
3451 		address = getSourceSegment(offset, lengthOfSegment);
3452 	} else if (options & kIOMemoryMapperNone) {
3453 		address = getPhysicalSegment64(offset, lengthOfSegment);
3454 	} else {
3455 		address = getPhysicalSegment(offset, lengthOfSegment);
3456 	}
3457 
3458 	return address;
3459 }
3460 #pragma clang diagnostic pop
3461 
3462 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3463 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3464 {
3465 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3466 }
3467 
3468 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3469 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3470 {
3471 	addr64_t    address = 0;
3472 	IOByteCount length  = 0;
3473 
3474 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3475 
3476 	if (lengthOfSegment) {
3477 		length = *lengthOfSegment;
3478 	}
3479 
3480 	if ((address + length) > 0x100000000ULL) {
3481 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3482 		    address, (long) length, (getMetaClass())->getClassName());
3483 	}
3484 
3485 	return (IOPhysicalAddress) address;
3486 }
3487 
3488 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3489 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3490 {
3491 	IOPhysicalAddress phys32;
3492 	IOByteCount       length;
3493 	addr64_t          phys64;
3494 	IOMapper *        mapper = NULL;
3495 
3496 	phys32 = getPhysicalSegment(offset, lengthOfSegment);
3497 	if (!phys32) {
3498 		return 0;
3499 	}
3500 
3501 	if (gIOSystemMapper) {
3502 		mapper = gIOSystemMapper;
3503 	}
3504 
3505 	if (mapper) {
3506 		IOByteCount origLen;
3507 
3508 		phys64 = mapper->mapToPhysicalAddress(phys32);
3509 		origLen = *lengthOfSegment;
3510 		length = page_size - (phys64 & (page_size - 1));
3511 		while ((length < origLen)
3512 		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3513 			length += page_size;
3514 		}
3515 		if (length > origLen) {
3516 			length = origLen;
3517 		}
3518 
3519 		*lengthOfSegment = length;
3520 	} else {
3521 		phys64 = (addr64_t) phys32;
3522 	}
3523 
3524 	return phys64;
3525 }
3526 
3527 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3528 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3529 {
3530 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3531 }
3532 
3533 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3534 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3535 {
3536 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3537 }
3538 
3539 #pragma clang diagnostic push
3540 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3541 
3542 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3543 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3544     IOByteCount * lengthOfSegment)
3545 {
3546 	if (_task == kernel_task) {
3547 		return (void *) getSourceSegment(offset, lengthOfSegment);
3548 	} else {
3549 		panic("IOGMD::getVirtualSegment deprecated");
3550 	}
3551 
3552 	return NULL;
3553 }
3554 #pragma clang diagnostic pop
3555 #endif /* !__LP64__ */
3556 
3557 IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const3558 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3559 {
3560 	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
3561 	DMACommandOps params;
3562 	IOReturn err;
3563 
3564 	params = (op & ~kIOMDDMACommandOperationMask & op);
3565 	op &= kIOMDDMACommandOperationMask;
3566 
3567 	if (kIOMDGetCharacteristics == op) {
3568 		if (dataSize < sizeof(IOMDDMACharacteristics)) {
3569 			return kIOReturnUnderrun;
3570 		}
3571 
3572 		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3573 		data->fLength = getLength();
3574 		data->fSGCount = 0;
3575 		data->fDirection = getDirection();
3576 		data->fIsPrepared = true; // Assume prepared - fails safe
3577 	} else if (kIOMDWalkSegments == op) {
3578 		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
3579 			return kIOReturnUnderrun;
3580 		}
3581 
3582 		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
3583 		IOByteCount offset  = (IOByteCount) data->fOffset;
3584 		IOPhysicalLength length, nextLength;
3585 		addr64_t         addr, nextAddr;
3586 
3587 		if (data->fMapped) {
3588 			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
3589 		}
3590 		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
3591 		offset += length;
3592 		while (offset < getLength()) {
3593 			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
3594 			if ((addr + length) != nextAddr) {
3595 				break;
3596 			}
3597 			length += nextLength;
3598 			offset += nextLength;
3599 		}
3600 		data->fIOVMAddr = addr;
3601 		data->fLength   = length;
3602 	} else if (kIOMDAddDMAMapSpec == op) {
3603 		return kIOReturnUnsupported;
3604 	} else if (kIOMDDMAMap == op) {
3605 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
3606 			return kIOReturnUnderrun;
3607 		}
3608 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3609 
3610 		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3611 
3612 		return err;
3613 	} else if (kIOMDDMAUnmap == op) {
3614 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
3615 			return kIOReturnUnderrun;
3616 		}
3617 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3618 
3619 		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3620 
3621 		return kIOReturnSuccess;
3622 	} else {
3623 		return kIOReturnBadArgument;
3624 	}
3625 
3626 	return kIOReturnSuccess;
3627 }
3628 
3629 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3630 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
3631     IOOptionBits * oldState )
3632 {
3633 	IOReturn      err = kIOReturnSuccess;
3634 
3635 	vm_purgable_t control;
3636 	int           state;
3637 
3638 	assert(!(kIOMemoryRemote & _flags));
3639 	if (kIOMemoryRemote & _flags) {
3640 		return kIOReturnNotAttached;
3641 	}
3642 
3643 	if (_memRef) {
3644 		err = super::setPurgeable(newState, oldState);
3645 	} else {
3646 		if (kIOMemoryThreadSafe & _flags) {
3647 			LOCK;
3648 		}
3649 		do{
3650 			// Find the appropriate vm_map for the given task
3651 			vm_map_t curMap;
3652 			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
3653 				err = kIOReturnNotReady;
3654 				break;
3655 			} else if (!_task) {
3656 				err = kIOReturnUnsupported;
3657 				break;
3658 			} else {
3659 				curMap = get_task_map(_task);
3660 				if (NULL == curMap) {
3661 					err = KERN_INVALID_ARGUMENT;
3662 					break;
3663 				}
3664 			}
3665 
3666 			// can only do one range
3667 			Ranges vec = _ranges;
3668 			IOOptionBits type = _flags & kIOMemoryTypeMask;
3669 			mach_vm_address_t addr;
3670 			mach_vm_size_t    len;
3671 			getAddrLenForInd(addr, len, type, vec, 0);
3672 
3673 			err = purgeableControlBits(newState, &control, &state);
3674 			if (kIOReturnSuccess != err) {
3675 				break;
3676 			}
3677 			err = vm_map_purgable_control(curMap, addr, control, &state);
3678 			if (oldState) {
3679 				if (kIOReturnSuccess == err) {
3680 					err = purgeableStateBits(&state);
3681 					*oldState = state;
3682 				}
3683 			}
3684 		}while (false);
3685 		if (kIOMemoryThreadSafe & _flags) {
3686 			UNLOCK;
3687 		}
3688 	}
3689 
3690 	return err;
3691 }
3692 
3693 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3694 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3695     IOOptionBits * oldState )
3696 {
3697 	IOReturn err = kIOReturnNotReady;
3698 
3699 	if (kIOMemoryThreadSafe & _flags) {
3700 		LOCK;
3701 	}
3702 	if (_memRef) {
3703 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3704 	}
3705 	if (kIOMemoryThreadSafe & _flags) {
3706 		UNLOCK;
3707 	}
3708 
3709 	return err;
3710 }
3711 
3712 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3713 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3714     int newLedgerTag,
3715     IOOptionBits newLedgerOptions )
3716 {
3717 	IOReturn      err = kIOReturnSuccess;
3718 
3719 	assert(!(kIOMemoryRemote & _flags));
3720 	if (kIOMemoryRemote & _flags) {
3721 		return kIOReturnNotAttached;
3722 	}
3723 
3724 	if (iokit_iomd_setownership_enabled == FALSE) {
3725 		return kIOReturnUnsupported;
3726 	}
3727 
3728 	if (_memRef) {
3729 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3730 	} else {
3731 		err = kIOReturnUnsupported;
3732 	}
3733 
3734 	return err;
3735 }
3736 
3737 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3738 IOMemoryDescriptor::setOwnership( task_t newOwner,
3739     int newLedgerTag,
3740     IOOptionBits newLedgerOptions )
3741 {
3742 	IOReturn err = kIOReturnNotReady;
3743 
3744 	assert(!(kIOMemoryRemote & _flags));
3745 	if (kIOMemoryRemote & _flags) {
3746 		return kIOReturnNotAttached;
3747 	}
3748 
3749 	if (iokit_iomd_setownership_enabled == FALSE) {
3750 		return kIOReturnUnsupported;
3751 	}
3752 
3753 	if (kIOMemoryThreadSafe & _flags) {
3754 		LOCK;
3755 	}
3756 	if (_memRef) {
3757 		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3758 	} else {
3759 		IOMultiMemoryDescriptor * mmd;
3760 		IOSubMemoryDescriptor   * smd;
3761 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3762 			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3763 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3764 			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3765 		}
3766 	}
3767 	if (kIOMemoryThreadSafe & _flags) {
3768 		UNLOCK;
3769 	}
3770 
3771 	return err;
3772 }
3773 
3774 
3775 uint64_t
getDMAMapLength(uint64_t * offset)3776 IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3777 {
3778 	uint64_t length;
3779 
3780 	if (_memRef) {
3781 		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3782 	} else {
3783 		IOByteCount       iterate, segLen;
3784 		IOPhysicalAddress sourceAddr, sourceAlign;
3785 
3786 		if (kIOMemoryThreadSafe & _flags) {
3787 			LOCK;
3788 		}
3789 		length = 0;
3790 		iterate = 0;
3791 		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3792 			sourceAlign = (sourceAddr & page_mask);
3793 			if (offset && !iterate) {
3794 				*offset = sourceAlign;
3795 			}
3796 			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3797 			iterate += segLen;
3798 		}
3799 		if (kIOMemoryThreadSafe & _flags) {
3800 			UNLOCK;
3801 		}
3802 	}
3803 
3804 	return length;
3805 }
3806 
3807 
3808 IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3809 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3810     IOByteCount * dirtyPageCount )
3811 {
3812 	IOReturn err = kIOReturnNotReady;
3813 
3814 	assert(!(kIOMemoryRemote & _flags));
3815 	if (kIOMemoryRemote & _flags) {
3816 		return kIOReturnNotAttached;
3817 	}
3818 
3819 	if (kIOMemoryThreadSafe & _flags) {
3820 		LOCK;
3821 	}
3822 	if (_memRef) {
3823 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3824 	} else {
3825 		IOMultiMemoryDescriptor * mmd;
3826 		IOSubMemoryDescriptor   * smd;
3827 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3828 			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3829 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3830 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3831 		}
3832 	}
3833 	if (kIOMemoryThreadSafe & _flags) {
3834 		UNLOCK;
3835 	}
3836 
3837 	return err;
3838 }
3839 
3840 
3841 #if defined(__arm__) || defined(__arm64__)
3842 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3843 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3844 #else /* defined(__arm__) || defined(__arm64__) */
3845 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3846 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3847 #endif /* defined(__arm__) || defined(__arm64__) */
3848 
3849 static void
SetEncryptOp(addr64_t pa,unsigned int count)3850 SetEncryptOp(addr64_t pa, unsigned int count)
3851 {
3852 	ppnum_t page, end;
3853 
3854 	page = (ppnum_t) atop_64(round_page_64(pa));
3855 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3856 	for (; page < end; page++) {
3857 		pmap_clear_noencrypt(page);
3858 	}
3859 }
3860 
3861 static void
ClearEncryptOp(addr64_t pa,unsigned int count)3862 ClearEncryptOp(addr64_t pa, unsigned int count)
3863 {
3864 	ppnum_t page, end;
3865 
3866 	page = (ppnum_t) atop_64(round_page_64(pa));
3867 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3868 	for (; page < end; page++) {
3869 		pmap_set_noencrypt(page);
3870 	}
3871 }
3872 
3873 IOReturn
performOperation(IOOptionBits options,IOByteCount offset,IOByteCount length)3874 IOMemoryDescriptor::performOperation( IOOptionBits options,
3875     IOByteCount offset, IOByteCount length )
3876 {
3877 	IOByteCount remaining;
3878 	unsigned int res;
3879 	void (*func)(addr64_t pa, unsigned int count) = NULL;
3880 #if defined(__arm__) || defined(__arm64__)
3881 	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3882 #endif
3883 
3884 	assert(!(kIOMemoryRemote & _flags));
3885 	if (kIOMemoryRemote & _flags) {
3886 		return kIOReturnNotAttached;
3887 	}
3888 
3889 	switch (options) {
3890 	case kIOMemoryIncoherentIOFlush:
3891 #if defined(__arm__) || defined(__arm64__)
3892 		func_ext = &dcache_incoherent_io_flush64;
3893 #if __ARM_COHERENT_IO__
3894 		func_ext(0, 0, 0, &res);
3895 		return kIOReturnSuccess;
3896 #else /* __ARM_COHERENT_IO__ */
3897 		break;
3898 #endif /* __ARM_COHERENT_IO__ */
3899 #else /* defined(__arm__) || defined(__arm64__) */
3900 		func = &dcache_incoherent_io_flush64;
3901 		break;
3902 #endif /* defined(__arm__) || defined(__arm64__) */
3903 	case kIOMemoryIncoherentIOStore:
3904 #if defined(__arm__) || defined(__arm64__)
3905 		func_ext = &dcache_incoherent_io_store64;
3906 #if __ARM_COHERENT_IO__
3907 		func_ext(0, 0, 0, &res);
3908 		return kIOReturnSuccess;
3909 #else /* __ARM_COHERENT_IO__ */
3910 		break;
3911 #endif /* __ARM_COHERENT_IO__ */
3912 #else /* defined(__arm__) || defined(__arm64__) */
3913 		func = &dcache_incoherent_io_store64;
3914 		break;
3915 #endif /* defined(__arm__) || defined(__arm64__) */
3916 
3917 	case kIOMemorySetEncrypted:
3918 		func = &SetEncryptOp;
3919 		break;
3920 	case kIOMemoryClearEncrypted:
3921 		func = &ClearEncryptOp;
3922 		break;
3923 	}
3924 
3925 #if defined(__arm__) || defined(__arm64__)
3926 	if ((func == NULL) && (func_ext == NULL)) {
3927 		return kIOReturnUnsupported;
3928 	}
3929 #else /* defined(__arm__) || defined(__arm64__) */
3930 	if (!func) {
3931 		return kIOReturnUnsupported;
3932 	}
3933 #endif /* defined(__arm__) || defined(__arm64__) */
3934 
3935 	if (kIOMemoryThreadSafe & _flags) {
3936 		LOCK;
3937 	}
3938 
3939 	res = 0x0UL;
3940 	remaining = length = min(length, getLength() - offset);
3941 	while (remaining) {
3942 		// (process another target segment?)
3943 		addr64_t    dstAddr64;
3944 		IOByteCount dstLen;
3945 
3946 		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3947 		if (!dstAddr64) {
3948 			break;
3949 		}
3950 
3951 		// Clip segment length to remaining
3952 		if (dstLen > remaining) {
3953 			dstLen = remaining;
3954 		}
3955 		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
3956 			dstLen = (UINT_MAX - PAGE_SIZE + 1);
3957 		}
3958 		if (remaining > UINT_MAX) {
3959 			remaining = UINT_MAX;
3960 		}
3961 
3962 #if defined(__arm__) || defined(__arm64__)
3963 		if (func) {
3964 			(*func)(dstAddr64, (unsigned int) dstLen);
3965 		}
3966 		if (func_ext) {
3967 			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
3968 			if (res != 0x0UL) {
3969 				remaining = 0;
3970 				break;
3971 			}
3972 		}
3973 #else /* defined(__arm__) || defined(__arm64__) */
3974 		(*func)(dstAddr64, (unsigned int) dstLen);
3975 #endif /* defined(__arm__) || defined(__arm64__) */
3976 
3977 		offset    += dstLen;
3978 		remaining -= dstLen;
3979 	}
3980 
3981 	if (kIOMemoryThreadSafe & _flags) {
3982 		UNLOCK;
3983 	}
3984 
3985 	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
3986 }
3987 
3988 /*
3989  *
3990  */
3991 
3992 #if defined(__i386__) || defined(__x86_64__)
3993 
3994 extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
3995 
3996 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
3997  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
3998  * kernel non-text data -- should we just add another range instead?
3999  */
4000 #define io_kernel_static_start  vm_kernel_stext
4001 #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4002 
4003 #elif defined(__arm__) || defined(__arm64__)
4004 
4005 extern vm_offset_t              static_memory_end;
4006 
4007 #if defined(__arm64__)
4008 #define io_kernel_static_start vm_kext_base
4009 #else /* defined(__arm64__) */
4010 #define io_kernel_static_start vm_kernel_stext
4011 #endif /* defined(__arm64__) */
4012 
4013 #define io_kernel_static_end    static_memory_end
4014 
4015 #else
4016 #error io_kernel_static_end is undefined for this architecture
4017 #endif
4018 
4019 static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4020 io_get_kernel_static_upl(
4021 	vm_map_t                /* map */,
4022 	uintptr_t               offset,
4023 	upl_size_t              *upl_size,
4024 	unsigned int            *page_offset,
4025 	upl_t                   *upl,
4026 	upl_page_info_array_t   page_list,
4027 	unsigned int            *count,
4028 	ppnum_t                 *highest_page)
4029 {
4030 	unsigned int pageCount, page;
4031 	ppnum_t phys;
4032 	ppnum_t highestPage = 0;
4033 
4034 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4035 	if (pageCount > *count) {
4036 		pageCount = *count;
4037 	}
4038 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4039 
4040 	*upl = NULL;
4041 	*page_offset = ((unsigned int) page_mask & offset);
4042 
4043 	for (page = 0; page < pageCount; page++) {
4044 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4045 		if (!phys) {
4046 			break;
4047 		}
4048 		page_list[page].phys_addr = phys;
4049 		page_list[page].free_when_done = 0;
4050 		page_list[page].absent    = 0;
4051 		page_list[page].dirty     = 0;
4052 		page_list[page].precious  = 0;
4053 		page_list[page].device    = 0;
4054 		if (phys > highestPage) {
4055 			highestPage = phys;
4056 		}
4057 	}
4058 
4059 	*highest_page = highestPage;
4060 
4061 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4062 }
4063 
4064 IOReturn
wireVirtual(IODirection forDirection)4065 IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4066 {
4067 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4068 	IOReturn error = kIOReturnSuccess;
4069 	ioGMDData *dataP;
4070 	upl_page_info_array_t pageInfo;
4071 	ppnum_t mapBase;
4072 	vm_tag_t tag = VM_KERN_MEMORY_NONE;
4073 	mach_vm_size_t numBytesWired = 0;
4074 
4075 	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4076 
4077 	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4078 		forDirection = (IODirection) (forDirection | getDirection());
4079 	}
4080 
4081 	dataP = getDataP(_memoryEntries);
4082 	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4083 	switch (kIODirectionOutIn & forDirection) {
4084 	case kIODirectionOut:
4085 		// Pages do not need to be marked as dirty on commit
4086 		uplFlags = UPL_COPYOUT_FROM;
4087 		dataP->fDMAAccess = kIODMAMapReadAccess;
4088 		break;
4089 
4090 	case kIODirectionIn:
4091 		dataP->fDMAAccess = kIODMAMapWriteAccess;
4092 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4093 		break;
4094 
4095 	default:
4096 		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4097 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4098 		break;
4099 	}
4100 
4101 	if (_wireCount) {
4102 		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4103 			OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
4104 			error = kIOReturnNotWritable;
4105 		}
4106 	} else {
4107 		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4108 		IOMapper *mapper;
4109 
4110 		mapper = dataP->fMapper;
4111 		dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4112 
4113 		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4114 		tag = _kernelTag;
4115 		if (VM_KERN_MEMORY_NONE == tag) {
4116 			tag = IOMemoryTag(kernel_map);
4117 		}
4118 
4119 		if (kIODirectionPrepareToPhys32 & forDirection) {
4120 			if (!mapper) {
4121 				uplFlags |= UPL_NEED_32BIT_ADDR;
4122 			}
4123 			if (dataP->fDMAMapNumAddressBits > 32) {
4124 				dataP->fDMAMapNumAddressBits = 32;
4125 			}
4126 		}
4127 		if (kIODirectionPrepareNoFault    & forDirection) {
4128 			uplFlags |= UPL_REQUEST_NO_FAULT;
4129 		}
4130 		if (kIODirectionPrepareNoZeroFill & forDirection) {
4131 			uplFlags |= UPL_NOZEROFILLIO;
4132 		}
4133 		if (kIODirectionPrepareNonCoherent & forDirection) {
4134 			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4135 		}
4136 
4137 		mapBase = 0;
4138 
4139 		// Note that appendBytes(NULL) zeros the data up to the desired length
4140 		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4141 		if (uplPageSize > ((unsigned int)uplPageSize)) {
4142 			error = kIOReturnNoMemory;
4143 			traceInterval.setEndArg2(error);
4144 			return error;
4145 		}
4146 		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4147 			error = kIOReturnNoMemory;
4148 			traceInterval.setEndArg2(error);
4149 			return error;
4150 		}
4151 		dataP = NULL;
4152 
4153 		// Find the appropriate vm_map for the given task
4154 		vm_map_t curMap;
4155 		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4156 			curMap = NULL;
4157 		} else {
4158 			curMap = get_task_map(_task);
4159 		}
4160 
4161 		// Iterate over the vector of virtual ranges
4162 		Ranges vec = _ranges;
4163 		unsigned int pageIndex  = 0;
4164 		IOByteCount mdOffset    = 0;
4165 		ppnum_t highestPage     = 0;
4166 		bool         byteAlignUPL;
4167 
4168 		IOMemoryEntry * memRefEntry = NULL;
4169 		if (_memRef) {
4170 			memRefEntry = &_memRef->entries[0];
4171 			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4172 		} else {
4173 			byteAlignUPL = true;
4174 		}
4175 
4176 		for (UInt range = 0; mdOffset < _length; range++) {
4177 			ioPLBlock iopl;
4178 			mach_vm_address_t startPage, startPageOffset;
4179 			mach_vm_size_t    numBytes;
4180 			ppnum_t highPage = 0;
4181 
4182 			if (_memRef) {
4183 				if (range >= _memRef->count) {
4184 					panic("memRefEntry");
4185 				}
4186 				memRefEntry = &_memRef->entries[range];
4187 				numBytes    = memRefEntry->size;
4188 				startPage   = -1ULL;
4189 				if (byteAlignUPL) {
4190 					startPageOffset = 0;
4191 				} else {
4192 					startPageOffset = (memRefEntry->start & PAGE_MASK);
4193 				}
4194 			} else {
4195 				// Get the startPage address and length of vec[range]
4196 				getAddrLenForInd(startPage, numBytes, type, vec, range);
4197 				if (byteAlignUPL) {
4198 					startPageOffset = 0;
4199 				} else {
4200 					startPageOffset = startPage & PAGE_MASK;
4201 					startPage = trunc_page_64(startPage);
4202 				}
4203 			}
4204 			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4205 			numBytes += startPageOffset;
4206 
4207 			if (mapper) {
4208 				iopl.fMappedPage = mapBase + pageIndex;
4209 			} else {
4210 				iopl.fMappedPage = 0;
4211 			}
4212 
4213 			// Iterate over the current range, creating UPLs
4214 			while (numBytes) {
4215 				vm_address_t kernelStart = (vm_address_t) startPage;
4216 				vm_map_t theMap;
4217 				if (curMap) {
4218 					theMap = curMap;
4219 				} else if (_memRef) {
4220 					theMap = NULL;
4221 				} else {
4222 					assert(_task == kernel_task);
4223 					theMap = IOPageableMapForAddress(kernelStart);
4224 				}
4225 
4226 				// ioplFlags is an in/out parameter
4227 				upl_control_flags_t ioplFlags = uplFlags;
4228 				dataP = getDataP(_memoryEntries);
4229 				pageInfo = getPageList(dataP);
4230 				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4231 
4232 				mach_vm_size_t ioplPhysSize;
4233 				upl_size_t     ioplSize;
4234 				unsigned int   numPageInfo;
4235 
4236 				if (_memRef) {
4237 					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4238 					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4239 				} else {
4240 					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4241 					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4242 				}
4243 				if (error != KERN_SUCCESS) {
4244 					if (_memRef) {
4245 						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4246 					} else {
4247 						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4248 					}
4249 					printf("entry size error %d\n", error);
4250 					goto abortExit;
4251 				}
4252 				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4253 				numPageInfo = atop_32(ioplPhysSize);
4254 				if (byteAlignUPL) {
4255 					if (numBytes > ioplPhysSize) {
4256 						ioplSize = ((typeof(ioplSize))ioplPhysSize);
4257 					} else {
4258 						ioplSize = ((typeof(ioplSize))numBytes);
4259 					}
4260 				} else {
4261 					ioplSize = ((typeof(ioplSize))ioplPhysSize);
4262 				}
4263 
4264 				if (_memRef) {
4265 					memory_object_offset_t entryOffset;
4266 
4267 					entryOffset = mdOffset;
4268 					if (byteAlignUPL) {
4269 						entryOffset = (entryOffset - memRefEntry->offset);
4270 					} else {
4271 						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4272 					}
4273 					if (ioplSize > (memRefEntry->size - entryOffset)) {
4274 						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4275 					}
4276 					error = memory_object_iopl_request(memRefEntry->entry,
4277 					    entryOffset,
4278 					    &ioplSize,
4279 					    &iopl.fIOPL,
4280 					    baseInfo,
4281 					    &numPageInfo,
4282 					    &ioplFlags,
4283 					    tag);
4284 				} else if ((theMap == kernel_map)
4285 				    && (kernelStart >= io_kernel_static_start)
4286 				    && (kernelStart < io_kernel_static_end)) {
4287 					error = io_get_kernel_static_upl(theMap,
4288 					    kernelStart,
4289 					    &ioplSize,
4290 					    &iopl.fPageOffset,
4291 					    &iopl.fIOPL,
4292 					    baseInfo,
4293 					    &numPageInfo,
4294 					    &highPage);
4295 				} else {
4296 					assert(theMap);
4297 					error = vm_map_create_upl(theMap,
4298 					    startPage,
4299 					    (upl_size_t*)&ioplSize,
4300 					    &iopl.fIOPL,
4301 					    baseInfo,
4302 					    &numPageInfo,
4303 					    &ioplFlags,
4304 					    tag);
4305 				}
4306 
4307 				if (error != KERN_SUCCESS) {
4308 					traceInterval.setEndArg2(error);
4309 					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4310 					goto abortExit;
4311 				}
4312 
4313 				assert(ioplSize);
4314 
4315 				if (iopl.fIOPL) {
4316 					highPage = upl_get_highest_page(iopl.fIOPL);
4317 				}
4318 				if (highPage > highestPage) {
4319 					highestPage = highPage;
4320 				}
4321 
4322 				if (baseInfo->device) {
4323 					numPageInfo = 1;
4324 					iopl.fFlags = kIOPLOnDevice;
4325 				} else {
4326 					iopl.fFlags = 0;
4327 				}
4328 
4329 				if (byteAlignUPL) {
4330 					if (iopl.fIOPL) {
4331 						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4332 						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4333 					}
4334 					if (startPage != (mach_vm_address_t)-1) {
4335 						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4336 						startPage -= iopl.fPageOffset;
4337 					}
4338 					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4339 					numBytes += iopl.fPageOffset;
4340 				}
4341 
4342 				iopl.fIOMDOffset = mdOffset;
4343 				iopl.fPageInfo = pageIndex;
4344 
4345 				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4346 					// Clean up partial created and unsaved iopl
4347 					if (iopl.fIOPL) {
4348 						upl_abort(iopl.fIOPL, 0);
4349 						upl_deallocate(iopl.fIOPL);
4350 					}
4351 					error = kIOReturnNoMemory;
4352 					traceInterval.setEndArg2(error);
4353 					goto abortExit;
4354 				}
4355 				dataP = NULL;
4356 
4357 				// Check for a multiple iopl's in one virtual range
4358 				pageIndex += numPageInfo;
4359 				mdOffset -= iopl.fPageOffset;
4360 				numBytesWired += ioplSize;
4361 				if (ioplSize < numBytes) {
4362 					numBytes -= ioplSize;
4363 					if (startPage != (mach_vm_address_t)-1) {
4364 						startPage += ioplSize;
4365 					}
4366 					mdOffset += ioplSize;
4367 					iopl.fPageOffset = 0;
4368 					if (mapper) {
4369 						iopl.fMappedPage = mapBase + pageIndex;
4370 					}
4371 				} else {
4372 					mdOffset += numBytes;
4373 					break;
4374 				}
4375 			}
4376 		}
4377 
4378 		_highestPage = highestPage;
4379 		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4380 
4381 		if (UPL_COPYOUT_FROM & uplFlags) {
4382 			_flags |= kIOMemoryPreparedReadOnly;
4383 		}
4384 		traceInterval.setEndCodes(numBytesWired, error);
4385 	}
4386 
4387 #if IOTRACKING
4388 	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4389 		dataP = getDataP(_memoryEntries);
4390 		if (!dataP->fWireTracking.link.next) {
4391 			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4392 		}
4393 	}
4394 #endif /* IOTRACKING */
4395 
4396 	return error;
4397 
4398 abortExit:
4399 	{
4400 		dataP = getDataP(_memoryEntries);
4401 		UInt done = getNumIOPL(_memoryEntries, dataP);
4402 		ioPLBlock *ioplList = getIOPLList(dataP);
4403 
4404 		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4405 			if (ioplList[ioplIdx].fIOPL) {
4406 				upl_abort(ioplList[ioplIdx].fIOPL, 0);
4407 				upl_deallocate(ioplList[ioplIdx].fIOPL);
4408 			}
4409 		}
4410 		_memoryEntries->setLength(computeDataSize(0, 0));
4411 	}
4412 
4413 	if (error == KERN_FAILURE) {
4414 		error = kIOReturnCannotWire;
4415 	} else if (error == KERN_MEMORY_ERROR) {
4416 		error = kIOReturnNoResources;
4417 	}
4418 
4419 	return error;
4420 }
4421 
4422 bool
initMemoryEntries(size_t size,IOMapper * mapper)4423 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4424 {
4425 	ioGMDData * dataP;
4426 
4427 	if (size > UINT_MAX) {
4428 		return false;
4429 	}
4430 	if (!_memoryEntries) {
4431 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4432 		if (!_memoryEntries) {
4433 			return false;
4434 		}
4435 	} else if (!_memoryEntries->initWithCapacity(size)) {
4436 		return false;
4437 	}
4438 
4439 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4440 	dataP = getDataP(_memoryEntries);
4441 
4442 	if (mapper == kIOMapperWaitSystem) {
4443 		IOMapper::checkForSystemMapper();
4444 		mapper = IOMapper::gSystem;
4445 	}
4446 	dataP->fMapper               = mapper;
4447 	dataP->fPageCnt              = 0;
4448 	dataP->fMappedBase           = 0;
4449 	dataP->fDMAMapNumAddressBits = 64;
4450 	dataP->fDMAMapAlignment      = 0;
4451 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4452 	dataP->fCompletionError      = false;
4453 	dataP->fMappedBaseValid      = false;
4454 
4455 	return true;
4456 }
4457 
4458 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4459 IOMemoryDescriptor::dmaMap(
4460 	IOMapper                    * mapper,
4461 	IOMemoryDescriptor          * memory,
4462 	IODMACommand                * command,
4463 	const IODMAMapSpecification * mapSpec,
4464 	uint64_t                      offset,
4465 	uint64_t                      length,
4466 	uint64_t                    * mapAddress,
4467 	uint64_t                    * mapLength)
4468 {
4469 	IOReturn err;
4470 	uint32_t mapOptions;
4471 
4472 	mapOptions = 0;
4473 	mapOptions |= kIODMAMapReadAccess;
4474 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4475 		mapOptions |= kIODMAMapWriteAccess;
4476 	}
4477 
4478 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4479 	    mapSpec, command, NULL, mapAddress, mapLength);
4480 
4481 	if (kIOReturnSuccess == err) {
4482 		dmaMapRecord(mapper, command, *mapLength);
4483 	}
4484 
4485 	return err;
4486 }
4487 
4488 void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4489 IOMemoryDescriptor::dmaMapRecord(
4490 	IOMapper                    * mapper,
4491 	IODMACommand                * command,
4492 	uint64_t                      mapLength)
4493 {
4494 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4495 	kern_allocation_name_t alloc;
4496 	int16_t                prior;
4497 
4498 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4499 		kern_allocation_update_size(mapper->fAllocName, mapLength);
4500 	}
4501 
4502 	if (!command) {
4503 		return;
4504 	}
4505 	prior = OSAddAtomic16(1, &_dmaReferences);
4506 	if (!prior) {
4507 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4508 			_mapName  = alloc;
4509 			mapLength = _length;
4510 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4511 		} else {
4512 			_mapName = NULL;
4513 		}
4514 	}
4515 }
4516 
4517 IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4518 IOMemoryDescriptor::dmaUnmap(
4519 	IOMapper                    * mapper,
4520 	IODMACommand                * command,
4521 	uint64_t                      offset,
4522 	uint64_t                      mapAddress,
4523 	uint64_t                      mapLength)
4524 {
4525 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4526 	IOReturn ret;
4527 	kern_allocation_name_t alloc;
4528 	kern_allocation_name_t mapName;
4529 	int16_t prior;
4530 
4531 	mapName = NULL;
4532 	prior = 0;
4533 	if (command) {
4534 		mapName = _mapName;
4535 		if (_dmaReferences) {
4536 			prior = OSAddAtomic16(-1, &_dmaReferences);
4537 		} else {
4538 			panic("_dmaReferences underflow");
4539 		}
4540 	}
4541 
4542 	if (!mapLength) {
4543 		traceInterval.setEndArg1(kIOReturnSuccess);
4544 		return kIOReturnSuccess;
4545 	}
4546 
4547 	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4548 
4549 	if ((alloc = mapper->fAllocName)) {
4550 		kern_allocation_update_size(alloc, -mapLength);
4551 		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4552 			mapLength = _length;
4553 			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4554 		}
4555 	}
4556 
4557 	traceInterval.setEndArg1(ret);
4558 	return ret;
4559 }
4560 
4561 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4562 IOGeneralMemoryDescriptor::dmaMap(
4563 	IOMapper                    * mapper,
4564 	IOMemoryDescriptor          * memory,
4565 	IODMACommand                * command,
4566 	const IODMAMapSpecification * mapSpec,
4567 	uint64_t                      offset,
4568 	uint64_t                      length,
4569 	uint64_t                    * mapAddress,
4570 	uint64_t                    * mapLength)
4571 {
4572 	IOReturn          err = kIOReturnSuccess;
4573 	ioGMDData *       dataP;
4574 	IOOptionBits      type = _flags & kIOMemoryTypeMask;
4575 
4576 	*mapAddress = 0;
4577 	if (kIOMemoryHostOnly & _flags) {
4578 		return kIOReturnSuccess;
4579 	}
4580 	if (kIOMemoryRemote & _flags) {
4581 		return kIOReturnNotAttached;
4582 	}
4583 
4584 	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4585 	    || offset || (length != _length)) {
4586 		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4587 	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4588 		const ioPLBlock * ioplList = getIOPLList(dataP);
4589 		upl_page_info_t * pageList;
4590 		uint32_t          mapOptions = 0;
4591 
4592 		IODMAMapSpecification mapSpec;
4593 		bzero(&mapSpec, sizeof(mapSpec));
4594 		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4595 		mapSpec.alignment = dataP->fDMAMapAlignment;
4596 
4597 		// For external UPLs the fPageInfo field points directly to
4598 		// the upl's upl_page_info_t array.
4599 		if (ioplList->fFlags & kIOPLExternUPL) {
4600 			pageList = (upl_page_info_t *) ioplList->fPageInfo;
4601 			mapOptions |= kIODMAMapPagingPath;
4602 		} else {
4603 			pageList = getPageList(dataP);
4604 		}
4605 
4606 		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4607 			mapOptions |= kIODMAMapPageListFullyOccupied;
4608 		}
4609 
4610 		assert(dataP->fDMAAccess);
4611 		mapOptions |= dataP->fDMAAccess;
4612 
4613 		// Check for direct device non-paged memory
4614 		if (ioplList->fFlags & kIOPLOnDevice) {
4615 			mapOptions |= kIODMAMapPhysicallyContiguous;
4616 		}
4617 
4618 		IODMAMapPageList dmaPageList =
4619 		{
4620 			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
4621 			.pageListCount = _pages,
4622 			.pageList      = &pageList[0]
4623 		};
4624 		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4625 		    command, &dmaPageList, mapAddress, mapLength);
4626 
4627 		if (kIOReturnSuccess == err) {
4628 			dmaMapRecord(mapper, command, *mapLength);
4629 		}
4630 	}
4631 
4632 	return err;
4633 }
4634 
4635 /*
4636  * prepare
4637  *
4638  * Prepare the memory for an I/O transfer.  This involves paging in
4639  * the memory, if necessary, and wiring it down for the duration of
4640  * the transfer.  The complete() method completes the processing of
4641  * the memory after the I/O transfer finishes.  This method needn't
4642  * called for non-pageable memory.
4643  */
4644 
4645 IOReturn
prepare(IODirection forDirection)4646 IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4647 {
4648 	IOReturn     error    = kIOReturnSuccess;
4649 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4650 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4651 
4652 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4653 		traceInterval.setEndArg1(kIOReturnSuccess);
4654 		return kIOReturnSuccess;
4655 	}
4656 
4657 	assert(!(kIOMemoryRemote & _flags));
4658 	if (kIOMemoryRemote & _flags) {
4659 		traceInterval.setEndArg1(kIOReturnNotAttached);
4660 		return kIOReturnNotAttached;
4661 	}
4662 
4663 	if (_prepareLock) {
4664 		IOLockLock(_prepareLock);
4665 	}
4666 
4667 	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4668 		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4669 			error = kIOReturnNotReady;
4670 			goto finish;
4671 		}
4672 		error = wireVirtual(forDirection);
4673 	}
4674 
4675 	if (kIOReturnSuccess == error) {
4676 		if (1 == ++_wireCount) {
4677 			if (kIOMemoryClearEncrypt & _flags) {
4678 				performOperation(kIOMemoryClearEncrypted, 0, _length);
4679 			}
4680 
4681 			ktraceEmitPhysicalSegments();
4682 		}
4683 	}
4684 
4685 finish:
4686 
4687 	if (_prepareLock) {
4688 		IOLockUnlock(_prepareLock);
4689 	}
4690 	traceInterval.setEndArg1(error);
4691 
4692 	return error;
4693 }
4694 
4695 /*
4696  * complete
4697  *
4698  * Complete processing of the memory after an I/O transfer finishes.
4699  * This method should not be called unless a prepare was previously
4700  * issued; the prepare() and complete() must occur in pairs, before
4701  * before and after an I/O transfer involving pageable memory.
4702  */
4703 
4704 IOReturn
complete(IODirection forDirection)4705 IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4706 {
4707 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4708 	ioGMDData  * dataP;
4709 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4710 
4711 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4712 		traceInterval.setEndArg1(kIOReturnSuccess);
4713 		return kIOReturnSuccess;
4714 	}
4715 
4716 	assert(!(kIOMemoryRemote & _flags));
4717 	if (kIOMemoryRemote & _flags) {
4718 		traceInterval.setEndArg1(kIOReturnNotAttached);
4719 		return kIOReturnNotAttached;
4720 	}
4721 
4722 	if (_prepareLock) {
4723 		IOLockLock(_prepareLock);
4724 	}
4725 	do{
4726 		assert(_wireCount);
4727 		if (!_wireCount) {
4728 			break;
4729 		}
4730 		dataP = getDataP(_memoryEntries);
4731 		if (!dataP) {
4732 			break;
4733 		}
4734 
4735 		if (kIODirectionCompleteWithError & forDirection) {
4736 			dataP->fCompletionError = true;
4737 		}
4738 
4739 		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4740 			performOperation(kIOMemorySetEncrypted, 0, _length);
4741 		}
4742 
4743 		_wireCount--;
4744 		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4745 			ioPLBlock *ioplList = getIOPLList(dataP);
4746 			UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4747 
4748 			if (_wireCount) {
4749 				// kIODirectionCompleteWithDataValid & forDirection
4750 				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4751 					vm_tag_t tag;
4752 					tag = (typeof(tag))getVMTag(kernel_map);
4753 					for (ind = 0; ind < count; ind++) {
4754 						if (ioplList[ind].fIOPL) {
4755 							iopl_valid_data(ioplList[ind].fIOPL, tag);
4756 						}
4757 					}
4758 				}
4759 			} else {
4760 				if (_dmaReferences) {
4761 					panic("complete() while dma active");
4762 				}
4763 
4764 				if (dataP->fMappedBaseValid) {
4765 					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4766 					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4767 				}
4768 #if IOTRACKING
4769 				if (dataP->fWireTracking.link.next) {
4770 					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4771 				}
4772 #endif /* IOTRACKING */
4773 				// Only complete iopls that we created which are for TypeVirtual
4774 				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4775 					for (ind = 0; ind < count; ind++) {
4776 						if (ioplList[ind].fIOPL) {
4777 							if (dataP->fCompletionError) {
4778 								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4779 							} else {
4780 								upl_commit(ioplList[ind].fIOPL, NULL, 0);
4781 							}
4782 							upl_deallocate(ioplList[ind].fIOPL);
4783 						}
4784 					}
4785 				} else if (kIOMemoryTypeUPL == type) {
4786 					upl_set_referenced(ioplList[0].fIOPL, false);
4787 				}
4788 
4789 				_memoryEntries->setLength(computeDataSize(0, 0));
4790 
4791 				dataP->fPreparationID = kIOPreparationIDUnprepared;
4792 				_flags &= ~kIOMemoryPreparedReadOnly;
4793 
4794 				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4795 					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4796 				}
4797 			}
4798 		}
4799 	}while (false);
4800 
4801 	if (_prepareLock) {
4802 		IOLockUnlock(_prepareLock);
4803 	}
4804 
4805 	traceInterval.setEndArg1(kIOReturnSuccess);
4806 	return kIOReturnSuccess;
4807 }
4808 
4809 IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4810 IOGeneralMemoryDescriptor::doMap(
4811 	vm_map_t                __addressMap,
4812 	IOVirtualAddress *      __address,
4813 	IOOptionBits            options,
4814 	IOByteCount             __offset,
4815 	IOByteCount             __length )
4816 {
4817 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4818 	traceInterval.setEndArg1(kIOReturnSuccess);
4819 #ifndef __LP64__
4820 	if (!(kIOMap64Bit & options)) {
4821 		panic("IOGeneralMemoryDescriptor::doMap !64bit");
4822 	}
4823 #endif /* !__LP64__ */
4824 
4825 	kern_return_t  err;
4826 
4827 	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
4828 	mach_vm_size_t offset  = mapping->fOffset + __offset;
4829 	mach_vm_size_t length  = mapping->fLength;
4830 
4831 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4832 	Ranges vec = _ranges;
4833 
4834 	mach_vm_address_t range0Addr = 0;
4835 	mach_vm_size_t    range0Len = 0;
4836 
4837 	if ((offset >= _length) || ((offset + length) > _length)) {
4838 		traceInterval.setEndArg1(kIOReturnBadArgument);
4839 		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4840 		// assert(offset == 0 && _length == 0 && length == 0);
4841 		return kIOReturnBadArgument;
4842 	}
4843 
4844 	assert(!(kIOMemoryRemote & _flags));
4845 	if (kIOMemoryRemote & _flags) {
4846 		return 0;
4847 	}
4848 
4849 	if (vec.v) {
4850 		getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
4851 	}
4852 
4853 	// mapping source == dest? (could be much better)
4854 	if (_task
4855 	    && (mapping->fAddressTask == _task)
4856 	    && (mapping->fAddressMap == get_task_map(_task))
4857 	    && (options & kIOMapAnywhere)
4858 	    && (!(kIOMapUnique & options))
4859 	    && (!(kIOMapGuardedMask & options))
4860 	    && (1 == _rangesCount)
4861 	    && (0 == offset)
4862 	    && range0Addr
4863 	    && (length <= range0Len)) {
4864 		mapping->fAddress = range0Addr;
4865 		mapping->fOptions |= kIOMapStatic;
4866 
4867 		return kIOReturnSuccess;
4868 	}
4869 
4870 	if (!_memRef) {
4871 		IOOptionBits createOptions = 0;
4872 		if (!(kIOMapReadOnly & options)) {
4873 			createOptions |= kIOMemoryReferenceWrite;
4874 #if DEVELOPMENT || DEBUG
4875 			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4876 			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4877 				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4878 			}
4879 #endif
4880 		}
4881 		err = memoryReferenceCreate(createOptions, &_memRef);
4882 		if (kIOReturnSuccess != err) {
4883 			traceInterval.setEndArg1(err);
4884 			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4885 			return err;
4886 		}
4887 	}
4888 
4889 	memory_object_t pager;
4890 	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4891 
4892 	// <upl_transpose //
4893 	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4894 		do{
4895 			upl_t               redirUPL2;
4896 			upl_size_t          size;
4897 			upl_control_flags_t flags;
4898 			unsigned int        lock_count;
4899 
4900 			if (!_memRef || (1 != _memRef->count)) {
4901 				err = kIOReturnNotReadable;
4902 				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4903 				break;
4904 			}
4905 
4906 			size = (upl_size_t) round_page(mapping->fLength);
4907 			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4908 			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4909 
4910 			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
4911 			    NULL, NULL,
4912 			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
4913 				redirUPL2 = NULL;
4914 			}
4915 
4916 			for (lock_count = 0;
4917 			    IORecursiveLockHaveLock(gIOMemoryLock);
4918 			    lock_count++) {
4919 				UNLOCK;
4920 			}
4921 			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
4922 			for (;
4923 			    lock_count;
4924 			    lock_count--) {
4925 				LOCK;
4926 			}
4927 
4928 			if (kIOReturnSuccess != err) {
4929 				IOLog("upl_transpose(%x)\n", err);
4930 				err = kIOReturnSuccess;
4931 			}
4932 
4933 			if (redirUPL2) {
4934 				upl_commit(redirUPL2, NULL, 0);
4935 				upl_deallocate(redirUPL2);
4936 				redirUPL2 = NULL;
4937 			}
4938 			{
4939 				// swap the memEntries since they now refer to different vm_objects
4940 				IOMemoryReference * me = _memRef;
4941 				_memRef = mapping->fMemory->_memRef;
4942 				mapping->fMemory->_memRef = me;
4943 			}
4944 			if (pager) {
4945 				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
4946 			}
4947 		}while (false);
4948 	}
4949 	// upl_transpose> //
4950 	else {
4951 		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4952 		if (err) {
4953 			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
4954 		}
4955 #if IOTRACKING
4956 		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
4957 			// only dram maps in the default on developement case
4958 			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4959 		}
4960 #endif /* IOTRACKING */
4961 		if ((err == KERN_SUCCESS) && pager) {
4962 			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4963 
4964 			if (err != KERN_SUCCESS) {
4965 				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4966 			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4967 				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4968 			}
4969 		}
4970 	}
4971 
4972 	traceInterval.setEndArg1(err);
4973 	if (err) {
4974 		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4975 	}
4976 	return err;
4977 }
4978 
4979 #if IOTRACKING
4980 IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking,task_t * task,mach_vm_address_t * address,mach_vm_size_t * size)4981 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
4982     mach_vm_address_t * address, mach_vm_size_t * size)
4983 {
4984 #define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
4985 
4986 	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
4987 
4988 	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
4989 		return kIOReturnNotReady;
4990 	}
4991 
4992 	*task    = map->fAddressTask;
4993 	*address = map->fAddress;
4994 	*size    = map->fLength;
4995 
4996 	return kIOReturnSuccess;
4997 }
4998 #endif /* IOTRACKING */
4999 
5000 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5001 IOGeneralMemoryDescriptor::doUnmap(
5002 	vm_map_t                addressMap,
5003 	IOVirtualAddress        __address,
5004 	IOByteCount             __length )
5005 {
5006 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5007 	IOReturn ret;
5008 	ret = super::doUnmap(addressMap, __address, __length);
5009 	traceInterval.setEndArg1(ret);
5010 	return ret;
5011 }
5012 
5013 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5014 
5015 #undef super
5016 #define super OSObject
5017 
5018 OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )
5019 
5020 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
5021 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
5022 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
5023 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
5024 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
5025 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
5026 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
5027 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5028 
5029 /* ex-inline function implementation */
5030 IOPhysicalAddress
getPhysicalAddress()5031 IOMemoryMap::getPhysicalAddress()
5032 {
5033 	return getPhysicalSegment( 0, NULL );
5034 }
5035 
5036 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5037 
5038 bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5039 IOMemoryMap::init(
5040 	task_t                  intoTask,
5041 	mach_vm_address_t       toAddress,
5042 	IOOptionBits            _options,
5043 	mach_vm_size_t          _offset,
5044 	mach_vm_size_t          _length )
5045 {
5046 	if (!intoTask) {
5047 		return false;
5048 	}
5049 
5050 	if (!super::init()) {
5051 		return false;
5052 	}
5053 
5054 	fAddressMap  = get_task_map(intoTask);
5055 	if (!fAddressMap) {
5056 		return false;
5057 	}
5058 	vm_map_reference(fAddressMap);
5059 
5060 	fAddressTask = intoTask;
5061 	fOptions     = _options;
5062 	fLength      = _length;
5063 	fOffset      = _offset;
5064 	fAddress     = toAddress;
5065 
5066 	return true;
5067 }
5068 
5069 bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5070 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5071 {
5072 	if (!_memory) {
5073 		return false;
5074 	}
5075 
5076 	if (!fSuperMap) {
5077 		if ((_offset + fLength) > _memory->getLength()) {
5078 			return false;
5079 		}
5080 		fOffset = _offset;
5081 	}
5082 
5083 
5084 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5085 	if (fMemory) {
5086 		if (fMemory != _memory) {
5087 			fMemory->removeMapping(this);
5088 		}
5089 	}
5090 	fMemory = os::move(tempval);
5091 
5092 	return true;
5093 }
5094 
5095 IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5096 IOMemoryDescriptor::doMap(
5097 	vm_map_t                __addressMap,
5098 	IOVirtualAddress *      __address,
5099 	IOOptionBits            options,
5100 	IOByteCount             __offset,
5101 	IOByteCount             __length )
5102 {
5103 	return kIOReturnUnsupported;
5104 }
5105 
5106 IOReturn
handleFault(void * _pager,mach_vm_size_t sourceOffset,mach_vm_size_t length)5107 IOMemoryDescriptor::handleFault(
5108 	void *                  _pager,
5109 	mach_vm_size_t          sourceOffset,
5110 	mach_vm_size_t          length)
5111 {
5112 	if (kIOMemoryRedirected & _flags) {
5113 #if DEBUG
5114 		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
5115 #endif
5116 		do {
5117 			SLEEP;
5118 		} while (kIOMemoryRedirected & _flags);
5119 	}
5120 	return kIOReturnSuccess;
5121 }
5122 
5123 IOReturn
populateDevicePager(void * _pager,vm_map_t addressMap,mach_vm_address_t address,mach_vm_size_t sourceOffset,mach_vm_size_t length,IOOptionBits options)5124 IOMemoryDescriptor::populateDevicePager(
5125 	void *                  _pager,
5126 	vm_map_t                addressMap,
5127 	mach_vm_address_t       address,
5128 	mach_vm_size_t          sourceOffset,
5129 	mach_vm_size_t          length,
5130 	IOOptionBits            options )
5131 {
5132 	IOReturn            err = kIOReturnSuccess;
5133 	memory_object_t     pager = (memory_object_t) _pager;
5134 	mach_vm_size_t      size;
5135 	mach_vm_size_t      bytes;
5136 	mach_vm_size_t      page;
5137 	mach_vm_size_t      pageOffset;
5138 	mach_vm_size_t      pagerOffset;
5139 	IOPhysicalLength    segLen, chunk;
5140 	addr64_t            physAddr;
5141 	IOOptionBits        type;
5142 
5143 	type = _flags & kIOMemoryTypeMask;
5144 
5145 	if (reserved->dp.pagerContig) {
5146 		sourceOffset = 0;
5147 		pagerOffset  = 0;
5148 	}
5149 
5150 	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
5151 	assert( physAddr );
5152 	pageOffset = physAddr - trunc_page_64( physAddr );
5153 	pagerOffset = sourceOffset;
5154 
5155 	size = length + pageOffset;
5156 	physAddr -= pageOffset;
5157 
5158 	segLen += pageOffset;
5159 	bytes = size;
5160 	do{
5161 		// in the middle of the loop only map whole pages
5162 		if (segLen >= bytes) {
5163 			segLen = bytes;
5164 		} else if (segLen != trunc_page_64(segLen)) {
5165 			err = kIOReturnVMError;
5166 		}
5167 		if (physAddr != trunc_page_64(physAddr)) {
5168 			err = kIOReturnBadArgument;
5169 		}
5170 
5171 		if (kIOReturnSuccess != err) {
5172 			break;
5173 		}
5174 
5175 #if DEBUG || DEVELOPMENT
5176 		if ((kIOMemoryTypeUPL != type)
5177 		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
5178 			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
5179 		}
5180 #endif /* DEBUG || DEVELOPMENT */
5181 
5182 		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
5183 		for (page = 0;
5184 		    (page < segLen) && (KERN_SUCCESS == err);
5185 		    page += chunk) {
5186 			err = device_pager_populate_object(pager, pagerOffset,
5187 			    (ppnum_t)(atop_64(physAddr + page)), chunk);
5188 			pagerOffset += chunk;
5189 		}
5190 
5191 		assert(KERN_SUCCESS == err);
5192 		if (err) {
5193 			break;
5194 		}
5195 
5196 		// This call to vm_fault causes an early pmap level resolution
5197 		// of the mappings created above for kernel mappings, since
5198 		// faulting in later can't take place from interrupt level.
5199 		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
5200 			err = vm_fault(addressMap,
5201 			    (vm_map_offset_t)trunc_page_64(address),
5202 			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
5203 			    FALSE, VM_KERN_MEMORY_NONE,
5204 			    THREAD_UNINT, NULL,
5205 			    (vm_map_offset_t)0);
5206 
5207 			if (KERN_SUCCESS != err) {
5208 				break;
5209 			}
5210 		}
5211 
5212 		sourceOffset += segLen - pageOffset;
5213 		address += segLen;
5214 		bytes -= segLen;
5215 		pageOffset = 0;
5216 	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
5217 
5218 	if (bytes) {
5219 		err = kIOReturnBadArgument;
5220 	}
5221 
5222 	return err;
5223 }
5224 
5225 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5226 IOMemoryDescriptor::doUnmap(
5227 	vm_map_t                addressMap,
5228 	IOVirtualAddress        __address,
5229 	IOByteCount             __length )
5230 {
5231 	IOReturn          err;
5232 	IOMemoryMap *     mapping;
5233 	mach_vm_address_t address;
5234 	mach_vm_size_t    length;
5235 
5236 	if (__length) {
5237 		panic("doUnmap");
5238 	}
5239 
5240 	mapping = (IOMemoryMap *) __address;
5241 	addressMap = mapping->fAddressMap;
5242 	address    = mapping->fAddress;
5243 	length     = mapping->fLength;
5244 
5245 	if (kIOMapOverwrite & mapping->fOptions) {
5246 		err = KERN_SUCCESS;
5247 	} else {
5248 		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
5249 			addressMap = IOPageableMapForAddress( address );
5250 		}
5251 #if DEBUG
5252 		if (kIOLogMapping & gIOKitDebug) {
5253 			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
5254 			    addressMap, address, length );
5255 		}
5256 #endif
5257 		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
5258 		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
5259 			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
5260 		}
5261 	}
5262 
5263 #if IOTRACKING
5264 	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
5265 #endif /* IOTRACKING */
5266 
5267 	return err;
5268 }
5269 
5270 IOReturn
redirect(task_t safeTask,bool doRedirect)5271 IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
5272 {
5273 	IOReturn            err = kIOReturnSuccess;
5274 	IOMemoryMap *       mapping = NULL;
5275 	OSSharedPtr<OSIterator>        iter;
5276 
5277 	LOCK;
5278 
5279 	if (doRedirect) {
5280 		_flags |= kIOMemoryRedirected;
5281 	} else {
5282 		_flags &= ~kIOMemoryRedirected;
5283 	}
5284 
5285 	do {
5286 		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
5287 			memory_object_t   pager;
5288 
5289 			if (reserved) {
5290 				pager = (memory_object_t) reserved->dp.devicePager;
5291 			} else {
5292 				pager = MACH_PORT_NULL;
5293 			}
5294 
5295 			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
5296 				mapping->redirect( safeTask, doRedirect );
5297 				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
5298 					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
5299 				}
5300 			}
5301 
5302 			iter.reset();
5303 		}
5304 	} while (false);
5305 
5306 	if (!doRedirect) {
5307 		WAKEUP;
5308 	}
5309 
5310 	UNLOCK;
5311 
5312 #ifndef __LP64__
5313 	// temporary binary compatibility
5314 	IOSubMemoryDescriptor * subMem;
5315 	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
5316 		err = subMem->redirect( safeTask, doRedirect );
5317 	} else {
5318 		err = kIOReturnSuccess;
5319 	}
5320 #endif /* !__LP64__ */
5321 
5322 	return err;
5323 }
5324 
5325 IOReturn
redirect(task_t safeTask,bool doRedirect)5326 IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
5327 {
5328 	IOReturn err = kIOReturnSuccess;
5329 
5330 	if (fSuperMap) {
5331 //        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
5332 	} else {
5333 		LOCK;
5334 
5335 		do{
5336 			if (!fAddress) {
5337 				break;
5338 			}
5339 			if (!fAddressMap) {
5340 				break;
5341 			}
5342 
5343 			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
5344 			    && (0 == (fOptions & kIOMapStatic))) {
5345 				IOUnmapPages( fAddressMap, fAddress, fLength );
5346 				err = kIOReturnSuccess;
5347 #if DEBUG
5348 				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
5349 #endif
5350 			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
5351 				IOOptionBits newMode;
5352 				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
5353 				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
5354 			}
5355 		}while (false);
5356 		UNLOCK;
5357 	}
5358 
5359 	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5360 	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
5361 	    && safeTask
5362 	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
5363 		fMemory->redirect(safeTask, doRedirect);
5364 	}
5365 
5366 	return err;
5367 }
5368 
5369 IOReturn
unmap(void)5370 IOMemoryMap::unmap( void )
5371 {
5372 	IOReturn    err;
5373 
5374 	LOCK;
5375 
5376 	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5377 	    && (0 == (kIOMapStatic & fOptions))) {
5378 		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5379 	} else {
5380 		err = kIOReturnSuccess;
5381 	}
5382 
5383 	if (fAddressMap) {
5384 		vm_map_deallocate(fAddressMap);
5385 		fAddressMap = NULL;
5386 	}
5387 
5388 	fAddress = 0;
5389 
5390 	UNLOCK;
5391 
5392 	return err;
5393 }
5394 
5395 void
taskDied(void)5396 IOMemoryMap::taskDied( void )
5397 {
5398 	LOCK;
5399 	if (fUserClientUnmap) {
5400 		unmap();
5401 	}
5402 #if IOTRACKING
5403 	else {
5404 		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
5405 	}
5406 #endif /* IOTRACKING */
5407 
5408 	if (fAddressMap) {
5409 		vm_map_deallocate(fAddressMap);
5410 		fAddressMap = NULL;
5411 	}
5412 	fAddressTask = NULL;
5413 	fAddress     = 0;
5414 	UNLOCK;
5415 }
5416 
5417 IOReturn
userClientUnmap(void)5418 IOMemoryMap::userClientUnmap( void )
5419 {
5420 	fUserClientUnmap = true;
5421 	return kIOReturnSuccess;
5422 }
5423 
5424 // Overload the release mechanism.  All mappings must be a member
5425 // of a memory descriptors _mappings set.  This means that we
5426 // always have 2 references on a mapping.  When either of these mappings
5427 // are released we need to free ourselves.
5428 void
taggedRelease(const void * tag) const5429 IOMemoryMap::taggedRelease(const void *tag) const
5430 {
5431 	LOCK;
5432 	super::taggedRelease(tag, 2);
5433 	UNLOCK;
5434 }
5435 
5436 void
free()5437 IOMemoryMap::free()
5438 {
5439 	unmap();
5440 
5441 	if (fMemory) {
5442 		LOCK;
5443 		fMemory->removeMapping(this);
5444 		UNLOCK;
5445 		fMemory.reset();
5446 	}
5447 
5448 	if (fSuperMap) {
5449 		fSuperMap.reset();
5450 	}
5451 
5452 	if (fRedirUPL) {
5453 		upl_commit(fRedirUPL, NULL, 0);
5454 		upl_deallocate(fRedirUPL);
5455 	}
5456 
5457 	super::free();
5458 }
5459 
5460 IOByteCount
getLength()5461 IOMemoryMap::getLength()
5462 {
5463 	return fLength;
5464 }
5465 
5466 IOVirtualAddress
getVirtualAddress()5467 IOMemoryMap::getVirtualAddress()
5468 {
5469 #ifndef __LP64__
5470 	if (fSuperMap) {
5471 		fSuperMap->getVirtualAddress();
5472 	} else if (fAddressMap
5473 	    && vm_map_is_64bit(fAddressMap)
5474 	    && (sizeof(IOVirtualAddress) < 8)) {
5475 		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
5476 	}
5477 #endif /* !__LP64__ */
5478 
5479 	return fAddress;
5480 }
5481 
5482 #ifndef __LP64__
5483 mach_vm_address_t
getAddress()5484 IOMemoryMap::getAddress()
5485 {
5486 	return fAddress;
5487 }
5488 
5489 mach_vm_size_t
getSize()5490 IOMemoryMap::getSize()
5491 {
5492 	return fLength;
5493 }
5494 #endif /* !__LP64__ */
5495 
5496 
5497 task_t
getAddressTask()5498 IOMemoryMap::getAddressTask()
5499 {
5500 	if (fSuperMap) {
5501 		return fSuperMap->getAddressTask();
5502 	} else {
5503 		return fAddressTask;
5504 	}
5505 }
5506 
5507 IOOptionBits
getMapOptions()5508 IOMemoryMap::getMapOptions()
5509 {
5510 	return fOptions;
5511 }
5512 
5513 IOMemoryDescriptor *
getMemoryDescriptor()5514 IOMemoryMap::getMemoryDescriptor()
5515 {
5516 	return fMemory.get();
5517 }
5518 
5519 IOMemoryMap *
copyCompatible(IOMemoryMap * newMapping)5520 IOMemoryMap::copyCompatible(
5521 	IOMemoryMap * newMapping )
5522 {
5523 	task_t              task      = newMapping->getAddressTask();
5524 	mach_vm_address_t   toAddress = newMapping->fAddress;
5525 	IOOptionBits        _options  = newMapping->fOptions;
5526 	mach_vm_size_t      _offset   = newMapping->fOffset;
5527 	mach_vm_size_t      _length   = newMapping->fLength;
5528 
5529 	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5530 		return NULL;
5531 	}
5532 	if ((fOptions ^ _options) & kIOMapReadOnly) {
5533 		return NULL;
5534 	}
5535 	if ((fOptions ^ _options) & kIOMapGuardedMask) {
5536 		return NULL;
5537 	}
5538 	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5539 	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
5540 		return NULL;
5541 	}
5542 
5543 	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5544 		return NULL;
5545 	}
5546 
5547 	if (_offset < fOffset) {
5548 		return NULL;
5549 	}
5550 
5551 	_offset -= fOffset;
5552 
5553 	if ((_offset + _length) > fLength) {
5554 		return NULL;
5555 	}
5556 
5557 	if ((fLength == _length) && (!_offset)) {
5558 		retain();
5559 		newMapping = this;
5560 	} else {
5561 		newMapping->fSuperMap.reset(this, OSRetain);
5562 		newMapping->fOffset   = fOffset + _offset;
5563 		newMapping->fAddress  = fAddress + _offset;
5564 	}
5565 
5566 	return newMapping;
5567 }
5568 
5569 IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5570 IOMemoryMap::wireRange(
5571 	uint32_t                options,
5572 	mach_vm_size_t          offset,
5573 	mach_vm_size_t          length)
5574 {
5575 	IOReturn kr;
5576 	mach_vm_address_t start = trunc_page_64(fAddress + offset);
5577 	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
5578 	vm_prot_t prot;
5579 
5580 	prot = (kIODirectionOutIn & options);
5581 	if (prot) {
5582 		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5583 	} else {
5584 		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5585 	}
5586 
5587 	return kr;
5588 }
5589 
5590 
5591 IOPhysicalAddress
5592 #ifdef __LP64__
getPhysicalSegment(IOByteCount _offset,IOPhysicalLength * _length,IOOptionBits _options)5593 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
5594 #else /* !__LP64__ */
5595 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
5596 #endif /* !__LP64__ */
5597 {
5598 	IOPhysicalAddress   address;
5599 
5600 	LOCK;
5601 #ifdef __LP64__
5602 	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
5603 #else /* !__LP64__ */
5604 	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
5605 #endif /* !__LP64__ */
5606 	UNLOCK;
5607 
5608 	return address;
5609 }
5610 
5611 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5612 
5613 #undef super
5614 #define super OSObject
5615 
5616 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5617 
5618 void
initialize(void)5619 IOMemoryDescriptor::initialize( void )
5620 {
5621 	if (NULL == gIOMemoryLock) {
5622 		gIOMemoryLock = IORecursiveLockAlloc();
5623 	}
5624 
5625 	gIOLastPage = IOGetLastPageNumber();
5626 }
5627 
5628 void
free(void)5629 IOMemoryDescriptor::free( void )
5630 {
5631 	if (_mappings) {
5632 		_mappings.reset();
5633 	}
5634 
5635 	if (reserved) {
5636 		cleanKernelReserved(reserved);
5637 		IOFreeType(reserved, IOMemoryDescriptorReserved);
5638 		reserved = NULL;
5639 	}
5640 	super::free();
5641 }
5642 
5643 OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5644 IOMemoryDescriptor::setMapping(
5645 	task_t                  intoTask,
5646 	IOVirtualAddress        mapAddress,
5647 	IOOptionBits            options )
5648 {
5649 	return createMappingInTask( intoTask, mapAddress,
5650 	           options | kIOMapStatic,
5651 	           0, getLength());
5652 }
5653 
5654 OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5655 IOMemoryDescriptor::map(
5656 	IOOptionBits            options )
5657 {
5658 	return createMappingInTask( kernel_task, 0,
5659 	           options | kIOMapAnywhere,
5660 	           0, getLength());
5661 }
5662 
5663 #ifndef __LP64__
5664 OSSharedPtr<IOMemoryMap>
map(task_t intoTask,IOVirtualAddress atAddress,IOOptionBits options,IOByteCount offset,IOByteCount length)5665 IOMemoryDescriptor::map(
5666 	task_t                  intoTask,
5667 	IOVirtualAddress        atAddress,
5668 	IOOptionBits            options,
5669 	IOByteCount             offset,
5670 	IOByteCount             length )
5671 {
5672 	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
5673 		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
5674 		return NULL;
5675 	}
5676 
5677 	return createMappingInTask(intoTask, atAddress,
5678 	           options, offset, length);
5679 }
5680 #endif /* !__LP64__ */
5681 
5682 OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5683 IOMemoryDescriptor::createMappingInTask(
5684 	task_t                  intoTask,
5685 	mach_vm_address_t       atAddress,
5686 	IOOptionBits            options,
5687 	mach_vm_size_t          offset,
5688 	mach_vm_size_t          length)
5689 {
5690 	IOMemoryMap * result;
5691 	IOMemoryMap * mapping;
5692 
5693 	if (0 == length) {
5694 		length = getLength();
5695 	}
5696 
5697 	mapping = new IOMemoryMap;
5698 
5699 	if (mapping
5700 	    && !mapping->init( intoTask, atAddress,
5701 	    options, offset, length )) {
5702 		mapping->release();
5703 		mapping = NULL;
5704 	}
5705 
5706 	if (mapping) {
5707 		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5708 	} else {
5709 		result = nullptr;
5710 	}
5711 
5712 #if DEBUG
5713 	if (!result) {
5714 		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5715 		    this, atAddress, (uint32_t) options, offset, length);
5716 	}
5717 #endif
5718 
5719 	// already retained through makeMapping
5720 	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5721 
5722 	return retval;
5723 }
5724 
5725 #ifndef __LP64__ // there is only a 64 bit version for LP64
5726 IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,IOByteCount offset)5727 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5728     IOOptionBits         options,
5729     IOByteCount          offset)
5730 {
5731 	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
5732 }
5733 #endif
5734 
5735 IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,mach_vm_size_t offset)5736 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5737     IOOptionBits         options,
5738     mach_vm_size_t       offset)
5739 {
5740 	IOReturn err = kIOReturnSuccess;
5741 	OSSharedPtr<IOMemoryDescriptor> physMem;
5742 
5743 	LOCK;
5744 
5745 	if (fAddress && fAddressMap) {
5746 		do{
5747 			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5748 			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5749 				physMem = fMemory;
5750 			}
5751 
5752 			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
5753 				upl_size_t          size = (typeof(size))round_page(fLength);
5754 				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5755 				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5756 				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
5757 				    NULL, NULL,
5758 				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
5759 					fRedirUPL = NULL;
5760 				}
5761 
5762 				if (physMem) {
5763 					IOUnmapPages( fAddressMap, fAddress, fLength );
5764 					if ((false)) {
5765 						physMem->redirect(NULL, true);
5766 					}
5767 				}
5768 			}
5769 
5770 			if (newBackingMemory) {
5771 				if (newBackingMemory != fMemory) {
5772 					fOffset = 0;
5773 					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
5774 					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
5775 					    offset, fLength)) {
5776 						err = kIOReturnError;
5777 					}
5778 				}
5779 				if (fRedirUPL) {
5780 					upl_commit(fRedirUPL, NULL, 0);
5781 					upl_deallocate(fRedirUPL);
5782 					fRedirUPL = NULL;
5783 				}
5784 				if ((false) && physMem) {
5785 					physMem->redirect(NULL, false);
5786 				}
5787 			}
5788 		}while (false);
5789 	}
5790 
5791 	UNLOCK;
5792 
5793 	return err;
5794 }
5795 
5796 IOMemoryMap *
makeMapping(IOMemoryDescriptor * owner,task_t __intoTask,IOVirtualAddress __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5797 IOMemoryDescriptor::makeMapping(
5798 	IOMemoryDescriptor *    owner,
5799 	task_t                  __intoTask,
5800 	IOVirtualAddress        __address,
5801 	IOOptionBits            options,
5802 	IOByteCount             __offset,
5803 	IOByteCount             __length )
5804 {
5805 #ifndef __LP64__
5806 	if (!(kIOMap64Bit & options)) {
5807 		panic("IOMemoryDescriptor::makeMapping !64bit");
5808 	}
5809 #endif /* !__LP64__ */
5810 
5811 	OSSharedPtr<IOMemoryDescriptor> mapDesc;
5812 	__block IOMemoryMap * result  = NULL;
5813 
5814 	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
5815 	mach_vm_size_t offset  = mapping->fOffset + __offset;
5816 	mach_vm_size_t length  = mapping->fLength;
5817 
5818 	mapping->fOffset = offset;
5819 
5820 	LOCK;
5821 
5822 	do{
5823 		if (kIOMapStatic & options) {
5824 			result = mapping;
5825 			addMapping(mapping);
5826 			mapping->setMemoryDescriptor(this, 0);
5827 			continue;
5828 		}
5829 
5830 		if (kIOMapUnique & options) {
5831 			addr64_t phys;
5832 			IOByteCount       physLen;
5833 
5834 //	    if (owner != this)		continue;
5835 
5836 			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5837 			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5838 				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
5839 				if (!phys || (physLen < length)) {
5840 					continue;
5841 				}
5842 
5843 				mapDesc = IOMemoryDescriptor::withAddressRange(
5844 					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
5845 				if (!mapDesc) {
5846 					continue;
5847 				}
5848 				offset = 0;
5849 				mapping->fOffset = offset;
5850 			}
5851 		} else {
5852 			// look for a compatible existing mapping
5853 			if (_mappings) {
5854 				_mappings->iterateObjects(^(OSObject * object)
5855 				{
5856 					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
5857 					if ((result = lookMapping->copyCompatible(mapping))) {
5858 					        addMapping(result);
5859 					        result->setMemoryDescriptor(this, offset);
5860 					        return true;
5861 					}
5862 					return false;
5863 				});
5864 			}
5865 			if (result || (options & kIOMapReference)) {
5866 				if (result != mapping) {
5867 					mapping->release();
5868 					mapping = NULL;
5869 				}
5870 				continue;
5871 			}
5872 		}
5873 
5874 		if (!mapDesc) {
5875 			mapDesc.reset(this, OSRetain);
5876 		}
5877 		IOReturn
5878 		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
5879 		if (kIOReturnSuccess == kr) {
5880 			result = mapping;
5881 			mapDesc->addMapping(result);
5882 			result->setMemoryDescriptor(mapDesc.get(), offset);
5883 		} else {
5884 			mapping->release();
5885 			mapping = NULL;
5886 		}
5887 	}while (false);
5888 
5889 	UNLOCK;
5890 
5891 	return result;
5892 }
5893 
5894 void
addMapping(IOMemoryMap * mapping)5895 IOMemoryDescriptor::addMapping(
5896 	IOMemoryMap * mapping )
5897 {
5898 	if (mapping) {
5899 		if (NULL == _mappings) {
5900 			_mappings = OSSet::withCapacity(1);
5901 		}
5902 		if (_mappings) {
5903 			_mappings->setObject( mapping );
5904 		}
5905 	}
5906 }
5907 
5908 void
removeMapping(IOMemoryMap * mapping)5909 IOMemoryDescriptor::removeMapping(
5910 	IOMemoryMap * mapping )
5911 {
5912 	if (_mappings) {
5913 		_mappings->removeObject( mapping);
5914 	}
5915 }
5916 
5917 #ifndef __LP64__
5918 // obsolete initializers
5919 // - initWithOptions is the designated initializer
5920 bool
initWithAddress(void * address,IOByteCount length,IODirection direction)5921 IOMemoryDescriptor::initWithAddress(void *      address,
5922     IOByteCount   length,
5923     IODirection direction)
5924 {
5925 	return false;
5926 }
5927 
5928 bool
initWithAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)5929 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
5930     IOByteCount    length,
5931     IODirection  direction,
5932     task_t       task)
5933 {
5934 	return false;
5935 }
5936 
5937 bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)5938 IOMemoryDescriptor::initWithPhysicalAddress(
5939 	IOPhysicalAddress      address,
5940 	IOByteCount            length,
5941 	IODirection            direction )
5942 {
5943 	return false;
5944 }
5945 
5946 bool
initWithRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)5947 IOMemoryDescriptor::initWithRanges(
5948 	IOVirtualRange * ranges,
5949 	UInt32           withCount,
5950 	IODirection      direction,
5951 	task_t           task,
5952 	bool             asReference)
5953 {
5954 	return false;
5955 }
5956 
5957 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)5958 IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
5959     UInt32           withCount,
5960     IODirection      direction,
5961     bool             asReference)
5962 {
5963 	return false;
5964 }
5965 
5966 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)5967 IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
5968     IOByteCount * lengthOfSegment)
5969 {
5970 	return NULL;
5971 }
5972 #endif /* !__LP64__ */
5973 
5974 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5975 
5976 bool
serialize(OSSerialize * s) const5977 IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
5978 {
5979 	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
5980 	OSSharedPtr<OSObject>           values[2] = {NULL};
5981 	OSSharedPtr<OSArray>            array;
5982 
5983 	struct SerData {
5984 		user_addr_t address;
5985 		user_size_t length;
5986 	};
5987 
5988 	unsigned int index;
5989 
5990 	IOOptionBits type = _flags & kIOMemoryTypeMask;
5991 
5992 	if (s == NULL) {
5993 		return false;
5994 	}
5995 
5996 	array = OSArray::withCapacity(4);
5997 	if (!array) {
5998 		return false;
5999 	}
6000 
6001 	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6002 	if (!vcopy) {
6003 		return false;
6004 	}
6005 
6006 	keys[0] = OSSymbol::withCString("address");
6007 	keys[1] = OSSymbol::withCString("length");
6008 
6009 	// Copy the volatile data so we don't have to allocate memory
6010 	// while the lock is held.
6011 	LOCK;
6012 	if (vcopy.size() == _rangesCount) {
6013 		Ranges vec = _ranges;
6014 		for (index = 0; index < vcopy.size(); index++) {
6015 			mach_vm_address_t addr; mach_vm_size_t len;
6016 			getAddrLenForInd(addr, len, type, vec, index);
6017 			vcopy[index].address = addr;
6018 			vcopy[index].length  = len;
6019 		}
6020 	} else {
6021 		// The descriptor changed out from under us.  Give up.
6022 		UNLOCK;
6023 		return false;
6024 	}
6025 	UNLOCK;
6026 
6027 	for (index = 0; index < vcopy.size(); index++) {
6028 		user_addr_t addr = vcopy[index].address;
6029 		IOByteCount len = (IOByteCount) vcopy[index].length;
6030 		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6031 		if (values[0] == NULL) {
6032 			return false;
6033 		}
6034 		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6035 		if (values[1] == NULL) {
6036 			return false;
6037 		}
6038 		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6039 		if (dict == NULL) {
6040 			return false;
6041 		}
6042 		array->setObject(dict.get());
6043 		dict.reset();
6044 		values[0].reset();
6045 		values[1].reset();
6046 	}
6047 
6048 	return array->serialize(s);
6049 }
6050 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6051 
6052 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
6053 #ifdef __LP64__
6054 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
6055 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
6056 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
6057 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
6058 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
6059 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
6060 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
6061 #else /* !__LP64__ */
6062 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
6063 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
6064 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
6065 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
6066 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
6067 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
6068 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
6069 #endif /* !__LP64__ */
6070 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
6071 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
6072 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
6073 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
6074 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
6075 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
6076 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
6077 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6078 
6079 /* ex-inline function implementation */
6080 IOPhysicalAddress
getPhysicalAddress()6081 IOMemoryDescriptor::getPhysicalAddress()
6082 {
6083 	return getPhysicalSegment( 0, NULL );
6084 }
6085 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData,OSObject)6086 OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6087 
6088 OSPtr<_IOMemoryDescriptorMixedData>
6089 _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6090 {
6091 	OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6092 	if (me && !me->initWithCapacity(capacity)) {
6093 		return nullptr;
6094 	}
6095 	return me;
6096 }
6097 
6098 bool
initWithCapacity(size_t capacity)6099 _IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
6100 {
6101 	if (_data && (!capacity || (_capacity < capacity))) {
6102 		freeMemory();
6103 	}
6104 
6105 	if (!OSObject::init()) {
6106 		return false;
6107 	}
6108 
6109 	if (!_data && capacity) {
6110 		_data = IOMalloc(capacity);
6111 		if (!_data) {
6112 			return false;
6113 		}
6114 		_capacity = capacity;
6115 	}
6116 
6117 	_length = 0;
6118 
6119 	return true;
6120 }
6121 
6122 void
free()6123 _IOMemoryDescriptorMixedData::free()
6124 {
6125 	freeMemory();
6126 	OSObject::free();
6127 }
6128 
6129 void
freeMemory()6130 _IOMemoryDescriptorMixedData::freeMemory()
6131 {
6132 	IOFree(_data, _capacity);
6133 	_data = nullptr;
6134 	_capacity = _length = 0;
6135 }
6136 
6137 bool
appendBytes(const void * bytes,size_t length)6138 _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6139 {
6140 	const auto oldLength = getLength();
6141 	size_t newLength;
6142 	if (os_add_overflow(oldLength, length, &newLength)) {
6143 		return false;
6144 	}
6145 
6146 	if (newLength > _capacity) {
6147 		void * const newData = IOMalloc(newLength);
6148 		if (_data) {
6149 			bcopy(_data, newData, oldLength);
6150 			IOFree(_data, _capacity);
6151 		}
6152 		_data = newData;
6153 		_capacity = newLength;
6154 	}
6155 
6156 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6157 	if (bytes) {
6158 		bcopy(bytes, dest, length);
6159 	} else {
6160 		bzero(dest, length);
6161 	}
6162 
6163 	_length = newLength;
6164 
6165 	return true;
6166 }
6167 
6168 void
setLength(size_t length)6169 _IOMemoryDescriptorMixedData::setLength(size_t length)
6170 {
6171 	if (!_data || (length > _capacity)) {
6172 		void * const newData = IOMallocZero(length);
6173 		if (_data) {
6174 			bcopy(_data, newData, _length);
6175 			IOFree(_data, _capacity);
6176 		}
6177 		_data = newData;
6178 		_capacity = length;
6179 	}
6180 	_length = length;
6181 }
6182 
6183 const void *
getBytes() const6184 _IOMemoryDescriptorMixedData::getBytes() const
6185 {
6186 	return _length ? _data : nullptr;
6187 }
6188 
6189 size_t
getLength() const6190 _IOMemoryDescriptorMixedData::getLength() const
6191 {
6192 	return _data ? _length : 0;
6193 }
6194