xref: /xnu-8020.121.3/iokit/Kernel/IOMemoryDescriptor.cpp (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1*fdd8201dSApple OSS Distributions /*
2*fdd8201dSApple OSS Distributions  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*fdd8201dSApple OSS Distributions  *
4*fdd8201dSApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*fdd8201dSApple OSS Distributions  *
6*fdd8201dSApple OSS Distributions  * This file contains Original Code and/or Modifications of Original Code
7*fdd8201dSApple OSS Distributions  * as defined in and that are subject to the Apple Public Source License
8*fdd8201dSApple OSS Distributions  * Version 2.0 (the 'License'). You may not use this file except in
9*fdd8201dSApple OSS Distributions  * compliance with the License. The rights granted to you under the License
10*fdd8201dSApple OSS Distributions  * may not be used to create, or enable the creation or redistribution of,
11*fdd8201dSApple OSS Distributions  * unlawful or unlicensed copies of an Apple operating system, or to
12*fdd8201dSApple OSS Distributions  * circumvent, violate, or enable the circumvention or violation of, any
13*fdd8201dSApple OSS Distributions  * terms of an Apple operating system software license agreement.
14*fdd8201dSApple OSS Distributions  *
15*fdd8201dSApple OSS Distributions  * Please obtain a copy of the License at
16*fdd8201dSApple OSS Distributions  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*fdd8201dSApple OSS Distributions  *
18*fdd8201dSApple OSS Distributions  * The Original Code and all software distributed under the License are
19*fdd8201dSApple OSS Distributions  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*fdd8201dSApple OSS Distributions  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*fdd8201dSApple OSS Distributions  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*fdd8201dSApple OSS Distributions  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*fdd8201dSApple OSS Distributions  * Please see the License for the specific language governing rights and
24*fdd8201dSApple OSS Distributions  * limitations under the License.
25*fdd8201dSApple OSS Distributions  *
26*fdd8201dSApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*fdd8201dSApple OSS Distributions  */
28*fdd8201dSApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*fdd8201dSApple OSS Distributions 
30*fdd8201dSApple OSS Distributions #include <sys/cdefs.h>
31*fdd8201dSApple OSS Distributions 
32*fdd8201dSApple OSS Distributions #include <IOKit/assert.h>
33*fdd8201dSApple OSS Distributions #include <IOKit/system.h>
34*fdd8201dSApple OSS Distributions #include <IOKit/IOLib.h>
35*fdd8201dSApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*fdd8201dSApple OSS Distributions #include <IOKit/IOMapper.h>
37*fdd8201dSApple OSS Distributions #include <IOKit/IODMACommand.h>
38*fdd8201dSApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*fdd8201dSApple OSS Distributions 
40*fdd8201dSApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*fdd8201dSApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*fdd8201dSApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*fdd8201dSApple OSS Distributions 
44*fdd8201dSApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*fdd8201dSApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*fdd8201dSApple OSS Distributions #include <libkern/OSDebug.h>
47*fdd8201dSApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*fdd8201dSApple OSS Distributions 
49*fdd8201dSApple OSS Distributions #include "IOKitKernelInternal.h"
50*fdd8201dSApple OSS Distributions 
51*fdd8201dSApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*fdd8201dSApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*fdd8201dSApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*fdd8201dSApple OSS Distributions #include <libkern/c++/OSArray.h>
55*fdd8201dSApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*fdd8201dSApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*fdd8201dSApple OSS Distributions #include <os/overflow.h>
58*fdd8201dSApple OSS Distributions #include <os/cpp_util.h>
59*fdd8201dSApple OSS Distributions #include <os/base_private.h>
60*fdd8201dSApple OSS Distributions 
61*fdd8201dSApple OSS Distributions #include <sys/uio.h>
62*fdd8201dSApple OSS Distributions 
63*fdd8201dSApple OSS Distributions __BEGIN_DECLS
64*fdd8201dSApple OSS Distributions #include <vm/pmap.h>
65*fdd8201dSApple OSS Distributions #include <vm/vm_pageout.h>
66*fdd8201dSApple OSS Distributions #include <mach/memory_object_types.h>
67*fdd8201dSApple OSS Distributions #include <device/device_port.h>
68*fdd8201dSApple OSS Distributions 
69*fdd8201dSApple OSS Distributions #include <mach/vm_prot.h>
70*fdd8201dSApple OSS Distributions #include <mach/mach_vm.h>
71*fdd8201dSApple OSS Distributions #include <mach/memory_entry.h>
72*fdd8201dSApple OSS Distributions #include <vm/vm_fault.h>
73*fdd8201dSApple OSS Distributions #include <vm/vm_protos.h>
74*fdd8201dSApple OSS Distributions 
75*fdd8201dSApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76*fdd8201dSApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
77*fdd8201dSApple OSS Distributions 
78*fdd8201dSApple OSS Distributions __END_DECLS
79*fdd8201dSApple OSS Distributions 
80*fdd8201dSApple OSS Distributions #define kIOMapperWaitSystem     ((IOMapper *) 1)
81*fdd8201dSApple OSS Distributions 
82*fdd8201dSApple OSS Distributions static IOMapper * gIOSystemMapper = NULL;
83*fdd8201dSApple OSS Distributions 
84*fdd8201dSApple OSS Distributions ppnum_t           gIOLastPage;
85*fdd8201dSApple OSS Distributions 
86*fdd8201dSApple OSS Distributions enum {
87*fdd8201dSApple OSS Distributions 	kIOMapGuardSizeLarge = 65536
88*fdd8201dSApple OSS Distributions };
89*fdd8201dSApple OSS Distributions 
90*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
91*fdd8201dSApple OSS Distributions 
92*fdd8201dSApple OSS Distributions OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
93*fdd8201dSApple OSS Distributions 
94*fdd8201dSApple OSS Distributions #define super IOMemoryDescriptor
95*fdd8201dSApple OSS Distributions 
96*fdd8201dSApple OSS Distributions OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
97*fdd8201dSApple OSS Distributions     IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
98*fdd8201dSApple OSS Distributions 
99*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
100*fdd8201dSApple OSS Distributions 
101*fdd8201dSApple OSS Distributions static IORecursiveLock * gIOMemoryLock;
102*fdd8201dSApple OSS Distributions 
103*fdd8201dSApple OSS Distributions #define LOCK    IORecursiveLockLock( gIOMemoryLock)
104*fdd8201dSApple OSS Distributions #define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
105*fdd8201dSApple OSS Distributions #define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
106*fdd8201dSApple OSS Distributions #define WAKEUP  \
107*fdd8201dSApple OSS Distributions     IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
108*fdd8201dSApple OSS Distributions 
109*fdd8201dSApple OSS Distributions #if 0
110*fdd8201dSApple OSS Distributions #define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
111*fdd8201dSApple OSS Distributions #else
112*fdd8201dSApple OSS Distributions #define DEBG(fmt, args...)      {}
113*fdd8201dSApple OSS Distributions #endif
114*fdd8201dSApple OSS Distributions 
115*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
116*fdd8201dSApple OSS Distributions 
117*fdd8201dSApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
118*fdd8201dSApple OSS Distributions // Function
119*fdd8201dSApple OSS Distributions 
// Flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,
	kIOPLExternUPL = 0x00000002,
};

// Payload consumed by initWithOptions (see comment above) when constructing
// a descriptor from a persistent memory reference: pairs the source
// descriptor with the reference taken on its memory.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;      // descriptor the reference was created from
	IOMemoryReference               * fMemRef;  // the persistent memory reference itself
};

// Bookkeeping for one IOPL (I/O page list) covering a span of the descriptor.
// An array of these follows the page list inside ioGMDData (see getIOPLList()).
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags
};
138*fdd8201dSApple OSS Distributions 
139*fdd8201dSApple OSS Distributions enum { kMaxWireTags = 6 };
140*fdd8201dSApple OSS Distributions 
// Variable-length bookkeeping blob for IOGeneralMemoryDescriptor, stored in a
// _IOMemoryDescriptorMixedData buffer (see the getDataP()/getIOPLList()/
// getNumIOPL()/computeDataSize() macros below). Layout: this fixed header,
// then fPageCnt upl_page_info_t entries, then a trailing array of ioPLBlock.
struct ioGMDData {
	IOMapper *  fMapper;            // mapper used for DMA mapping (may be NULL)
	uint64_t    fDMAMapAlignment;   // alignment requirement for the DMA mapping
	uint64_t    fMappedBase;        // DMA mapping base; meaningful only if fMappedBaseValid
	uint64_t    fMappedLength;      // DMA mapping length
	uint64_t    fPreparationID;     // ID for the current prepare() generation — set elsewhere in this file
#if IOTRACKING
	IOTracking  fWireTracking;      // wired-memory accounting hook
#endif /* IOTRACKING */
	unsigned int      fPageCnt;     // number of entries in fPageList
	uint8_t           fDMAMapNumAddressBits;  // address width the mapper must honor
	unsigned char     fCompletionError:1;     // an error was recorded during completion
	unsigned char     fMappedBaseValid:1;     // fMappedBase/fMappedLength are valid
	unsigned char     _resv:4;                // reserved
	unsigned char     fDMAAccess:2;           // DMA access direction bits
	
	/* variable length arrays */
	// Declared as [1] but really fPageCnt entries long; the ioPLBlock array
	// begins immediately after it, hence the upl_t alignment below.
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
166*fdd8201dSApple OSS Distributions 
167*fdd8201dSApple OSS Distributions #pragma GCC visibility push(hidden)
168*fdd8201dSApple OSS Distributions 
// Internal growable byte buffer (hidden visibility — not a public API).
// Used to hold the ioGMDData bookkeeping blob; getDataP() casts the bytes
// returned by getBytes() to ioGMDData. Method bodies live elsewhere in
// this file.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create a buffer sized to hold at least 'capacity' bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	virtual bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append 'length' bytes from 'bytes'; returns false on failure.
	virtual bool appendBytes(const void * bytes, size_t length);
	// Set the logical length of the data.
	virtual void setLength(size_t length);

	virtual const void * getBytes() const;
	virtual size_t getLength() const;

private:
	void freeMemory();

	void *  _data = nullptr;    // backing allocation
	size_t  _length = 0;        // bytes in use
	size_t  _capacity = 0;      // bytes allocated
};
191*fdd8201dSApple OSS Distributions 
192*fdd8201dSApple OSS Distributions #pragma GCC visibility pop
193*fdd8201dSApple OSS Distributions 
194*fdd8201dSApple OSS Distributions #define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
195*fdd8201dSApple OSS Distributions #define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
196*fdd8201dSApple OSS Distributions #define getNumIOPL(osd, d)      \
197*fdd8201dSApple OSS Distributions     ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
198*fdd8201dSApple OSS Distributions #define getPageList(d)  (&(d->fPageList[0]))
199*fdd8201dSApple OSS Distributions #define computeDataSize(p, u) \
200*fdd8201dSApple OSS Distributions     (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
201*fdd8201dSApple OSS Distributions 
202*fdd8201dSApple OSS Distributions enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
203*fdd8201dSApple OSS Distributions 
204*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
205*fdd8201dSApple OSS Distributions 
extern "C" {
/*
 * Device pager callout: fault in [offset, offset+size) for the memory
 * descriptor registered with this pager. 'device_handle' is the
 * IOMemoryDescriptorReserved recorded when the pager was created.
 * Returns KERN_ABORTED if the descriptor has already been torn down.
 */
kern_return_t
device_data_action(
	uintptr_t               device_handle,
	ipc_port_t              device_pager,
	vm_prot_t               protection,
	vm_object_offset_t      offset,
	vm_size_t               size)
{
	kern_return_t        kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	OSSharedPtr<IOMemoryDescriptor> memDesc;

	LOCK;
	if (ref->dp.memory) {
		// Retain the descriptor so it cannot be freed while the fault is
		// being handled; the retain is dropped (reset) before UNLOCK.
		memDesc.reset(ref->dp.memory, OSRetain);
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc.reset();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}

/*
 * Device pager callout on shutdown: the pager no longer references the
 * descriptor, so free the reserved bookkeeping attached to the handle.
 */
kern_return_t
device_close(
	uintptr_t     device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IOFreeType( ref, IOMemoryDescriptorReserved );

	return kIOReturnSuccess;
}
};      // end extern "C"
243*fdd8201dSApple OSS Distributions 
244*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
245*fdd8201dSApple OSS Distributions 
246*fdd8201dSApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
247*fdd8201dSApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
248*fdd8201dSApple OSS Distributions // checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
    UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
	// Return the (address, length) pair at index 'ind' from whichever
	// representation the Ranges union 'r' holds, selected by 'type'.
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// UIO-backed ranges: fetch iovec 'ind' from the uio.
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		// 32-bit kernel only: 64-bit range types use the wider v64 layout.
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len  = cur.length;
	}
#endif /* !__LP64__ */
	else {
		// Native-width ranges (on LP64 every non-UIO type takes this path).
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len  = cur.length;
	}
}
274*fdd8201dSApple OSS Distributions 
275*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
276*fdd8201dSApple OSS Distributions 
277*fdd8201dSApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)278*fdd8201dSApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
279*fdd8201dSApple OSS Distributions {
280*fdd8201dSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
281*fdd8201dSApple OSS Distributions 
282*fdd8201dSApple OSS Distributions 	*control = VM_PURGABLE_SET_STATE;
283*fdd8201dSApple OSS Distributions 
284*fdd8201dSApple OSS Distributions 	enum { kIOMemoryPurgeableControlMask = 15 };
285*fdd8201dSApple OSS Distributions 
286*fdd8201dSApple OSS Distributions 	switch (kIOMemoryPurgeableControlMask & newState) {
287*fdd8201dSApple OSS Distributions 	case kIOMemoryPurgeableKeepCurrent:
288*fdd8201dSApple OSS Distributions 		*control = VM_PURGABLE_GET_STATE;
289*fdd8201dSApple OSS Distributions 		break;
290*fdd8201dSApple OSS Distributions 
291*fdd8201dSApple OSS Distributions 	case kIOMemoryPurgeableNonVolatile:
292*fdd8201dSApple OSS Distributions 		*state = VM_PURGABLE_NONVOLATILE;
293*fdd8201dSApple OSS Distributions 		break;
294*fdd8201dSApple OSS Distributions 	case kIOMemoryPurgeableVolatile:
295*fdd8201dSApple OSS Distributions 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
296*fdd8201dSApple OSS Distributions 		break;
297*fdd8201dSApple OSS Distributions 	case kIOMemoryPurgeableEmpty:
298*fdd8201dSApple OSS Distributions 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
299*fdd8201dSApple OSS Distributions 		break;
300*fdd8201dSApple OSS Distributions 	default:
301*fdd8201dSApple OSS Distributions 		err = kIOReturnBadArgument;
302*fdd8201dSApple OSS Distributions 		break;
303*fdd8201dSApple OSS Distributions 	}
304*fdd8201dSApple OSS Distributions 
305*fdd8201dSApple OSS Distributions 	if (*control == VM_PURGABLE_SET_STATE) {
306*fdd8201dSApple OSS Distributions 		// let VM know this call is from the kernel and is allowed to alter
307*fdd8201dSApple OSS Distributions 		// the volatility of the memory entry even if it was created with
308*fdd8201dSApple OSS Distributions 		// MAP_MEM_PURGABLE_KERNEL_ONLY
309*fdd8201dSApple OSS Distributions 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
310*fdd8201dSApple OSS Distributions 	}
311*fdd8201dSApple OSS Distributions 
312*fdd8201dSApple OSS Distributions 	return err;
313*fdd8201dSApple OSS Distributions }
314*fdd8201dSApple OSS Distributions 
315*fdd8201dSApple OSS Distributions static IOReturn
purgeableStateBits(int * state)316*fdd8201dSApple OSS Distributions purgeableStateBits(int * state)
317*fdd8201dSApple OSS Distributions {
318*fdd8201dSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
319*fdd8201dSApple OSS Distributions 
320*fdd8201dSApple OSS Distributions 	switch (VM_PURGABLE_STATE_MASK & *state) {
321*fdd8201dSApple OSS Distributions 	case VM_PURGABLE_NONVOLATILE:
322*fdd8201dSApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
323*fdd8201dSApple OSS Distributions 		break;
324*fdd8201dSApple OSS Distributions 	case VM_PURGABLE_VOLATILE:
325*fdd8201dSApple OSS Distributions 		*state = kIOMemoryPurgeableVolatile;
326*fdd8201dSApple OSS Distributions 		break;
327*fdd8201dSApple OSS Distributions 	case VM_PURGABLE_EMPTY:
328*fdd8201dSApple OSS Distributions 		*state = kIOMemoryPurgeableEmpty;
329*fdd8201dSApple OSS Distributions 		break;
330*fdd8201dSApple OSS Distributions 	default:
331*fdd8201dSApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
332*fdd8201dSApple OSS Distributions 		err = kIOReturnNotReady;
333*fdd8201dSApple OSS Distributions 		break;
334*fdd8201dSApple OSS Distributions 	}
335*fdd8201dSApple OSS Distributions 	return err;
336*fdd8201dSApple OSS Distributions }
337*fdd8201dSApple OSS Distributions 
// One row of the cache-mode translation table: the VM_WIMG_* cache attribute
// and the MAP_MEM_* memory-entry object type for an IOKit cache mode.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* value
	unsigned int object_type;   // MAP_MEM_* value
} iokit_memtype_entry;

// Indexed directly by IOKit cache mode (kIODefaultCache ... ).
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
355*fdd8201dSApple OSS Distributions 
// Encode a cache mode's MAP_MEM_* object type (from iomd_mem_types) into the
// vm_prot_t bits used when creating a memory entry, via the SET_MAP_MEM()
// macro. 'cacheMode' must be a valid index into iomd_mem_types.
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
	vm_prot_t prot = 0;
	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
	return prot;
}
364*fdd8201dSApple OSS Distributions 
365*fdd8201dSApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)366*fdd8201dSApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
367*fdd8201dSApple OSS Distributions {
368*fdd8201dSApple OSS Distributions 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
369*fdd8201dSApple OSS Distributions 	if (cacheMode == kIODefaultCache) {
370*fdd8201dSApple OSS Distributions 		return -1U;
371*fdd8201dSApple OSS Distributions 	}
372*fdd8201dSApple OSS Distributions 	return iomd_mem_types[cacheMode].wimg;
373*fdd8201dSApple OSS Distributions }
374*fdd8201dSApple OSS Distributions 
375*fdd8201dSApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)376*fdd8201dSApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
377*fdd8201dSApple OSS Distributions {
378*fdd8201dSApple OSS Distributions 	pagerFlags &= VM_WIMG_MASK;
379*fdd8201dSApple OSS Distributions 	IOOptionBits cacheMode = kIODefaultCache;
380*fdd8201dSApple OSS Distributions 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
381*fdd8201dSApple OSS Distributions 		if (iomd_mem_types[i].wimg == pagerFlags) {
382*fdd8201dSApple OSS Distributions 			cacheMode = i;
383*fdd8201dSApple OSS Distributions 			break;
384*fdd8201dSApple OSS Distributions 		}
385*fdd8201dSApple OSS Distributions 	}
386*fdd8201dSApple OSS Distributions 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
387*fdd8201dSApple OSS Distributions }
388*fdd8201dSApple OSS Distributions 
389*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
390*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391*fdd8201dSApple OSS Distributions 
// One Mach memory entry: a send right plus the span of the described memory
// it covers.
struct IOMemoryEntry {
	ipc_port_t entry;   // send right for the named memory entry
	int64_t    offset;
	uint64_t   size;
	uint64_t   start;
};

// Refcounted, variable-length bundle of memory entries representing a
// reference on a descriptor's memory. 'capacity' entry slots are allocated
// (see memoryReferenceAlloc()), of which 'count' are in use; each in-use
// entry holds a send right released by memoryReferenceFree().
struct IOMemoryReference {
	volatile SInt32             refCount;
	vm_prot_t                   prot;
	uint32_t                    capacity;
	uint32_t                    count;
	struct IOMemoryReference  * mapRef;   // optional chained reference, freed recursively
	IOMemoryEntry               entries[0];
};

// Option bits for creating memory references.
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Global count of live IOMemoryReference allocations (accounting).
SInt32 gIOMemoryReferenceCount;
415*fdd8201dSApple OSS Distributions 
/*
 * Allocate an IOMemoryReference with 'capacity' entry slots, or grow/shrink
 * an existing one ('realloc' non-NULL) preserving its contents.
 * Returns NULL on allocation failure. NOTE(review): on the realloc path the
 * old reference is freed even when the new allocation fails, so the caller's
 * pointer is dead either way — callers must treat 'realloc' as consumed.
 */
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
	IOMemoryReference * ref;
	size_t              newSize, oldSize, copySize;

	// Header plus a flexible tail of 'capacity' IOMemoryEntry slots.
	// NOTE(review): 'capacity * sizeof(ref->entries[0])' is not
	// overflow-checked — presumably capacities stay small; verify callers.
	newSize = (sizeof(IOMemoryReference)
	    - sizeof(ref->entries)
	    + capacity * sizeof(ref->entries[0]));
	ref = (typeof(ref))IOMalloc(newSize);
	if (realloc) {
		// Copy the smaller of the two footprints, then release the old
		// allocation (refCount and bookkeeping carry over via the copy).
		oldSize = (sizeof(IOMemoryReference)
		    - sizeof(realloc->entries)
		    + realloc->capacity * sizeof(realloc->entries[0]));
		copySize = oldSize;
		if (copySize > newSize) {
			copySize = newSize;
		}
		if (ref) {
			bcopy(realloc, ref, copySize);
		}
		IOFree(realloc, oldSize);
	} else if (ref) {
		// Fresh reference: zeroed header, single refcount, global accounting.
		bzero(ref, sizeof(*ref));
		ref->refCount = 1;
		OSIncrementAtomic(&gIOMemoryReferenceCount);
	}
	if (!ref) {
		return NULL;
	}
	ref->capacity = capacity;
	return ref;
}
449*fdd8201dSApple OSS Distributions 
/*
 * Destroy an IOMemoryReference: free any chained mapping reference, release
 * the send right held by each in-use entry, and free the variable-sized
 * allocation itself. Call only when the refcount has reached zero
 * (memoryReferenceRelease() does this).
 */
void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
	IOMemoryEntry * entries;
	size_t          size;

	if (ref->mapRef) {
		memoryReferenceFree(ref->mapRef);
		ref->mapRef = NULL;
	}

	// Walk backwards over the 'count' populated entries, dropping each
	// memory entry's send right.
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;
		ipc_port_release_send(entries->entry);
	}
	// Size must mirror the computation in memoryReferenceAlloc():
	// header plus 'capacity' (not 'count') entry slots.
	size = (sizeof(IOMemoryReference)
	    - sizeof(ref->entries)
	    + ref->capacity * sizeof(ref->entries[0]));
	IOFree(ref, size);

	OSDecrementAtomic(&gIOMemoryReferenceCount);
}
473*fdd8201dSApple OSS Distributions 
474*fdd8201dSApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)475*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
476*fdd8201dSApple OSS Distributions {
477*fdd8201dSApple OSS Distributions 	if (1 == OSDecrementAtomic(&ref->refCount)) {
478*fdd8201dSApple OSS Distributions 		memoryReferenceFree(ref);
479*fdd8201dSApple OSS Distributions 	}
480*fdd8201dSApple OSS Distributions }
481*fdd8201dSApple OSS Distributions 
482*fdd8201dSApple OSS Distributions 
/*
 * Build an IOMemoryReference describing this descriptor's memory as a set
 * of Mach named entries (one IOMemoryEntry per entry port), suitable for
 * later mapping. For task-backed (virtual) descriptors the ranges are
 * coalesced and covered with mach_make_memory_entry_internal(); for
 * physical / UPL descriptors (_task == 0) a device pager is set up and a
 * single memory-object entry created.
 *
 * options may include kIOMemoryReferenceWrite, kIOMemoryReferenceCOW and
 * kIOMemoryReferenceReuse (try to share the existing _memRef).
 * On success *reference holds the new (or reused) reference; on failure
 * it is set to NULL and an error returned.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	// Initial entry capacity and growth increment for the reference.
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t        err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;
	IOMemoryEntry *      cloneEntries = NULL;
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
	int                  misaligned_start = 0, misaligned_end = 0;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	remain = _length;
	if (_task) {
		// Virtual descriptor: start from the first (addr, len) range.
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain  = nextLen;
		}
	} else {
		// Physical descriptor: resolve the first physical segment.
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	// Writable unless the descriptor is strictly direction-out.
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW   & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	// Reuse request: walk the existing reference's entries in parallel
	// below; MAP_MEM_NAMED_REUSE is cleared as soon as they diverge.
	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//    + "network" ledger tag
					//    + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//    + no ledger tag
					//    + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			// NULL map: the named entry creates a fresh VM object.
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		while (remain) {
			srcAddr  = nextAddr;
			srcLen   = nextLen;
			nextAddr = 0;
			nextLen  = 0;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			// With MAP_MEM_USE_DATA_ADDR the entry covers the exact byte
			// range; otherwise it is expanded to whole pages.
			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr   = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr   = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// One named entry per iteration; the VM may satisfy less than
			// requested (actualSize), so loop until the range is covered.
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				// Detect page-size adjustment (overmap) at either end of
				// the new entry; used below to reject unsupported layouts.
				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					if (misaligned_start || misaligned_end) {
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				// Grow the entries array in place when full.
				if (count >= ref->capacity) {
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry  = entry;
				entries->size   = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				// Reuse only remains valid while every new entry matches
				// the corresponding entry of the existing reference.
				if (MAP_MEM_NAMED_REUSE & prot) {
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		// Device pager backs the physical pages; a single memory-object
		// named entry is created over it.
		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr  = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry  = entry;
				entries->size   = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot  = prot;

	// Copy-on-write descriptors get a shadow reference created with
	// kIOMemoryReferenceCOW, chained via ref->mapRef.
	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		// If reuse survived the whole walk, discard the freshly built
		// reference and hand back the existing one with an extra ref.
		if (MAP_MEM_NAMED_REUSE & prot) {
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
803*fdd8201dSApple OSS Distributions 
804*fdd8201dSApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)805*fdd8201dSApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
806*fdd8201dSApple OSS Distributions {
807*fdd8201dSApple OSS Distributions 	switch (kIOMapGuardedMask & options) {
808*fdd8201dSApple OSS Distributions 	default:
809*fdd8201dSApple OSS Distributions 	case kIOMapGuardedSmall:
810*fdd8201dSApple OSS Distributions 		return vm_map_page_size(map);
811*fdd8201dSApple OSS Distributions 	case kIOMapGuardedLarge:
812*fdd8201dSApple OSS Distributions 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
813*fdd8201dSApple OSS Distributions 		return kIOMapGuardSizeLarge;
814*fdd8201dSApple OSS Distributions 	}
815*fdd8201dSApple OSS Distributions 	;
816*fdd8201dSApple OSS Distributions }
817*fdd8201dSApple OSS Distributions 
818*fdd8201dSApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)819*fdd8201dSApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
820*fdd8201dSApple OSS Distributions     vm_map_offset_t addr, mach_vm_size_t size)
821*fdd8201dSApple OSS Distributions {
822*fdd8201dSApple OSS Distributions 	kern_return_t   kr;
823*fdd8201dSApple OSS Distributions 	vm_map_offset_t actualAddr;
824*fdd8201dSApple OSS Distributions 	mach_vm_size_t  actualSize;
825*fdd8201dSApple OSS Distributions 
826*fdd8201dSApple OSS Distributions 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
827*fdd8201dSApple OSS Distributions 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
828*fdd8201dSApple OSS Distributions 
829*fdd8201dSApple OSS Distributions 	if (kIOMapGuardedMask & options) {
830*fdd8201dSApple OSS Distributions 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
831*fdd8201dSApple OSS Distributions 		actualAddr -= guardSize;
832*fdd8201dSApple OSS Distributions 		actualSize += 2 * guardSize;
833*fdd8201dSApple OSS Distributions 	}
834*fdd8201dSApple OSS Distributions 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
835*fdd8201dSApple OSS Distributions 
836*fdd8201dSApple OSS Distributions 	return kr;
837*fdd8201dSApple OSS Distributions }
838*fdd8201dSApple OSS Distributions 
/*
 * Allocate virtual address space in 'map' for a memory descriptor
 * mapping, per the IOMemoryDescriptorMapAllocRef passed as _ref
 * (options, size, preferred address, tag, protections). Guarded
 * mappings (kIOMapGuarded*) get a VM_PROT_NONE guard region on each
 * side; ref->mapped is advanced past the leading guard so it points
 * at the usable range. Returns a Mach/IOKit error on failure.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn                        err;
	vm_map_offset_t                 addr;
	mach_vm_size_t                  size;
	mach_vm_size_t                  guardSize;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		// Guards require the VM to choose the address (anywhere);
		// a fixed-address guarded request is rejected.
		if (!(kIOMapAnywhere & ref->options)) {
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		// Reserve room for one guard region before and one after.
		size += 2 * guardSize;
	}

	err = vm_map_enter_mem_object(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    // alignment mask: at least the kernel page mask when the VM
	    // picks the address on mixed-page-size configurations
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    (((ref->options & kIOMapAnywhere)
	    ? VM_FLAGS_ANYWHERE
	    : VM_FLAGS_FIXED)),
	    VM_MAP_KERNEL_FLAGS_NONE,
	    ref->tag,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,                       /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// Last guard starts one guardSize before the end of the
			// enlarged allocation, truncated to a map-page boundary.
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			// Make both guard regions inaccessible.
			err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
			assert(KERN_SUCCESS == err);
			err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
			assert(KERN_SUCCESS == err);
			// Report the usable address just past the leading guard.
			ref->mapped += guardSize;
		}
	}

	return err;
}
894*fdd8201dSApple OSS Distributions 
895*fdd8201dSApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)896*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
897*fdd8201dSApple OSS Distributions 	IOMemoryReference * ref,
898*fdd8201dSApple OSS Distributions 	vm_map_t            map,
899*fdd8201dSApple OSS Distributions 	mach_vm_size_t      inoffset,
900*fdd8201dSApple OSS Distributions 	mach_vm_size_t      size,
901*fdd8201dSApple OSS Distributions 	IOOptionBits        options,
902*fdd8201dSApple OSS Distributions 	mach_vm_address_t * inaddr)
903*fdd8201dSApple OSS Distributions {
904*fdd8201dSApple OSS Distributions 	IOReturn        err;
905*fdd8201dSApple OSS Distributions 	int64_t         offset = inoffset;
906*fdd8201dSApple OSS Distributions 	uint32_t        rangeIdx, entryIdx;
907*fdd8201dSApple OSS Distributions 	vm_map_offset_t addr, mapAddr;
908*fdd8201dSApple OSS Distributions 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
909*fdd8201dSApple OSS Distributions 
910*fdd8201dSApple OSS Distributions 	mach_vm_address_t nextAddr;
911*fdd8201dSApple OSS Distributions 	mach_vm_size_t    nextLen;
912*fdd8201dSApple OSS Distributions 	IOByteCount       physLen;
913*fdd8201dSApple OSS Distributions 	IOMemoryEntry   * entry;
914*fdd8201dSApple OSS Distributions 	vm_prot_t         prot, memEntryCacheMode;
915*fdd8201dSApple OSS Distributions 	IOOptionBits      type;
916*fdd8201dSApple OSS Distributions 	IOOptionBits      cacheMode;
917*fdd8201dSApple OSS Distributions 	vm_tag_t          tag;
918*fdd8201dSApple OSS Distributions 	// for the kIOMapPrefault option.
919*fdd8201dSApple OSS Distributions 	upl_page_info_t * pageList = NULL;
920*fdd8201dSApple OSS Distributions 	UInt              currentPageIndex = 0;
921*fdd8201dSApple OSS Distributions 	bool              didAlloc;
922*fdd8201dSApple OSS Distributions 
923*fdd8201dSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
924*fdd8201dSApple OSS Distributions 
925*fdd8201dSApple OSS Distributions 	if (ref->mapRef) {
926*fdd8201dSApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
927*fdd8201dSApple OSS Distributions 		return err;
928*fdd8201dSApple OSS Distributions 	}
929*fdd8201dSApple OSS Distributions 
930*fdd8201dSApple OSS Distributions 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
931*fdd8201dSApple OSS Distributions 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
932*fdd8201dSApple OSS Distributions 		return err;
933*fdd8201dSApple OSS Distributions 	}
934*fdd8201dSApple OSS Distributions 
935*fdd8201dSApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
936*fdd8201dSApple OSS Distributions 
937*fdd8201dSApple OSS Distributions 	prot = VM_PROT_READ;
938*fdd8201dSApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
939*fdd8201dSApple OSS Distributions 		prot |= VM_PROT_WRITE;
940*fdd8201dSApple OSS Distributions 	}
941*fdd8201dSApple OSS Distributions 	prot &= ref->prot;
942*fdd8201dSApple OSS Distributions 
943*fdd8201dSApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
944*fdd8201dSApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
945*fdd8201dSApple OSS Distributions 		// VM system requires write access to update named entry cache mode
946*fdd8201dSApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
947*fdd8201dSApple OSS Distributions 	}
948*fdd8201dSApple OSS Distributions 
949*fdd8201dSApple OSS Distributions 	tag = (typeof(tag))getVMTag(map);
950*fdd8201dSApple OSS Distributions 
951*fdd8201dSApple OSS Distributions 	if (_task) {
952*fdd8201dSApple OSS Distributions 		// Find first range for offset
953*fdd8201dSApple OSS Distributions 		if (!_rangesCount) {
954*fdd8201dSApple OSS Distributions 			return kIOReturnBadArgument;
955*fdd8201dSApple OSS Distributions 		}
956*fdd8201dSApple OSS Distributions 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
957*fdd8201dSApple OSS Distributions 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
958*fdd8201dSApple OSS Distributions 			if (remain < nextLen) {
959*fdd8201dSApple OSS Distributions 				break;
960*fdd8201dSApple OSS Distributions 			}
961*fdd8201dSApple OSS Distributions 			remain -= nextLen;
962*fdd8201dSApple OSS Distributions 		}
963*fdd8201dSApple OSS Distributions 	} else {
964*fdd8201dSApple OSS Distributions 		rangeIdx = 0;
965*fdd8201dSApple OSS Distributions 		remain   = 0;
966*fdd8201dSApple OSS Distributions 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
967*fdd8201dSApple OSS Distributions 		nextLen  = size;
968*fdd8201dSApple OSS Distributions 	}
969*fdd8201dSApple OSS Distributions 
970*fdd8201dSApple OSS Distributions 	assert(remain < nextLen);
971*fdd8201dSApple OSS Distributions 	if (remain >= nextLen) {
972*fdd8201dSApple OSS Distributions 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
973*fdd8201dSApple OSS Distributions 		return kIOReturnBadArgument;
974*fdd8201dSApple OSS Distributions 	}
975*fdd8201dSApple OSS Distributions 
976*fdd8201dSApple OSS Distributions 	nextAddr  += remain;
977*fdd8201dSApple OSS Distributions 	nextLen   -= remain;
978*fdd8201dSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
979*fdd8201dSApple OSS Distributions 	pageOffset = (vm_map_page_mask(map) & nextAddr);
980*fdd8201dSApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
981*fdd8201dSApple OSS Distributions 	pageOffset = (page_mask & nextAddr);
982*fdd8201dSApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
983*fdd8201dSApple OSS Distributions 	addr       = 0;
984*fdd8201dSApple OSS Distributions 	didAlloc   = false;
985*fdd8201dSApple OSS Distributions 
986*fdd8201dSApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
987*fdd8201dSApple OSS Distributions 		addr = *inaddr;
988*fdd8201dSApple OSS Distributions 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
989*fdd8201dSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
990*fdd8201dSApple OSS Distributions 		}
991*fdd8201dSApple OSS Distributions 		addr -= pageOffset;
992*fdd8201dSApple OSS Distributions 	}
993*fdd8201dSApple OSS Distributions 
994*fdd8201dSApple OSS Distributions 	// find first entry for offset
995*fdd8201dSApple OSS Distributions 	for (entryIdx = 0;
996*fdd8201dSApple OSS Distributions 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
997*fdd8201dSApple OSS Distributions 	    entryIdx++) {
998*fdd8201dSApple OSS Distributions 	}
999*fdd8201dSApple OSS Distributions 	entryIdx--;
1000*fdd8201dSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1001*fdd8201dSApple OSS Distributions 
1002*fdd8201dSApple OSS Distributions 	// allocate VM
1003*fdd8201dSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1004*fdd8201dSApple OSS Distributions 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1005*fdd8201dSApple OSS Distributions #else
1006*fdd8201dSApple OSS Distributions 	size = round_page_64(size + pageOffset);
1007*fdd8201dSApple OSS Distributions #endif
1008*fdd8201dSApple OSS Distributions 	if (kIOMapOverwrite & options) {
1009*fdd8201dSApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1010*fdd8201dSApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1011*fdd8201dSApple OSS Distributions 		}
1012*fdd8201dSApple OSS Distributions 		err = KERN_SUCCESS;
1013*fdd8201dSApple OSS Distributions 	} else {
1014*fdd8201dSApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1015*fdd8201dSApple OSS Distributions 		ref.map     = map;
1016*fdd8201dSApple OSS Distributions 		ref.tag     = tag;
1017*fdd8201dSApple OSS Distributions 		ref.options = options;
1018*fdd8201dSApple OSS Distributions 		ref.size    = size;
1019*fdd8201dSApple OSS Distributions 		ref.prot    = prot;
1020*fdd8201dSApple OSS Distributions 		if (options & kIOMapAnywhere) {
1021*fdd8201dSApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1022*fdd8201dSApple OSS Distributions 			ref.mapped = 0;
1023*fdd8201dSApple OSS Distributions 		} else {
1024*fdd8201dSApple OSS Distributions 			ref.mapped = addr;
1025*fdd8201dSApple OSS Distributions 		}
1026*fdd8201dSApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1027*fdd8201dSApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1028*fdd8201dSApple OSS Distributions 		} else {
1029*fdd8201dSApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1030*fdd8201dSApple OSS Distributions 		}
1031*fdd8201dSApple OSS Distributions 		if (KERN_SUCCESS == err) {
1032*fdd8201dSApple OSS Distributions 			addr     = ref.mapped;
1033*fdd8201dSApple OSS Distributions 			map      = ref.map;
1034*fdd8201dSApple OSS Distributions 			didAlloc = true;
1035*fdd8201dSApple OSS Distributions 		}
1036*fdd8201dSApple OSS Distributions 	}
1037*fdd8201dSApple OSS Distributions 
1038*fdd8201dSApple OSS Distributions 	/*
1039*fdd8201dSApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1040*fdd8201dSApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1041*fdd8201dSApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1042*fdd8201dSApple OSS Distributions 	 * operations.
1043*fdd8201dSApple OSS Distributions 	 */
1044*fdd8201dSApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1045*fdd8201dSApple OSS Distributions 		options &= ~kIOMapPrefault;
1046*fdd8201dSApple OSS Distributions 	}
1047*fdd8201dSApple OSS Distributions 
1048*fdd8201dSApple OSS Distributions 	/*
1049*fdd8201dSApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1050*fdd8201dSApple OSS Distributions 	 * memory type, and the underlying data.
1051*fdd8201dSApple OSS Distributions 	 */
1052*fdd8201dSApple OSS Distributions 	if (options & kIOMapPrefault) {
1053*fdd8201dSApple OSS Distributions 		/*
1054*fdd8201dSApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1055*fdd8201dSApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1056*fdd8201dSApple OSS Distributions 		 */
1057*fdd8201dSApple OSS Distributions 		assert(_wireCount != 0);
1058*fdd8201dSApple OSS Distributions 		assert(_memoryEntries != NULL);
1059*fdd8201dSApple OSS Distributions 		if ((_wireCount == 0) ||
1060*fdd8201dSApple OSS Distributions 		    (_memoryEntries == NULL)) {
1061*fdd8201dSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1062*fdd8201dSApple OSS Distributions 			return kIOReturnBadArgument;
1063*fdd8201dSApple OSS Distributions 		}
1064*fdd8201dSApple OSS Distributions 
1065*fdd8201dSApple OSS Distributions 		// Get the page list.
1066*fdd8201dSApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1067*fdd8201dSApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1068*fdd8201dSApple OSS Distributions 		pageList = getPageList(dataP);
1069*fdd8201dSApple OSS Distributions 
1070*fdd8201dSApple OSS Distributions 		// Get the number of IOPLs.
1071*fdd8201dSApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1072*fdd8201dSApple OSS Distributions 
1073*fdd8201dSApple OSS Distributions 		/*
1074*fdd8201dSApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1075*fdd8201dSApple OSS Distributions 		 * the offset. The search will go past it, so we'll need to go back to the
1076*fdd8201dSApple OSS Distributions 		 * right range at the end.
1077*fdd8201dSApple OSS Distributions 		 */
1078*fdd8201dSApple OSS Distributions 		UInt ioplIndex = 0;
1079*fdd8201dSApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1080*fdd8201dSApple OSS Distributions 			ioplIndex++;
1081*fdd8201dSApple OSS Distributions 		}
1082*fdd8201dSApple OSS Distributions 		ioplIndex--;
1083*fdd8201dSApple OSS Distributions 
1084*fdd8201dSApple OSS Distributions 		// Retrieve the IOPL info block.
1085*fdd8201dSApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1086*fdd8201dSApple OSS Distributions 
1087*fdd8201dSApple OSS Distributions 		/*
1088*fdd8201dSApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1089*fdd8201dSApple OSS Distributions 		 * array.
1090*fdd8201dSApple OSS Distributions 		 */
1091*fdd8201dSApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1092*fdd8201dSApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1093*fdd8201dSApple OSS Distributions 		} else {
1094*fdd8201dSApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1095*fdd8201dSApple OSS Distributions 		}
1096*fdd8201dSApple OSS Distributions 
1097*fdd8201dSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1098*fdd8201dSApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1099*fdd8201dSApple OSS Distributions 
1100*fdd8201dSApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1101*fdd8201dSApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1102*fdd8201dSApple OSS Distributions 	}
1103*fdd8201dSApple OSS Distributions 
1104*fdd8201dSApple OSS Distributions 	// enter mappings
1105*fdd8201dSApple OSS Distributions 	remain  = size;
1106*fdd8201dSApple OSS Distributions 	mapAddr = addr;
1107*fdd8201dSApple OSS Distributions 	addr    += pageOffset;
1108*fdd8201dSApple OSS Distributions 
1109*fdd8201dSApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1110*fdd8201dSApple OSS Distributions 		entryOffset = offset - entry->offset;
1111*fdd8201dSApple OSS Distributions 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1112*fdd8201dSApple OSS Distributions 			err = kIOReturnNotAligned;
1113*fdd8201dSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1114*fdd8201dSApple OSS Distributions 			break;
1115*fdd8201dSApple OSS Distributions 		}
1116*fdd8201dSApple OSS Distributions 
1117*fdd8201dSApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1118*fdd8201dSApple OSS Distributions 			vm_size_t unused = 0;
1119*fdd8201dSApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1120*fdd8201dSApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1121*fdd8201dSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1122*fdd8201dSApple OSS Distributions 		}
1123*fdd8201dSApple OSS Distributions 
1124*fdd8201dSApple OSS Distributions 		entryOffset -= pageOffset;
1125*fdd8201dSApple OSS Distributions 		if (entryOffset >= entry->size) {
1126*fdd8201dSApple OSS Distributions 			panic("entryOffset");
1127*fdd8201dSApple OSS Distributions 		}
1128*fdd8201dSApple OSS Distributions 		chunk = entry->size - entryOffset;
1129*fdd8201dSApple OSS Distributions 		if (chunk) {
1130*fdd8201dSApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags;
1131*fdd8201dSApple OSS Distributions 
1132*fdd8201dSApple OSS Distributions 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1133*fdd8201dSApple OSS Distributions 			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1134*fdd8201dSApple OSS Distributions 
1135*fdd8201dSApple OSS Distributions 			if (chunk > remain) {
1136*fdd8201dSApple OSS Distributions 				chunk = remain;
1137*fdd8201dSApple OSS Distributions 			}
1138*fdd8201dSApple OSS Distributions 			if (options & kIOMapPrefault) {
1139*fdd8201dSApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1140*fdd8201dSApple OSS Distributions 
1141*fdd8201dSApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1142*fdd8201dSApple OSS Distributions 				    &mapAddr,
1143*fdd8201dSApple OSS Distributions 				    chunk, 0 /* mask */,
1144*fdd8201dSApple OSS Distributions 				    (VM_FLAGS_FIXED
1145*fdd8201dSApple OSS Distributions 				    | VM_FLAGS_OVERWRITE),
1146*fdd8201dSApple OSS Distributions 				    vmk_flags,
1147*fdd8201dSApple OSS Distributions 				    tag,
1148*fdd8201dSApple OSS Distributions 				    entry->entry,
1149*fdd8201dSApple OSS Distributions 				    entryOffset,
1150*fdd8201dSApple OSS Distributions 				    prot,                        // cur
1151*fdd8201dSApple OSS Distributions 				    prot,                        // max
1152*fdd8201dSApple OSS Distributions 				    &pageList[currentPageIndex],
1153*fdd8201dSApple OSS Distributions 				    nb_pages);
1154*fdd8201dSApple OSS Distributions 
1155*fdd8201dSApple OSS Distributions 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1156*fdd8201dSApple OSS Distributions 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1157*fdd8201dSApple OSS Distributions 				}
1158*fdd8201dSApple OSS Distributions 				// Compute the next index in the page list.
1159*fdd8201dSApple OSS Distributions 				currentPageIndex += nb_pages;
1160*fdd8201dSApple OSS Distributions 				assert(currentPageIndex <= _pages);
1161*fdd8201dSApple OSS Distributions 			} else {
1162*fdd8201dSApple OSS Distributions 				err = vm_map_enter_mem_object(map,
1163*fdd8201dSApple OSS Distributions 				    &mapAddr,
1164*fdd8201dSApple OSS Distributions 				    chunk, 0 /* mask */,
1165*fdd8201dSApple OSS Distributions 				    (VM_FLAGS_FIXED
1166*fdd8201dSApple OSS Distributions 				    | VM_FLAGS_OVERWRITE),
1167*fdd8201dSApple OSS Distributions 				    vmk_flags,
1168*fdd8201dSApple OSS Distributions 				    tag,
1169*fdd8201dSApple OSS Distributions 				    entry->entry,
1170*fdd8201dSApple OSS Distributions 				    entryOffset,
1171*fdd8201dSApple OSS Distributions 				    false,               // copy
1172*fdd8201dSApple OSS Distributions 				    prot,               // cur
1173*fdd8201dSApple OSS Distributions 				    prot,               // max
1174*fdd8201dSApple OSS Distributions 				    VM_INHERIT_NONE);
1175*fdd8201dSApple OSS Distributions 			}
1176*fdd8201dSApple OSS Distributions 			if (KERN_SUCCESS != err) {
1177*fdd8201dSApple OSS Distributions 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1178*fdd8201dSApple OSS Distributions 				break;
1179*fdd8201dSApple OSS Distributions 			}
1180*fdd8201dSApple OSS Distributions 			remain -= chunk;
1181*fdd8201dSApple OSS Distributions 			if (!remain) {
1182*fdd8201dSApple OSS Distributions 				break;
1183*fdd8201dSApple OSS Distributions 			}
1184*fdd8201dSApple OSS Distributions 			mapAddr  += chunk;
1185*fdd8201dSApple OSS Distributions 			offset   += chunk - pageOffset;
1186*fdd8201dSApple OSS Distributions 		}
1187*fdd8201dSApple OSS Distributions 		pageOffset = 0;
1188*fdd8201dSApple OSS Distributions 		entry++;
1189*fdd8201dSApple OSS Distributions 		entryIdx++;
1190*fdd8201dSApple OSS Distributions 		if (entryIdx >= ref->count) {
1191*fdd8201dSApple OSS Distributions 			err = kIOReturnOverrun;
1192*fdd8201dSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1193*fdd8201dSApple OSS Distributions 			break;
1194*fdd8201dSApple OSS Distributions 		}
1195*fdd8201dSApple OSS Distributions 	}
1196*fdd8201dSApple OSS Distributions 
1197*fdd8201dSApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1198*fdd8201dSApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1199*fdd8201dSApple OSS Distributions 		addr = 0;
1200*fdd8201dSApple OSS Distributions 	}
1201*fdd8201dSApple OSS Distributions 	*inaddr = addr;
1202*fdd8201dSApple OSS Distributions 
1203*fdd8201dSApple OSS Distributions 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1204*fdd8201dSApple OSS Distributions 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1205*fdd8201dSApple OSS Distributions 	}
1206*fdd8201dSApple OSS Distributions 	return err;
1207*fdd8201dSApple OSS Distributions }
1208*fdd8201dSApple OSS Distributions 
// Compile-time debug switch: set to 1 to enable verbose printf tracing of the
// unaligned-mapping path in memoryReferenceMapNew() (offsets, entry sizes,
// allocation results). Must remain 0 in production builds.
1209*fdd8201dSApple OSS Distributions #define LOGUNALIGN 0
1210*fdd8201dSApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1211*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1212*fdd8201dSApple OSS Distributions 	IOMemoryReference * ref,
1213*fdd8201dSApple OSS Distributions 	vm_map_t            map,
1214*fdd8201dSApple OSS Distributions 	mach_vm_size_t      inoffset,
1215*fdd8201dSApple OSS Distributions 	mach_vm_size_t      size,
1216*fdd8201dSApple OSS Distributions 	IOOptionBits        options,
1217*fdd8201dSApple OSS Distributions 	mach_vm_address_t * inaddr)
1218*fdd8201dSApple OSS Distributions {
1219*fdd8201dSApple OSS Distributions 	IOReturn            err;
1220*fdd8201dSApple OSS Distributions 	int64_t             offset = inoffset;
1221*fdd8201dSApple OSS Distributions 	uint32_t            entryIdx, firstEntryIdx;
1222*fdd8201dSApple OSS Distributions 	vm_map_offset_t     addr, mapAddr, mapAddrOut;
1223*fdd8201dSApple OSS Distributions 	vm_map_offset_t     entryOffset, remain, chunk;
1224*fdd8201dSApple OSS Distributions 
1225*fdd8201dSApple OSS Distributions 	IOMemoryEntry    * entry;
1226*fdd8201dSApple OSS Distributions 	vm_prot_t          prot, memEntryCacheMode;
1227*fdd8201dSApple OSS Distributions 	IOOptionBits       type;
1228*fdd8201dSApple OSS Distributions 	IOOptionBits       cacheMode;
1229*fdd8201dSApple OSS Distributions 	vm_tag_t           tag;
1230*fdd8201dSApple OSS Distributions 	// for the kIOMapPrefault option.
1231*fdd8201dSApple OSS Distributions 	upl_page_info_t  * pageList = NULL;
1232*fdd8201dSApple OSS Distributions 	UInt               currentPageIndex = 0;
1233*fdd8201dSApple OSS Distributions 	bool               didAlloc;
1234*fdd8201dSApple OSS Distributions 
1235*fdd8201dSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1236*fdd8201dSApple OSS Distributions 
1237*fdd8201dSApple OSS Distributions 	if (ref->mapRef) {
1238*fdd8201dSApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1239*fdd8201dSApple OSS Distributions 		return err;
1240*fdd8201dSApple OSS Distributions 	}
1241*fdd8201dSApple OSS Distributions 
1242*fdd8201dSApple OSS Distributions #if LOGUNALIGN
1243*fdd8201dSApple OSS Distributions 	printf("MAP offset %qx, %qx\n", inoffset, size);
1244*fdd8201dSApple OSS Distributions #endif
1245*fdd8201dSApple OSS Distributions 
1246*fdd8201dSApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
1247*fdd8201dSApple OSS Distributions 
1248*fdd8201dSApple OSS Distributions 	prot = VM_PROT_READ;
1249*fdd8201dSApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
1250*fdd8201dSApple OSS Distributions 		prot |= VM_PROT_WRITE;
1251*fdd8201dSApple OSS Distributions 	}
1252*fdd8201dSApple OSS Distributions 	prot &= ref->prot;
1253*fdd8201dSApple OSS Distributions 
1254*fdd8201dSApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1255*fdd8201dSApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
1256*fdd8201dSApple OSS Distributions 		// VM system requires write access to update named entry cache mode
1257*fdd8201dSApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1258*fdd8201dSApple OSS Distributions 	}
1259*fdd8201dSApple OSS Distributions 
1260*fdd8201dSApple OSS Distributions 	tag = (vm_tag_t) getVMTag(map);
1261*fdd8201dSApple OSS Distributions 
1262*fdd8201dSApple OSS Distributions 	addr       = 0;
1263*fdd8201dSApple OSS Distributions 	didAlloc   = false;
1264*fdd8201dSApple OSS Distributions 
1265*fdd8201dSApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
1266*fdd8201dSApple OSS Distributions 		addr = *inaddr;
1267*fdd8201dSApple OSS Distributions 	}
1268*fdd8201dSApple OSS Distributions 
1269*fdd8201dSApple OSS Distributions 	// find first entry for offset
1270*fdd8201dSApple OSS Distributions 	for (firstEntryIdx = 0;
1271*fdd8201dSApple OSS Distributions 	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1272*fdd8201dSApple OSS Distributions 	    firstEntryIdx++) {
1273*fdd8201dSApple OSS Distributions 	}
1274*fdd8201dSApple OSS Distributions 	firstEntryIdx--;
1275*fdd8201dSApple OSS Distributions 
1276*fdd8201dSApple OSS Distributions 	// calculate required VM space
1277*fdd8201dSApple OSS Distributions 
1278*fdd8201dSApple OSS Distributions 	entryIdx = firstEntryIdx;
1279*fdd8201dSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1280*fdd8201dSApple OSS Distributions 
1281*fdd8201dSApple OSS Distributions 	remain  = size;
1282*fdd8201dSApple OSS Distributions 	int64_t iteroffset = offset;
1283*fdd8201dSApple OSS Distributions 	uint64_t mapSize = 0;
1284*fdd8201dSApple OSS Distributions 	while (remain) {
1285*fdd8201dSApple OSS Distributions 		entryOffset = iteroffset - entry->offset;
1286*fdd8201dSApple OSS Distributions 		if (entryOffset >= entry->size) {
1287*fdd8201dSApple OSS Distributions 			panic("entryOffset");
1288*fdd8201dSApple OSS Distributions 		}
1289*fdd8201dSApple OSS Distributions 
1290*fdd8201dSApple OSS Distributions #if LOGUNALIGN
1291*fdd8201dSApple OSS Distributions 		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1292*fdd8201dSApple OSS Distributions 		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1293*fdd8201dSApple OSS Distributions #endif
1294*fdd8201dSApple OSS Distributions 
1295*fdd8201dSApple OSS Distributions 		chunk = entry->size - entryOffset;
1296*fdd8201dSApple OSS Distributions 		if (chunk) {
1297*fdd8201dSApple OSS Distributions 			if (chunk > remain) {
1298*fdd8201dSApple OSS Distributions 				chunk = remain;
1299*fdd8201dSApple OSS Distributions 			}
1300*fdd8201dSApple OSS Distributions 			mach_vm_size_t entrySize;
1301*fdd8201dSApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1302*fdd8201dSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1303*fdd8201dSApple OSS Distributions 			mapSize += entrySize;
1304*fdd8201dSApple OSS Distributions 
1305*fdd8201dSApple OSS Distributions 			remain -= chunk;
1306*fdd8201dSApple OSS Distributions 			if (!remain) {
1307*fdd8201dSApple OSS Distributions 				break;
1308*fdd8201dSApple OSS Distributions 			}
1309*fdd8201dSApple OSS Distributions 			iteroffset   += chunk; // - pageOffset;
1310*fdd8201dSApple OSS Distributions 		}
1311*fdd8201dSApple OSS Distributions 		entry++;
1312*fdd8201dSApple OSS Distributions 		entryIdx++;
1313*fdd8201dSApple OSS Distributions 		if (entryIdx >= ref->count) {
1314*fdd8201dSApple OSS Distributions 			panic("overrun");
1315*fdd8201dSApple OSS Distributions 			err = kIOReturnOverrun;
1316*fdd8201dSApple OSS Distributions 			break;
1317*fdd8201dSApple OSS Distributions 		}
1318*fdd8201dSApple OSS Distributions 	}
1319*fdd8201dSApple OSS Distributions 
1320*fdd8201dSApple OSS Distributions 	if (kIOMapOverwrite & options) {
1321*fdd8201dSApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1322*fdd8201dSApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1323*fdd8201dSApple OSS Distributions 		}
1324*fdd8201dSApple OSS Distributions 		err = KERN_SUCCESS;
1325*fdd8201dSApple OSS Distributions 	} else {
1326*fdd8201dSApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1327*fdd8201dSApple OSS Distributions 		ref.map     = map;
1328*fdd8201dSApple OSS Distributions 		ref.tag     = tag;
1329*fdd8201dSApple OSS Distributions 		ref.options = options;
1330*fdd8201dSApple OSS Distributions 		ref.size    = mapSize;
1331*fdd8201dSApple OSS Distributions 		ref.prot    = prot;
1332*fdd8201dSApple OSS Distributions 		if (options & kIOMapAnywhere) {
1333*fdd8201dSApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1334*fdd8201dSApple OSS Distributions 			ref.mapped = 0;
1335*fdd8201dSApple OSS Distributions 		} else {
1336*fdd8201dSApple OSS Distributions 			ref.mapped = addr;
1337*fdd8201dSApple OSS Distributions 		}
1338*fdd8201dSApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339*fdd8201dSApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1340*fdd8201dSApple OSS Distributions 		} else {
1341*fdd8201dSApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1342*fdd8201dSApple OSS Distributions 		}
1343*fdd8201dSApple OSS Distributions 
1344*fdd8201dSApple OSS Distributions 		if (KERN_SUCCESS == err) {
1345*fdd8201dSApple OSS Distributions 			addr     = ref.mapped;
1346*fdd8201dSApple OSS Distributions 			map      = ref.map;
1347*fdd8201dSApple OSS Distributions 			didAlloc = true;
1348*fdd8201dSApple OSS Distributions 		}
1349*fdd8201dSApple OSS Distributions #if LOGUNALIGN
1350*fdd8201dSApple OSS Distributions 		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1351*fdd8201dSApple OSS Distributions #endif
1352*fdd8201dSApple OSS Distributions 	}
1353*fdd8201dSApple OSS Distributions 
1354*fdd8201dSApple OSS Distributions 	/*
1355*fdd8201dSApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1356*fdd8201dSApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1357*fdd8201dSApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1358*fdd8201dSApple OSS Distributions 	 * operations.
1359*fdd8201dSApple OSS Distributions 	 */
1360*fdd8201dSApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1361*fdd8201dSApple OSS Distributions 		options &= ~kIOMapPrefault;
1362*fdd8201dSApple OSS Distributions 	}
1363*fdd8201dSApple OSS Distributions 
1364*fdd8201dSApple OSS Distributions 	/*
1365*fdd8201dSApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1366*fdd8201dSApple OSS Distributions 	 * memory type, and the underlying data.
1367*fdd8201dSApple OSS Distributions 	 */
1368*fdd8201dSApple OSS Distributions 	if (options & kIOMapPrefault) {
1369*fdd8201dSApple OSS Distributions 		/*
1370*fdd8201dSApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1371*fdd8201dSApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1372*fdd8201dSApple OSS Distributions 		 */
1373*fdd8201dSApple OSS Distributions 		assert(_wireCount != 0);
1374*fdd8201dSApple OSS Distributions 		assert(_memoryEntries != NULL);
1375*fdd8201dSApple OSS Distributions 		if ((_wireCount == 0) ||
1376*fdd8201dSApple OSS Distributions 		    (_memoryEntries == NULL)) {
1377*fdd8201dSApple OSS Distributions 			return kIOReturnBadArgument;
1378*fdd8201dSApple OSS Distributions 		}
1379*fdd8201dSApple OSS Distributions 
1380*fdd8201dSApple OSS Distributions 		// Get the page list.
1381*fdd8201dSApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1382*fdd8201dSApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1383*fdd8201dSApple OSS Distributions 		pageList = getPageList(dataP);
1384*fdd8201dSApple OSS Distributions 
1385*fdd8201dSApple OSS Distributions 		// Get the number of IOPLs.
1386*fdd8201dSApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1387*fdd8201dSApple OSS Distributions 
1388*fdd8201dSApple OSS Distributions 		/*
1389*fdd8201dSApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1390*fdd8201dSApple OSS Distributions 		 * the offset. The search will go past it, so we'll need to go back to the
1391*fdd8201dSApple OSS Distributions 		 * right range at the end.
1392*fdd8201dSApple OSS Distributions 		 */
1393*fdd8201dSApple OSS Distributions 		UInt ioplIndex = 0;
1394*fdd8201dSApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1395*fdd8201dSApple OSS Distributions 			ioplIndex++;
1396*fdd8201dSApple OSS Distributions 		}
1397*fdd8201dSApple OSS Distributions 		ioplIndex--;
1398*fdd8201dSApple OSS Distributions 
1399*fdd8201dSApple OSS Distributions 		// Retrieve the IOPL info block.
1400*fdd8201dSApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1401*fdd8201dSApple OSS Distributions 
1402*fdd8201dSApple OSS Distributions 		/*
1403*fdd8201dSApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1404*fdd8201dSApple OSS Distributions 		 * array.
1405*fdd8201dSApple OSS Distributions 		 */
1406*fdd8201dSApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1407*fdd8201dSApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1408*fdd8201dSApple OSS Distributions 		} else {
1409*fdd8201dSApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1410*fdd8201dSApple OSS Distributions 		}
1411*fdd8201dSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1412*fdd8201dSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1413*fdd8201dSApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1414*fdd8201dSApple OSS Distributions 
1415*fdd8201dSApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1416*fdd8201dSApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1417*fdd8201dSApple OSS Distributions 	}
1418*fdd8201dSApple OSS Distributions 
1419*fdd8201dSApple OSS Distributions 	// enter mappings
1420*fdd8201dSApple OSS Distributions 	remain   = size;
1421*fdd8201dSApple OSS Distributions 	mapAddr  = addr;
1422*fdd8201dSApple OSS Distributions 	entryIdx = firstEntryIdx;
1423*fdd8201dSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1424*fdd8201dSApple OSS Distributions 
1425*fdd8201dSApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1426*fdd8201dSApple OSS Distributions #if LOGUNALIGN
1427*fdd8201dSApple OSS Distributions 		printf("offset %qx, %qx\n", offset, entry->offset);
1428*fdd8201dSApple OSS Distributions #endif
1429*fdd8201dSApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1430*fdd8201dSApple OSS Distributions 			vm_size_t unused = 0;
1431*fdd8201dSApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1432*fdd8201dSApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1433*fdd8201dSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1434*fdd8201dSApple OSS Distributions 		}
1435*fdd8201dSApple OSS Distributions 		entryOffset = offset - entry->offset;
1436*fdd8201dSApple OSS Distributions 		if (entryOffset >= entry->size) {
1437*fdd8201dSApple OSS Distributions 			panic("entryOffset");
1438*fdd8201dSApple OSS Distributions 		}
1439*fdd8201dSApple OSS Distributions 		chunk = entry->size - entryOffset;
1440*fdd8201dSApple OSS Distributions #if LOGUNALIGN
1441*fdd8201dSApple OSS Distributions 		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1442*fdd8201dSApple OSS Distributions #endif
1443*fdd8201dSApple OSS Distributions 		if (chunk) {
1444*fdd8201dSApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags;
1445*fdd8201dSApple OSS Distributions 
1446*fdd8201dSApple OSS Distributions 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1447*fdd8201dSApple OSS Distributions 			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1448*fdd8201dSApple OSS Distributions 
1449*fdd8201dSApple OSS Distributions 			if (chunk > remain) {
1450*fdd8201dSApple OSS Distributions 				chunk = remain;
1451*fdd8201dSApple OSS Distributions 			}
1452*fdd8201dSApple OSS Distributions 			mapAddrOut = mapAddr;
1453*fdd8201dSApple OSS Distributions 			if (options & kIOMapPrefault) {
1454*fdd8201dSApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1455*fdd8201dSApple OSS Distributions 
1456*fdd8201dSApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1457*fdd8201dSApple OSS Distributions 				    &mapAddrOut,
1458*fdd8201dSApple OSS Distributions 				    chunk, 0 /* mask */,
1459*fdd8201dSApple OSS Distributions 				    (VM_FLAGS_FIXED
1460*fdd8201dSApple OSS Distributions 				    | VM_FLAGS_OVERWRITE
1461*fdd8201dSApple OSS Distributions 				    | VM_FLAGS_RETURN_DATA_ADDR),
1462*fdd8201dSApple OSS Distributions 				    vmk_flags,
1463*fdd8201dSApple OSS Distributions 				    tag,
1464*fdd8201dSApple OSS Distributions 				    entry->entry,
1465*fdd8201dSApple OSS Distributions 				    entryOffset,
1466*fdd8201dSApple OSS Distributions 				    prot,                        // cur
1467*fdd8201dSApple OSS Distributions 				    prot,                        // max
1468*fdd8201dSApple OSS Distributions 				    &pageList[currentPageIndex],
1469*fdd8201dSApple OSS Distributions 				    nb_pages);
1470*fdd8201dSApple OSS Distributions 
1471*fdd8201dSApple OSS Distributions 				// Compute the next index in the page list.
1472*fdd8201dSApple OSS Distributions 				currentPageIndex += nb_pages;
1473*fdd8201dSApple OSS Distributions 				assert(currentPageIndex <= _pages);
1474*fdd8201dSApple OSS Distributions 			} else {
1475*fdd8201dSApple OSS Distributions #if LOGUNALIGN
1476*fdd8201dSApple OSS Distributions 				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1477*fdd8201dSApple OSS Distributions #endif
1478*fdd8201dSApple OSS Distributions 				err = vm_map_enter_mem_object(map,
1479*fdd8201dSApple OSS Distributions 				    &mapAddrOut,
1480*fdd8201dSApple OSS Distributions 				    chunk, 0 /* mask */,
1481*fdd8201dSApple OSS Distributions 				    (VM_FLAGS_FIXED
1482*fdd8201dSApple OSS Distributions 				    | VM_FLAGS_OVERWRITE
1483*fdd8201dSApple OSS Distributions 				    | VM_FLAGS_RETURN_DATA_ADDR),
1484*fdd8201dSApple OSS Distributions 				    vmk_flags,
1485*fdd8201dSApple OSS Distributions 				    tag,
1486*fdd8201dSApple OSS Distributions 				    entry->entry,
1487*fdd8201dSApple OSS Distributions 				    entryOffset,
1488*fdd8201dSApple OSS Distributions 				    false,               // copy
1489*fdd8201dSApple OSS Distributions 				    prot,               // cur
1490*fdd8201dSApple OSS Distributions 				    prot,               // max
1491*fdd8201dSApple OSS Distributions 				    VM_INHERIT_NONE);
1492*fdd8201dSApple OSS Distributions 			}
1493*fdd8201dSApple OSS Distributions 			if (KERN_SUCCESS != err) {
1494*fdd8201dSApple OSS Distributions 				panic("map enter err %x", err);
1495*fdd8201dSApple OSS Distributions 				break;
1496*fdd8201dSApple OSS Distributions 			}
1497*fdd8201dSApple OSS Distributions #if LOGUNALIGN
1498*fdd8201dSApple OSS Distributions 			printf("mapAddr o %qx\n", mapAddrOut);
1499*fdd8201dSApple OSS Distributions #endif
1500*fdd8201dSApple OSS Distributions 			if (entryIdx == firstEntryIdx) {
1501*fdd8201dSApple OSS Distributions 				addr = mapAddrOut;
1502*fdd8201dSApple OSS Distributions 			}
1503*fdd8201dSApple OSS Distributions 			remain -= chunk;
1504*fdd8201dSApple OSS Distributions 			if (!remain) {
1505*fdd8201dSApple OSS Distributions 				break;
1506*fdd8201dSApple OSS Distributions 			}
1507*fdd8201dSApple OSS Distributions 			mach_vm_size_t entrySize;
1508*fdd8201dSApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1509*fdd8201dSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1510*fdd8201dSApple OSS Distributions 			mapAddr += entrySize;
1511*fdd8201dSApple OSS Distributions 			offset  += chunk;
1512*fdd8201dSApple OSS Distributions 		}
1513*fdd8201dSApple OSS Distributions 
1514*fdd8201dSApple OSS Distributions 		entry++;
1515*fdd8201dSApple OSS Distributions 		entryIdx++;
1516*fdd8201dSApple OSS Distributions 		if (entryIdx >= ref->count) {
1517*fdd8201dSApple OSS Distributions 			err = kIOReturnOverrun;
1518*fdd8201dSApple OSS Distributions 			break;
1519*fdd8201dSApple OSS Distributions 		}
1520*fdd8201dSApple OSS Distributions 	}
1521*fdd8201dSApple OSS Distributions 
1522*fdd8201dSApple OSS Distributions 	if (KERN_SUCCESS != err) {
1523*fdd8201dSApple OSS Distributions 		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1524*fdd8201dSApple OSS Distributions 	}
1525*fdd8201dSApple OSS Distributions 
1526*fdd8201dSApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1527*fdd8201dSApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1528*fdd8201dSApple OSS Distributions 		addr = 0;
1529*fdd8201dSApple OSS Distributions 	}
1530*fdd8201dSApple OSS Distributions 	*inaddr = addr;
1531*fdd8201dSApple OSS Distributions 
1532*fdd8201dSApple OSS Distributions 	return err;
1533*fdd8201dSApple OSS Distributions }
1534*fdd8201dSApple OSS Distributions 
1535*fdd8201dSApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1536*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1537*fdd8201dSApple OSS Distributions 	IOMemoryReference * ref,
1538*fdd8201dSApple OSS Distributions 	uint64_t          * offset)
1539*fdd8201dSApple OSS Distributions {
1540*fdd8201dSApple OSS Distributions 	kern_return_t kr;
1541*fdd8201dSApple OSS Distributions 	vm_object_offset_t data_offset = 0;
1542*fdd8201dSApple OSS Distributions 	uint64_t total;
1543*fdd8201dSApple OSS Distributions 	uint32_t idx;
1544*fdd8201dSApple OSS Distributions 
1545*fdd8201dSApple OSS Distributions 	assert(ref->count);
1546*fdd8201dSApple OSS Distributions 	if (offset) {
1547*fdd8201dSApple OSS Distributions 		*offset = (uint64_t) data_offset;
1548*fdd8201dSApple OSS Distributions 	}
1549*fdd8201dSApple OSS Distributions 	total = 0;
1550*fdd8201dSApple OSS Distributions 	for (idx = 0; idx < ref->count; idx++) {
1551*fdd8201dSApple OSS Distributions 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1552*fdd8201dSApple OSS Distributions 		    &data_offset);
1553*fdd8201dSApple OSS Distributions 		if (KERN_SUCCESS != kr) {
1554*fdd8201dSApple OSS Distributions 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1555*fdd8201dSApple OSS Distributions 		} else if (0 != data_offset) {
1556*fdd8201dSApple OSS Distributions 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1557*fdd8201dSApple OSS Distributions 		}
1558*fdd8201dSApple OSS Distributions 		if (offset && !idx) {
1559*fdd8201dSApple OSS Distributions 			*offset = (uint64_t) data_offset;
1560*fdd8201dSApple OSS Distributions 		}
1561*fdd8201dSApple OSS Distributions 		total += round_page(data_offset + ref->entries[idx].size);
1562*fdd8201dSApple OSS Distributions 	}
1563*fdd8201dSApple OSS Distributions 
1564*fdd8201dSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1565*fdd8201dSApple OSS Distributions 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1566*fdd8201dSApple OSS Distributions 
1567*fdd8201dSApple OSS Distributions 	return total;
1568*fdd8201dSApple OSS Distributions }
1569*fdd8201dSApple OSS Distributions 
1570*fdd8201dSApple OSS Distributions 
1571*fdd8201dSApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1572*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1573*fdd8201dSApple OSS Distributions 	IOMemoryReference * ref,
1574*fdd8201dSApple OSS Distributions 	IOByteCount       * residentPageCount,
1575*fdd8201dSApple OSS Distributions 	IOByteCount       * dirtyPageCount)
1576*fdd8201dSApple OSS Distributions {
1577*fdd8201dSApple OSS Distributions 	IOReturn        err;
1578*fdd8201dSApple OSS Distributions 	IOMemoryEntry * entries;
1579*fdd8201dSApple OSS Distributions 	unsigned int resident, dirty;
1580*fdd8201dSApple OSS Distributions 	unsigned int totalResident, totalDirty;
1581*fdd8201dSApple OSS Distributions 
1582*fdd8201dSApple OSS Distributions 	totalResident = totalDirty = 0;
1583*fdd8201dSApple OSS Distributions 	err = kIOReturnSuccess;
1584*fdd8201dSApple OSS Distributions 	entries = ref->entries + ref->count;
1585*fdd8201dSApple OSS Distributions 	while (entries > &ref->entries[0]) {
1586*fdd8201dSApple OSS Distributions 		entries--;
1587*fdd8201dSApple OSS Distributions 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1588*fdd8201dSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1589*fdd8201dSApple OSS Distributions 			break;
1590*fdd8201dSApple OSS Distributions 		}
1591*fdd8201dSApple OSS Distributions 		totalResident += resident;
1592*fdd8201dSApple OSS Distributions 		totalDirty    += dirty;
1593*fdd8201dSApple OSS Distributions 	}
1594*fdd8201dSApple OSS Distributions 
1595*fdd8201dSApple OSS Distributions 	if (residentPageCount) {
1596*fdd8201dSApple OSS Distributions 		*residentPageCount = totalResident;
1597*fdd8201dSApple OSS Distributions 	}
1598*fdd8201dSApple OSS Distributions 	if (dirtyPageCount) {
1599*fdd8201dSApple OSS Distributions 		*dirtyPageCount    = totalDirty;
1600*fdd8201dSApple OSS Distributions 	}
1601*fdd8201dSApple OSS Distributions 	return err;
1602*fdd8201dSApple OSS Distributions }
1603*fdd8201dSApple OSS Distributions 
/*
 * Set the purgeability state of every entry in the memory reference to
 * newState, aggregating the entries' resulting states into a single
 * value returned via oldState (if non-NULL).
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits        newState,
	IOOptionBits      * oldState)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	vm_purgable_t   control;
	int             totalState, state;

	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	// Walk the entries from last to first.
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;

		// Translate the requested IOKit state into a VM control/state
		// pair; 'state' appears to be in-out (request in, previous
		// per-entry state back out of the control call below).
		err = purgeableControlBits(newState, &control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// Convert the returned VM state back into IOKit constants.
		err = purgeableStateBits(&state);
		if (KERN_SUCCESS != err) {
			break;
		}

		// Aggregate across entries: Empty dominates, then Volatile,
		// then NonVolatile.
		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}

	// On error, totalState reflects only the entries processed so far.
	if (oldState) {
		*oldState = totalState;
	}
	return err;
}
1652*fdd8201dSApple OSS Distributions 
1653*fdd8201dSApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1654*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1655*fdd8201dSApple OSS Distributions 	IOMemoryReference * ref,
1656*fdd8201dSApple OSS Distributions 	task_t              newOwner,
1657*fdd8201dSApple OSS Distributions 	int                 newLedgerTag,
1658*fdd8201dSApple OSS Distributions 	IOOptionBits        newLedgerOptions)
1659*fdd8201dSApple OSS Distributions {
1660*fdd8201dSApple OSS Distributions 	IOReturn        err, totalErr;
1661*fdd8201dSApple OSS Distributions 	IOMemoryEntry * entries;
1662*fdd8201dSApple OSS Distributions 
1663*fdd8201dSApple OSS Distributions 	totalErr = kIOReturnSuccess;
1664*fdd8201dSApple OSS Distributions 	entries = ref->entries + ref->count;
1665*fdd8201dSApple OSS Distributions 	while (entries > &ref->entries[0]) {
1666*fdd8201dSApple OSS Distributions 		entries--;
1667*fdd8201dSApple OSS Distributions 
1668*fdd8201dSApple OSS Distributions 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1669*fdd8201dSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1670*fdd8201dSApple OSS Distributions 			totalErr = err;
1671*fdd8201dSApple OSS Distributions 		}
1672*fdd8201dSApple OSS Distributions 	}
1673*fdd8201dSApple OSS Distributions 
1674*fdd8201dSApple OSS Distributions 	return totalErr;
1675*fdd8201dSApple OSS Distributions }
1676*fdd8201dSApple OSS Distributions 
1677*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1678*fdd8201dSApple OSS Distributions 
1679*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1680*fdd8201dSApple OSS Distributions IOMemoryDescriptor::withAddress(void *      address,
1681*fdd8201dSApple OSS Distributions     IOByteCount   length,
1682*fdd8201dSApple OSS Distributions     IODirection direction)
1683*fdd8201dSApple OSS Distributions {
1684*fdd8201dSApple OSS Distributions 	return IOMemoryDescriptor::
1685*fdd8201dSApple OSS Distributions 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1686*fdd8201dSApple OSS Distributions }
1687*fdd8201dSApple OSS Distributions 
#ifndef __LP64__
/*
 * Legacy (pre-LP64) factory: describe a single virtual range in the
 * given task's address space. Returns NULL on allocation or init
 * failure.
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
    IOByteCount  length,
    IODirection  direction,
    task_t       task)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> md = OSMakeShared<IOGeneralMemoryDescriptor>();

	if (md && md->initWithAddress(address, length, direction, task)) {
		return os::move(md);
	}
	return nullptr;
}
#endif /* !__LP64__ */
1704*fdd8201dSApple OSS Distributions 
1705*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1706*fdd8201dSApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1707*fdd8201dSApple OSS Distributions 	IOPhysicalAddress       address,
1708*fdd8201dSApple OSS Distributions 	IOByteCount             length,
1709*fdd8201dSApple OSS Distributions 	IODirection             direction )
1710*fdd8201dSApple OSS Distributions {
1711*fdd8201dSApple OSS Distributions 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1712*fdd8201dSApple OSS Distributions }
1713*fdd8201dSApple OSS Distributions 
#ifndef __LP64__
/*
 * Legacy (pre-LP64) factory: describe several virtual ranges in the
 * given task. asReference avoids copying the range list. Returns NULL
 * on allocation or init failure.
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    task_t           task,
    bool             asReference)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> md = OSMakeShared<IOGeneralMemoryDescriptor>();

	if (md && md->initWithRanges(ranges, withCount, direction, task, asReference)) {
		return os::move(md);
	}
	return nullptr;
}
#endif /* !__LP64__ */
1731*fdd8201dSApple OSS Distributions 
1732*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1733*fdd8201dSApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1734*fdd8201dSApple OSS Distributions     mach_vm_size_t length,
1735*fdd8201dSApple OSS Distributions     IOOptionBits   options,
1736*fdd8201dSApple OSS Distributions     task_t         task)
1737*fdd8201dSApple OSS Distributions {
1738*fdd8201dSApple OSS Distributions 	IOAddressRange range = { address, length };
1739*fdd8201dSApple OSS Distributions 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1740*fdd8201dSApple OSS Distributions }
1741*fdd8201dSApple OSS Distributions 
1742*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1743*fdd8201dSApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1744*fdd8201dSApple OSS Distributions     UInt32           rangeCount,
1745*fdd8201dSApple OSS Distributions     IOOptionBits     options,
1746*fdd8201dSApple OSS Distributions     task_t           task)
1747*fdd8201dSApple OSS Distributions {
1748*fdd8201dSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1749*fdd8201dSApple OSS Distributions 	if (that) {
1750*fdd8201dSApple OSS Distributions 		if (task) {
1751*fdd8201dSApple OSS Distributions 			options |= kIOMemoryTypeVirtual64;
1752*fdd8201dSApple OSS Distributions 		} else {
1753*fdd8201dSApple OSS Distributions 			options |= kIOMemoryTypePhysical64;
1754*fdd8201dSApple OSS Distributions 		}
1755*fdd8201dSApple OSS Distributions 
1756*fdd8201dSApple OSS Distributions 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1757*fdd8201dSApple OSS Distributions 			return os::move(that);
1758*fdd8201dSApple OSS Distributions 		}
1759*fdd8201dSApple OSS Distributions 	}
1760*fdd8201dSApple OSS Distributions 
1761*fdd8201dSApple OSS Distributions 	return nullptr;
1762*fdd8201dSApple OSS Distributions }
1763*fdd8201dSApple OSS Distributions 
1764*fdd8201dSApple OSS Distributions 
1765*fdd8201dSApple OSS Distributions /*
1766*fdd8201dSApple OSS Distributions  * withOptions:
1767*fdd8201dSApple OSS Distributions  *
1768*fdd8201dSApple OSS Distributions  * Create a new IOMemoryDescriptor. The buffer is made up of several
1769*fdd8201dSApple OSS Distributions  * virtual address ranges, from a given task.
1770*fdd8201dSApple OSS Distributions  *
1771*fdd8201dSApple OSS Distributions  * Passing the ranges as a reference will avoid an extra allocation.
1772*fdd8201dSApple OSS Distributions  */
1773*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1774*fdd8201dSApple OSS Distributions IOMemoryDescriptor::withOptions(void *          buffers,
1775*fdd8201dSApple OSS Distributions     UInt32          count,
1776*fdd8201dSApple OSS Distributions     UInt32          offset,
1777*fdd8201dSApple OSS Distributions     task_t          task,
1778*fdd8201dSApple OSS Distributions     IOOptionBits    opts,
1779*fdd8201dSApple OSS Distributions     IOMapper *      mapper)
1780*fdd8201dSApple OSS Distributions {
1781*fdd8201dSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1782*fdd8201dSApple OSS Distributions 
1783*fdd8201dSApple OSS Distributions 	if (self
1784*fdd8201dSApple OSS Distributions 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1785*fdd8201dSApple OSS Distributions 		return nullptr;
1786*fdd8201dSApple OSS Distributions 	}
1787*fdd8201dSApple OSS Distributions 
1788*fdd8201dSApple OSS Distributions 	return os::move(self);
1789*fdd8201dSApple OSS Distributions }
1790*fdd8201dSApple OSS Distributions 
/*
 * Base-class initWithOptions always fails; subclasses (such as
 * IOGeneralMemoryDescriptor, used by the factories above) are expected
 * to override it to do the real work.
 */
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	return false;
}
1801*fdd8201dSApple OSS Distributions 
1802*fdd8201dSApple OSS Distributions #ifndef __LP64__
1803*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1804*fdd8201dSApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1805*fdd8201dSApple OSS Distributions     UInt32          withCount,
1806*fdd8201dSApple OSS Distributions     IODirection     direction,
1807*fdd8201dSApple OSS Distributions     bool            asReference)
1808*fdd8201dSApple OSS Distributions {
1809*fdd8201dSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1810*fdd8201dSApple OSS Distributions 	if (that) {
1811*fdd8201dSApple OSS Distributions 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1812*fdd8201dSApple OSS Distributions 			return os::move(that);
1813*fdd8201dSApple OSS Distributions 		}
1814*fdd8201dSApple OSS Distributions 	}
1815*fdd8201dSApple OSS Distributions 	return nullptr;
1816*fdd8201dSApple OSS Distributions }
1817*fdd8201dSApple OSS Distributions 
1818*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1819*fdd8201dSApple OSS Distributions IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
1820*fdd8201dSApple OSS Distributions     IOByteCount             offset,
1821*fdd8201dSApple OSS Distributions     IOByteCount             length,
1822*fdd8201dSApple OSS Distributions     IODirection             direction)
1823*fdd8201dSApple OSS Distributions {
1824*fdd8201dSApple OSS Distributions 	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1825*fdd8201dSApple OSS Distributions }
1826*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
1827*fdd8201dSApple OSS Distributions 
1828*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1829*fdd8201dSApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1830*fdd8201dSApple OSS Distributions {
1831*fdd8201dSApple OSS Distributions 	IOGeneralMemoryDescriptor *origGenMD =
1832*fdd8201dSApple OSS Distributions 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1833*fdd8201dSApple OSS Distributions 
1834*fdd8201dSApple OSS Distributions 	if (origGenMD) {
1835*fdd8201dSApple OSS Distributions 		return IOGeneralMemoryDescriptor::
1836*fdd8201dSApple OSS Distributions 		       withPersistentMemoryDescriptor(origGenMD);
1837*fdd8201dSApple OSS Distributions 	} else {
1838*fdd8201dSApple OSS Distributions 		return nullptr;
1839*fdd8201dSApple OSS Distributions 	}
1840*fdd8201dSApple OSS Distributions }
1841*fdd8201dSApple OSS Distributions 
/*
 * Build a persistent descriptor sharing the original's memory
 * reference. If the reference is reused, the original descriptor
 * itself is returned (retained); otherwise a new descriptor is
 * initialized around the fresh reference.
 */
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	// Create (or reuse) a memory reference for the original descriptor.
	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// Reference was reused: the original already owns it, so return the
	// original (retained) and drop the extra reference just taken.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	// Otherwise wrap the fresh reference in a new descriptor.
	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	// NOTE(review): on this failure path memRef is not visibly released;
	// presumably ownership was transferred via initData — confirm.
	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1867*fdd8201dSApple OSS Distributions 
#ifndef __LP64__
/*
 * Legacy initializer: wrap a single kernel-virtual range.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   withLength,
    IODirection withDirection)
{
	// Stash the range in the inline single-range storage, then defer
	// to the general range initializer.
	_singleRange.v.address = (vm_offset_t) address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
1879*fdd8201dSApple OSS Distributions 
/*
 * Legacy initializer: wrap a single virtual range in withTask's map.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    withLength,
    IODirection  withDirection,
    task_t       withTask)
{
	// Store the range inline and defer to the general initializer.
	_singleRange.v.address = address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
1891*fdd8201dSApple OSS Distributions 
/*
 * Legacy initializer: wrap a single physical range.
 */
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            withLength,
	IODirection            withDirection )
{
	// Store the physical range inline and defer to the general
	// physical-range initializer.
	_singleRange.p.address = address;
	_singleRange.p.length  = withLength;

	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
1903*fdd8201dSApple OSS Distributions 
1904*fdd8201dSApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1905*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1906*fdd8201dSApple OSS Distributions 	IOPhysicalRange * ranges,
1907*fdd8201dSApple OSS Distributions 	UInt32            count,
1908*fdd8201dSApple OSS Distributions 	IODirection       direction,
1909*fdd8201dSApple OSS Distributions 	bool              reference)
1910*fdd8201dSApple OSS Distributions {
1911*fdd8201dSApple OSS Distributions 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1912*fdd8201dSApple OSS Distributions 
1913*fdd8201dSApple OSS Distributions 	if (reference) {
1914*fdd8201dSApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1915*fdd8201dSApple OSS Distributions 	}
1916*fdd8201dSApple OSS Distributions 
1917*fdd8201dSApple OSS Distributions 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1918*fdd8201dSApple OSS Distributions }
1919*fdd8201dSApple OSS Distributions 
1920*fdd8201dSApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1921*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1922*fdd8201dSApple OSS Distributions 	IOVirtualRange * ranges,
1923*fdd8201dSApple OSS Distributions 	UInt32           count,
1924*fdd8201dSApple OSS Distributions 	IODirection      direction,
1925*fdd8201dSApple OSS Distributions 	task_t           task,
1926*fdd8201dSApple OSS Distributions 	bool             reference)
1927*fdd8201dSApple OSS Distributions {
1928*fdd8201dSApple OSS Distributions 	IOOptionBits mdOpts = direction;
1929*fdd8201dSApple OSS Distributions 
1930*fdd8201dSApple OSS Distributions 	if (reference) {
1931*fdd8201dSApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1932*fdd8201dSApple OSS Distributions 	}
1933*fdd8201dSApple OSS Distributions 
1934*fdd8201dSApple OSS Distributions 	if (task) {
1935*fdd8201dSApple OSS Distributions 		mdOpts |= kIOMemoryTypeVirtual;
1936*fdd8201dSApple OSS Distributions 
1937*fdd8201dSApple OSS Distributions 		// Auto-prepare if this is a kernel memory descriptor as very few
1938*fdd8201dSApple OSS Distributions 		// clients bother to prepare() kernel memory.
1939*fdd8201dSApple OSS Distributions 		// But it was not enforced so what are you going to do?
1940*fdd8201dSApple OSS Distributions 		if (task == kernel_task) {
1941*fdd8201dSApple OSS Distributions 			mdOpts |= kIOMemoryAutoPrepare;
1942*fdd8201dSApple OSS Distributions 		}
1943*fdd8201dSApple OSS Distributions 	} else {
1944*fdd8201dSApple OSS Distributions 		mdOpts |= kIOMemoryTypePhysical;
1945*fdd8201dSApple OSS Distributions 	}
1946*fdd8201dSApple OSS Distributions 
1947*fdd8201dSApple OSS Distributions 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1948*fdd8201dSApple OSS Distributions }
1949*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
1950*fdd8201dSApple OSS Distributions 
/*
 * initWithOptions:
 *
 * Initialise an IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges from a given task, several physical ranges, a UPL
 * from the ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */
1964*fdd8201dSApple OSS Distributions 
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// Reject a 32-bit virtual range created against a 64-bit task: the
	// 32-bit IOVirtualRange cannot describe that address space. Callers
	// should use ::withAddressRange() instead.
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuration data to initialise the
	// arguments to this function, i.e. for a persistent MD the incoming
	// buffers/count/task/mapper arguments are replaced wholesale by those
	// of the original descriptor.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Validate the type/task pairing: virtual-address types need a task to
	// resolve against, while physical and UPL types must not have one.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized  instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		// NOTE: this 'type' intentionally shadows the outer one; it
		// describes the descriptor's *previous* contents, which are what
		// must be torn down here.
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			// Drop any outstanding wirings before the ranges go away.
			while (_wireCount) {
				complete();
			}
		}
		// Free the old range storage unless the caller owned it
		// (kIOMemoryAsReference); the free routine depends on how the
		// ranges were originally allocated.
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// Carry the redirected state over; a redirected descriptor keeps
		// its memory reference and existing mappings alive.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		// kIOMapperSystem is a sentinel meaning "use the system mapper";
		// resolve it (creating the system mapper if necessary).
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options               &= ~(kIOMemoryPreparedReadOnly);
	_flags                 = options;
	_task                  = task;

#ifndef __LP64__
	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-init bookkeeping (also needed on the re-init path).
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Allocate or release the prepare lock to match the requested
	// thread-safety option (the lock may already exist on re-init).
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// UPL path: adopt an externally-created UPL. Here 'buffers' is the
		// upl_t, 'count' is the byte length and 'offset' the byte offset
		// into the UPL.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length    = count;
		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		// Take a reference on the external UPL; dropped when the iopl is
		// torn down.
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice conveniently equal to 1
		iopl.fFlags  = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// A single 64-bit range that fits below 4GB can be
				// demoted to the 32-bit type and stored inline, avoiding
				// an allocation.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (kIOMemoryTypeVirtual64 == type) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				// Single ranges live in the inline _singleRange storage;
				// larger vectors are copied into a fresh allocation.
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
#if CONFIG_PROB_GZALLOC
		// Strip any probabilistic-guard-malloc encoding from kernel
		// addresses so downstream code sees the true addresses.
		if (task == kernel_task) {
			for (UInt32 i = 0; i < count; i++) {
				_ranges.v[i].address = pgz_decode(_ranges.v[i].address, _ranges.v[i].length);
			}
		}
#endif /* CONFIG_PROB_GZALLOC */
		_rangesCount = count;

		// Find starting address within the vector of ranges
		// Walk every range, accumulating the total byte length and page
		// count with explicit overflow checks; any failure breaks out
		// early with ind < count, which is rejected below.
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind);
			if (_task) {
				// Virtual range: ask the VM layer for the physical
				// footprint of the mapped range.
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				// Local physical pages must have page numbers that fit in
				// 32 bits (ppnum_t); remote memory is exempt.
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		// Fail if the loop broke early (bad range / arithmetic overflow)
		// or the total does not fit in an IOByteCount.
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false;                                   /* overflow */
		}
		_length      = totalLength;
		_pages       = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed


		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// Sanity-cap: a descriptor cannot cover more pages than exist.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Pick up a VM tag for accounting if none was set; IOSurface
			// allocations are additionally tagged for the user side.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			// Persistent descriptors need a named memory entry up front
			// (unless one was already supplied via the persistent-MD path).
			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2344*fdd8201dSApple OSS Distributions 
2345*fdd8201dSApple OSS Distributions /*
2346*fdd8201dSApple OSS Distributions  * free
2347*fdd8201dSApple OSS Distributions  *
2348*fdd8201dSApple OSS Distributions  * Free resources.
2349*fdd8201dSApple OSS Distributions  */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach the device pager's back-pointer to this descriptor under the
	// lock, so concurrent pager activity cannot reach a dying object.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical descriptors are never complete()d; just undo any DMA
		// mapping that was established.
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance every outstanding prepare() before teardown.
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free the range storage unless the caller owned it (by-reference);
	// the free routine must match how the ranges were allocated.
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2413*fdd8201dSApple OSS Distributions 
2414*fdd8201dSApple OSS Distributions #ifndef __LP64__
2415*fdd8201dSApple OSS Distributions void
unmapFromKernel()2416*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::unmapFromKernel()
2417*fdd8201dSApple OSS Distributions {
2418*fdd8201dSApple OSS Distributions 	panic("IOGMD::unmapFromKernel deprecated");
2419*fdd8201dSApple OSS Distributions }
2420*fdd8201dSApple OSS Distributions 
2421*fdd8201dSApple OSS Distributions void
mapIntoKernel(unsigned rangeIndex)2422*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
2423*fdd8201dSApple OSS Distributions {
2424*fdd8201dSApple OSS Distributions 	panic("IOGMD::mapIntoKernel deprecated");
2425*fdd8201dSApple OSS Distributions }
2426*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
2427*fdd8201dSApple OSS Distributions 
2428*fdd8201dSApple OSS Distributions /*
2429*fdd8201dSApple OSS Distributions  * getDirection:
2430*fdd8201dSApple OSS Distributions  *
2431*fdd8201dSApple OSS Distributions  * Get the direction of the transfer.
2432*fdd8201dSApple OSS Distributions  */
2433*fdd8201dSApple OSS Distributions IODirection
getDirection() const2434*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getDirection() const
2435*fdd8201dSApple OSS Distributions {
2436*fdd8201dSApple OSS Distributions #ifndef __LP64__
2437*fdd8201dSApple OSS Distributions 	if (_direction) {
2438*fdd8201dSApple OSS Distributions 		return _direction;
2439*fdd8201dSApple OSS Distributions 	}
2440*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
2441*fdd8201dSApple OSS Distributions 	return (IODirection) (_flags & kIOMemoryDirectionMask);
2442*fdd8201dSApple OSS Distributions }
2443*fdd8201dSApple OSS Distributions 
2444*fdd8201dSApple OSS Distributions /*
2445*fdd8201dSApple OSS Distributions  * getLength:
2446*fdd8201dSApple OSS Distributions  *
2447*fdd8201dSApple OSS Distributions  * Get the length of the transfer (over all ranges).
2448*fdd8201dSApple OSS Distributions  */
2449*fdd8201dSApple OSS Distributions IOByteCount
getLength() const2450*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getLength() const
2451*fdd8201dSApple OSS Distributions {
2452*fdd8201dSApple OSS Distributions 	return _length;
2453*fdd8201dSApple OSS Distributions }
2454*fdd8201dSApple OSS Distributions 
2455*fdd8201dSApple OSS Distributions void
setTag(IOOptionBits tag)2456*fdd8201dSApple OSS Distributions IOMemoryDescriptor::setTag( IOOptionBits tag )
2457*fdd8201dSApple OSS Distributions {
2458*fdd8201dSApple OSS Distributions 	_tag = tag;
2459*fdd8201dSApple OSS Distributions }
2460*fdd8201dSApple OSS Distributions 
2461*fdd8201dSApple OSS Distributions IOOptionBits
getTag(void)2462*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getTag( void )
2463*fdd8201dSApple OSS Distributions {
2464*fdd8201dSApple OSS Distributions 	return _tag;
2465*fdd8201dSApple OSS Distributions }
2466*fdd8201dSApple OSS Distributions 
2467*fdd8201dSApple OSS Distributions uint64_t
getFlags(void)2468*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getFlags(void)
2469*fdd8201dSApple OSS Distributions {
2470*fdd8201dSApple OSS Distributions 	return _flags;
2471*fdd8201dSApple OSS Distributions }
2472*fdd8201dSApple OSS Distributions 
2473*fdd8201dSApple OSS Distributions OSObject *
copyContext(void) const2474*fdd8201dSApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2475*fdd8201dSApple OSS Distributions {
2476*fdd8201dSApple OSS Distributions 	if (reserved) {
2477*fdd8201dSApple OSS Distributions 		OSObject * context = reserved->contextObject;
2478*fdd8201dSApple OSS Distributions 		if (context) {
2479*fdd8201dSApple OSS Distributions 			context->retain();
2480*fdd8201dSApple OSS Distributions 		}
2481*fdd8201dSApple OSS Distributions 		return context;
2482*fdd8201dSApple OSS Distributions 	} else {
2483*fdd8201dSApple OSS Distributions 		return NULL;
2484*fdd8201dSApple OSS Distributions 	}
2485*fdd8201dSApple OSS Distributions }
2486*fdd8201dSApple OSS Distributions 
2487*fdd8201dSApple OSS Distributions void
setContext(OSObject * obj)2488*fdd8201dSApple OSS Distributions IOMemoryDescriptor::setContext(OSObject * obj)
2489*fdd8201dSApple OSS Distributions {
2490*fdd8201dSApple OSS Distributions 	if (this->reserved == NULL && obj == NULL) {
2491*fdd8201dSApple OSS Distributions 		// No existing object, and no object to set
2492*fdd8201dSApple OSS Distributions 		return;
2493*fdd8201dSApple OSS Distributions 	}
2494*fdd8201dSApple OSS Distributions 
2495*fdd8201dSApple OSS Distributions 	IOMemoryDescriptorReserved * reserved = getKernelReserved();
2496*fdd8201dSApple OSS Distributions 	if (reserved) {
2497*fdd8201dSApple OSS Distributions 		OSObject * oldObject = reserved->contextObject;
2498*fdd8201dSApple OSS Distributions 		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
2499*fdd8201dSApple OSS Distributions 			oldObject->release();
2500*fdd8201dSApple OSS Distributions 		}
2501*fdd8201dSApple OSS Distributions 		if (obj != NULL) {
2502*fdd8201dSApple OSS Distributions 			obj->retain();
2503*fdd8201dSApple OSS Distributions 			reserved->contextObject = obj;
2504*fdd8201dSApple OSS Distributions 		}
2505*fdd8201dSApple OSS Distributions 	}
2506*fdd8201dSApple OSS Distributions }
2507*fdd8201dSApple OSS Distributions 
2508*fdd8201dSApple OSS Distributions #ifndef __LP64__
2509*fdd8201dSApple OSS Distributions #pragma clang diagnostic push
2510*fdd8201dSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2511*fdd8201dSApple OSS Distributions 
2512*fdd8201dSApple OSS Distributions // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
2513*fdd8201dSApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2514*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
2515*fdd8201dSApple OSS Distributions {
2516*fdd8201dSApple OSS Distributions 	addr64_t physAddr = 0;
2517*fdd8201dSApple OSS Distributions 
2518*fdd8201dSApple OSS Distributions 	if (prepare() == kIOReturnSuccess) {
2519*fdd8201dSApple OSS Distributions 		physAddr = getPhysicalSegment64( offset, length );
2520*fdd8201dSApple OSS Distributions 		complete();
2521*fdd8201dSApple OSS Distributions 	}
2522*fdd8201dSApple OSS Distributions 
2523*fdd8201dSApple OSS Distributions 	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2524*fdd8201dSApple OSS Distributions }
2525*fdd8201dSApple OSS Distributions 
2526*fdd8201dSApple OSS Distributions #pragma clang diagnostic pop
2527*fdd8201dSApple OSS Distributions 
2528*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
2529*fdd8201dSApple OSS Distributions 
2530*fdd8201dSApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)2531*fdd8201dSApple OSS Distributions IOMemoryDescriptor::readBytes
2532*fdd8201dSApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
2533*fdd8201dSApple OSS Distributions {
2534*fdd8201dSApple OSS Distributions 	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2535*fdd8201dSApple OSS Distributions 	IOByteCount endoffset;
2536*fdd8201dSApple OSS Distributions 	IOByteCount remaining;
2537*fdd8201dSApple OSS Distributions 
2538*fdd8201dSApple OSS Distributions 
2539*fdd8201dSApple OSS Distributions 	// Check that this entire I/O is within the available range
2540*fdd8201dSApple OSS Distributions 	if ((offset > _length)
2541*fdd8201dSApple OSS Distributions 	    || os_add_overflow(length, offset, &endoffset)
2542*fdd8201dSApple OSS Distributions 	    || (endoffset > _length)) {
2543*fdd8201dSApple OSS Distributions 		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2544*fdd8201dSApple OSS Distributions 		return 0;
2545*fdd8201dSApple OSS Distributions 	}
2546*fdd8201dSApple OSS Distributions 	if (offset >= _length) {
2547*fdd8201dSApple OSS Distributions 		return 0;
2548*fdd8201dSApple OSS Distributions 	}
2549*fdd8201dSApple OSS Distributions 
2550*fdd8201dSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
2551*fdd8201dSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
2552*fdd8201dSApple OSS Distributions 		return 0;
2553*fdd8201dSApple OSS Distributions 	}
2554*fdd8201dSApple OSS Distributions 
2555*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
2556*fdd8201dSApple OSS Distributions 		LOCK;
2557*fdd8201dSApple OSS Distributions 	}
2558*fdd8201dSApple OSS Distributions 
2559*fdd8201dSApple OSS Distributions 	remaining = length = min(length, _length - offset);
2560*fdd8201dSApple OSS Distributions 	while (remaining) { // (process another target segment?)
2561*fdd8201dSApple OSS Distributions 		addr64_t        srcAddr64;
2562*fdd8201dSApple OSS Distributions 		IOByteCount     srcLen;
2563*fdd8201dSApple OSS Distributions 
2564*fdd8201dSApple OSS Distributions 		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2565*fdd8201dSApple OSS Distributions 		if (!srcAddr64) {
2566*fdd8201dSApple OSS Distributions 			break;
2567*fdd8201dSApple OSS Distributions 		}
2568*fdd8201dSApple OSS Distributions 
2569*fdd8201dSApple OSS Distributions 		// Clip segment length to remaining
2570*fdd8201dSApple OSS Distributions 		if (srcLen > remaining) {
2571*fdd8201dSApple OSS Distributions 			srcLen = remaining;
2572*fdd8201dSApple OSS Distributions 		}
2573*fdd8201dSApple OSS Distributions 
2574*fdd8201dSApple OSS Distributions 		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2575*fdd8201dSApple OSS Distributions 			srcLen = (UINT_MAX - PAGE_SIZE + 1);
2576*fdd8201dSApple OSS Distributions 		}
2577*fdd8201dSApple OSS Distributions 		copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2578*fdd8201dSApple OSS Distributions 		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2579*fdd8201dSApple OSS Distributions 
2580*fdd8201dSApple OSS Distributions 		dstAddr   += srcLen;
2581*fdd8201dSApple OSS Distributions 		offset    += srcLen;
2582*fdd8201dSApple OSS Distributions 		remaining -= srcLen;
2583*fdd8201dSApple OSS Distributions 	}
2584*fdd8201dSApple OSS Distributions 
2585*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
2586*fdd8201dSApple OSS Distributions 		UNLOCK;
2587*fdd8201dSApple OSS Distributions 	}
2588*fdd8201dSApple OSS Distributions 
2589*fdd8201dSApple OSS Distributions 	assert(!remaining);
2590*fdd8201dSApple OSS Distributions 
2591*fdd8201dSApple OSS Distributions 	return length - remaining;
2592*fdd8201dSApple OSS Distributions }
2593*fdd8201dSApple OSS Distributions 
2594*fdd8201dSApple OSS Distributions IOByteCount
writeBytes(IOByteCount inoffset,const void * bytes,IOByteCount length)2595*fdd8201dSApple OSS Distributions IOMemoryDescriptor::writeBytes
2596*fdd8201dSApple OSS Distributions (IOByteCount inoffset, const void *bytes, IOByteCount length)
2597*fdd8201dSApple OSS Distributions {
2598*fdd8201dSApple OSS Distributions 	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2599*fdd8201dSApple OSS Distributions 	IOByteCount remaining;
2600*fdd8201dSApple OSS Distributions 	IOByteCount endoffset;
2601*fdd8201dSApple OSS Distributions 	IOByteCount offset = inoffset;
2602*fdd8201dSApple OSS Distributions 
2603*fdd8201dSApple OSS Distributions 	assert( !(kIOMemoryPreparedReadOnly & _flags));
2604*fdd8201dSApple OSS Distributions 
2605*fdd8201dSApple OSS Distributions 	// Check that this entire I/O is within the available range
2606*fdd8201dSApple OSS Distributions 	if ((offset > _length)
2607*fdd8201dSApple OSS Distributions 	    || os_add_overflow(length, offset, &endoffset)
2608*fdd8201dSApple OSS Distributions 	    || (endoffset > _length)) {
2609*fdd8201dSApple OSS Distributions 		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2610*fdd8201dSApple OSS Distributions 		return 0;
2611*fdd8201dSApple OSS Distributions 	}
2612*fdd8201dSApple OSS Distributions 	if (kIOMemoryPreparedReadOnly & _flags) {
2613*fdd8201dSApple OSS Distributions 		return 0;
2614*fdd8201dSApple OSS Distributions 	}
2615*fdd8201dSApple OSS Distributions 	if (offset >= _length) {
2616*fdd8201dSApple OSS Distributions 		return 0;
2617*fdd8201dSApple OSS Distributions 	}
2618*fdd8201dSApple OSS Distributions 
2619*fdd8201dSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
2620*fdd8201dSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
2621*fdd8201dSApple OSS Distributions 		return 0;
2622*fdd8201dSApple OSS Distributions 	}
2623*fdd8201dSApple OSS Distributions 
2624*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
2625*fdd8201dSApple OSS Distributions 		LOCK;
2626*fdd8201dSApple OSS Distributions 	}
2627*fdd8201dSApple OSS Distributions 
2628*fdd8201dSApple OSS Distributions 	remaining = length = min(length, _length - offset);
2629*fdd8201dSApple OSS Distributions 	while (remaining) { // (process another target segment?)
2630*fdd8201dSApple OSS Distributions 		addr64_t    dstAddr64;
2631*fdd8201dSApple OSS Distributions 		IOByteCount dstLen;
2632*fdd8201dSApple OSS Distributions 
2633*fdd8201dSApple OSS Distributions 		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2634*fdd8201dSApple OSS Distributions 		if (!dstAddr64) {
2635*fdd8201dSApple OSS Distributions 			break;
2636*fdd8201dSApple OSS Distributions 		}
2637*fdd8201dSApple OSS Distributions 
2638*fdd8201dSApple OSS Distributions 		// Clip segment length to remaining
2639*fdd8201dSApple OSS Distributions 		if (dstLen > remaining) {
2640*fdd8201dSApple OSS Distributions 			dstLen = remaining;
2641*fdd8201dSApple OSS Distributions 		}
2642*fdd8201dSApple OSS Distributions 
2643*fdd8201dSApple OSS Distributions 		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2644*fdd8201dSApple OSS Distributions 			dstLen = (UINT_MAX - PAGE_SIZE + 1);
2645*fdd8201dSApple OSS Distributions 		}
2646*fdd8201dSApple OSS Distributions 		if (!srcAddr) {
2647*fdd8201dSApple OSS Distributions 			bzero_phys(dstAddr64, (unsigned int) dstLen);
2648*fdd8201dSApple OSS Distributions 		} else {
2649*fdd8201dSApple OSS Distributions 			copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2650*fdd8201dSApple OSS Distributions 			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2651*fdd8201dSApple OSS Distributions 			srcAddr   += dstLen;
2652*fdd8201dSApple OSS Distributions 		}
2653*fdd8201dSApple OSS Distributions 		offset    += dstLen;
2654*fdd8201dSApple OSS Distributions 		remaining -= dstLen;
2655*fdd8201dSApple OSS Distributions 	}
2656*fdd8201dSApple OSS Distributions 
2657*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
2658*fdd8201dSApple OSS Distributions 		UNLOCK;
2659*fdd8201dSApple OSS Distributions 	}
2660*fdd8201dSApple OSS Distributions 
2661*fdd8201dSApple OSS Distributions 	assert(!remaining);
2662*fdd8201dSApple OSS Distributions 
2663*fdd8201dSApple OSS Distributions #if defined(__x86_64__)
2664*fdd8201dSApple OSS Distributions 	// copypv does not cppvFsnk on intel
2665*fdd8201dSApple OSS Distributions #else
2666*fdd8201dSApple OSS Distributions 	if (!srcAddr) {
2667*fdd8201dSApple OSS Distributions 		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2668*fdd8201dSApple OSS Distributions 	}
2669*fdd8201dSApple OSS Distributions #endif
2670*fdd8201dSApple OSS Distributions 
2671*fdd8201dSApple OSS Distributions 	return length - remaining;
2672*fdd8201dSApple OSS Distributions }
2673*fdd8201dSApple OSS Distributions 
2674*fdd8201dSApple OSS Distributions #ifndef __LP64__
2675*fdd8201dSApple OSS Distributions void
setPosition(IOByteCount position)2676*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2677*fdd8201dSApple OSS Distributions {
2678*fdd8201dSApple OSS Distributions 	panic("IOGMD::setPosition deprecated");
2679*fdd8201dSApple OSS Distributions }
2680*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
2681*fdd8201dSApple OSS Distributions 
2682*fdd8201dSApple OSS Distributions static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2683*fdd8201dSApple OSS Distributions static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2684*fdd8201dSApple OSS Distributions 
2685*fdd8201dSApple OSS Distributions uint64_t
getPreparationID(void)2686*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::getPreparationID( void )
2687*fdd8201dSApple OSS Distributions {
2688*fdd8201dSApple OSS Distributions 	ioGMDData *dataP;
2689*fdd8201dSApple OSS Distributions 
2690*fdd8201dSApple OSS Distributions 	if (!_wireCount) {
2691*fdd8201dSApple OSS Distributions 		return kIOPreparationIDUnprepared;
2692*fdd8201dSApple OSS Distributions 	}
2693*fdd8201dSApple OSS Distributions 
2694*fdd8201dSApple OSS Distributions 	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2695*fdd8201dSApple OSS Distributions 	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2696*fdd8201dSApple OSS Distributions 		IOMemoryDescriptor::setPreparationID();
2697*fdd8201dSApple OSS Distributions 		return IOMemoryDescriptor::getPreparationID();
2698*fdd8201dSApple OSS Distributions 	}
2699*fdd8201dSApple OSS Distributions 
2700*fdd8201dSApple OSS Distributions 	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2701*fdd8201dSApple OSS Distributions 		return kIOPreparationIDUnprepared;
2702*fdd8201dSApple OSS Distributions 	}
2703*fdd8201dSApple OSS Distributions 
2704*fdd8201dSApple OSS Distributions 	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2705*fdd8201dSApple OSS Distributions 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2706*fdd8201dSApple OSS Distributions 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2707*fdd8201dSApple OSS Distributions 	}
2708*fdd8201dSApple OSS Distributions 	return dataP->fPreparationID;
2709*fdd8201dSApple OSS Distributions }
2710*fdd8201dSApple OSS Distributions 
2711*fdd8201dSApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2712*fdd8201dSApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2713*fdd8201dSApple OSS Distributions {
2714*fdd8201dSApple OSS Distributions 	if (reserved->creator) {
2715*fdd8201dSApple OSS Distributions 		task_deallocate(reserved->creator);
2716*fdd8201dSApple OSS Distributions 		reserved->creator = NULL;
2717*fdd8201dSApple OSS Distributions 	}
2718*fdd8201dSApple OSS Distributions 
2719*fdd8201dSApple OSS Distributions 	if (reserved->contextObject) {
2720*fdd8201dSApple OSS Distributions 		reserved->contextObject->release();
2721*fdd8201dSApple OSS Distributions 		reserved->contextObject = NULL;
2722*fdd8201dSApple OSS Distributions 	}
2723*fdd8201dSApple OSS Distributions }
2724*fdd8201dSApple OSS Distributions 
2725*fdd8201dSApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2726*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2727*fdd8201dSApple OSS Distributions {
2728*fdd8201dSApple OSS Distributions 	if (!reserved) {
2729*fdd8201dSApple OSS Distributions 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2730*fdd8201dSApple OSS Distributions 	}
2731*fdd8201dSApple OSS Distributions 	return reserved;
2732*fdd8201dSApple OSS Distributions }
2733*fdd8201dSApple OSS Distributions 
2734*fdd8201dSApple OSS Distributions void
setPreparationID(void)2735*fdd8201dSApple OSS Distributions IOMemoryDescriptor::setPreparationID( void )
2736*fdd8201dSApple OSS Distributions {
2737*fdd8201dSApple OSS Distributions 	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2738*fdd8201dSApple OSS Distributions 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2739*fdd8201dSApple OSS Distributions 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2740*fdd8201dSApple OSS Distributions 	}
2741*fdd8201dSApple OSS Distributions }
2742*fdd8201dSApple OSS Distributions 
2743*fdd8201dSApple OSS Distributions uint64_t
getPreparationID(void)2744*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2745*fdd8201dSApple OSS Distributions {
2746*fdd8201dSApple OSS Distributions 	if (reserved) {
2747*fdd8201dSApple OSS Distributions 		return reserved->preparationID;
2748*fdd8201dSApple OSS Distributions 	} else {
2749*fdd8201dSApple OSS Distributions 		return kIOPreparationIDUnsupported;
2750*fdd8201dSApple OSS Distributions 	}
2751*fdd8201dSApple OSS Distributions }
2752*fdd8201dSApple OSS Distributions 
2753*fdd8201dSApple OSS Distributions void
setDescriptorID(void)2754*fdd8201dSApple OSS Distributions IOMemoryDescriptor::setDescriptorID( void )
2755*fdd8201dSApple OSS Distributions {
2756*fdd8201dSApple OSS Distributions 	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2757*fdd8201dSApple OSS Distributions 		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2758*fdd8201dSApple OSS Distributions 		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2759*fdd8201dSApple OSS Distributions 	}
2760*fdd8201dSApple OSS Distributions }
2761*fdd8201dSApple OSS Distributions 
2762*fdd8201dSApple OSS Distributions uint64_t
getDescriptorID(void)2763*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2764*fdd8201dSApple OSS Distributions {
2765*fdd8201dSApple OSS Distributions 	setDescriptorID();
2766*fdd8201dSApple OSS Distributions 
2767*fdd8201dSApple OSS Distributions 	if (reserved) {
2768*fdd8201dSApple OSS Distributions 		return reserved->descriptorID;
2769*fdd8201dSApple OSS Distributions 	} else {
2770*fdd8201dSApple OSS Distributions 		return kIODescriptorIDInvalid;
2771*fdd8201dSApple OSS Distributions 	}
2772*fdd8201dSApple OSS Distributions }
2773*fdd8201dSApple OSS Distributions 
2774*fdd8201dSApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2775*fdd8201dSApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2776*fdd8201dSApple OSS Distributions {
2777*fdd8201dSApple OSS Distributions 	if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2778*fdd8201dSApple OSS Distributions 		return kIOReturnSuccess;
2779*fdd8201dSApple OSS Distributions 	}
2780*fdd8201dSApple OSS Distributions 
2781*fdd8201dSApple OSS Distributions 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2782*fdd8201dSApple OSS Distributions 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2783*fdd8201dSApple OSS Distributions 		return kIOReturnBadArgument;
2784*fdd8201dSApple OSS Distributions 	}
2785*fdd8201dSApple OSS Distributions 
2786*fdd8201dSApple OSS Distributions 	uint64_t descriptorID = getDescriptorID();
2787*fdd8201dSApple OSS Distributions 	assert(descriptorID != kIODescriptorIDInvalid);
2788*fdd8201dSApple OSS Distributions 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2789*fdd8201dSApple OSS Distributions 		return kIOReturnBadArgument;
2790*fdd8201dSApple OSS Distributions 	}
2791*fdd8201dSApple OSS Distributions 
2792*fdd8201dSApple OSS Distributions 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2793*fdd8201dSApple OSS Distributions 
2794*fdd8201dSApple OSS Distributions #if __LP64__
2795*fdd8201dSApple OSS Distributions 	static const uint8_t num_segments_page = 8;
2796*fdd8201dSApple OSS Distributions #else
2797*fdd8201dSApple OSS Distributions 	static const uint8_t num_segments_page = 4;
2798*fdd8201dSApple OSS Distributions #endif
2799*fdd8201dSApple OSS Distributions 	static const uint8_t num_segments_long = 2;
2800*fdd8201dSApple OSS Distributions 
2801*fdd8201dSApple OSS Distributions 	IOPhysicalAddress segments_page[num_segments_page];
2802*fdd8201dSApple OSS Distributions 	IOPhysicalRange   segments_long[num_segments_long];
2803*fdd8201dSApple OSS Distributions 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2804*fdd8201dSApple OSS Distributions 	memset(segments_long, 0, sizeof(segments_long));
2805*fdd8201dSApple OSS Distributions 
2806*fdd8201dSApple OSS Distributions 	uint8_t segment_page_idx = 0;
2807*fdd8201dSApple OSS Distributions 	uint8_t segment_long_idx = 0;
2808*fdd8201dSApple OSS Distributions 
2809*fdd8201dSApple OSS Distributions 	IOPhysicalRange physical_segment;
2810*fdd8201dSApple OSS Distributions 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2811*fdd8201dSApple OSS Distributions 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2812*fdd8201dSApple OSS Distributions 
2813*fdd8201dSApple OSS Distributions 		if (physical_segment.length == 0) {
2814*fdd8201dSApple OSS Distributions 			break;
2815*fdd8201dSApple OSS Distributions 		}
2816*fdd8201dSApple OSS Distributions 
2817*fdd8201dSApple OSS Distributions 		/**
2818*fdd8201dSApple OSS Distributions 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2819*fdd8201dSApple OSS Distributions 		 * buffer memory, pack segment events according to the following.
2820*fdd8201dSApple OSS Distributions 		 *
2821*fdd8201dSApple OSS Distributions 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2822*fdd8201dSApple OSS Distributions 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2823*fdd8201dSApple OSS Distributions 		 *
2824*fdd8201dSApple OSS Distributions 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2825*fdd8201dSApple OSS Distributions 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2826*fdd8201dSApple OSS Distributions 		 * - unmapped pages will have a ppn of MAX_INT_32
2827*fdd8201dSApple OSS Distributions 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2828*fdd8201dSApple OSS Distributions 		 * - address_0, length_0, address_0, length_1
2829*fdd8201dSApple OSS Distributions 		 * - unmapped pages will have an address of 0
2830*fdd8201dSApple OSS Distributions 		 *
2831*fdd8201dSApple OSS Distributions 		 * During each iteration do the following depending on the length of the mapping:
2832*fdd8201dSApple OSS Distributions 		 * 1. add the current segment to the appropriate queue of pending segments
2833*fdd8201dSApple OSS Distributions 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2834*fdd8201dSApple OSS Distributions 		 * 1a. if FALSE emit and reset all events in the previous queue
2835*fdd8201dSApple OSS Distributions 		 * 2. check if we have filled up the current queue of pending events
2836*fdd8201dSApple OSS Distributions 		 * 2a. if TRUE emit and reset all events in the pending queue
2837*fdd8201dSApple OSS Distributions 		 * 3. after completing all iterations emit events in the current queue
2838*fdd8201dSApple OSS Distributions 		 */
2839*fdd8201dSApple OSS Distributions 
2840*fdd8201dSApple OSS Distributions 		bool emit_page = false;
2841*fdd8201dSApple OSS Distributions 		bool emit_long = false;
2842*fdd8201dSApple OSS Distributions 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2843*fdd8201dSApple OSS Distributions 			segments_page[segment_page_idx] = physical_segment.address;
2844*fdd8201dSApple OSS Distributions 			segment_page_idx++;
2845*fdd8201dSApple OSS Distributions 
2846*fdd8201dSApple OSS Distributions 			emit_long = segment_long_idx != 0;
2847*fdd8201dSApple OSS Distributions 			emit_page = segment_page_idx == num_segments_page;
2848*fdd8201dSApple OSS Distributions 
2849*fdd8201dSApple OSS Distributions 			if (os_unlikely(emit_long)) {
2850*fdd8201dSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2851*fdd8201dSApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2852*fdd8201dSApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2853*fdd8201dSApple OSS Distributions 			}
2854*fdd8201dSApple OSS Distributions 
2855*fdd8201dSApple OSS Distributions 			if (os_unlikely(emit_page)) {
2856*fdd8201dSApple OSS Distributions #if __LP64__
2857*fdd8201dSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2858*fdd8201dSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2859*fdd8201dSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2860*fdd8201dSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2861*fdd8201dSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2862*fdd8201dSApple OSS Distributions #else
2863*fdd8201dSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2864*fdd8201dSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2865*fdd8201dSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2866*fdd8201dSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2867*fdd8201dSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2868*fdd8201dSApple OSS Distributions #endif
2869*fdd8201dSApple OSS Distributions 			}
2870*fdd8201dSApple OSS Distributions 		} else {
2871*fdd8201dSApple OSS Distributions 			segments_long[segment_long_idx] = physical_segment;
2872*fdd8201dSApple OSS Distributions 			segment_long_idx++;
2873*fdd8201dSApple OSS Distributions 
2874*fdd8201dSApple OSS Distributions 			emit_page = segment_page_idx != 0;
2875*fdd8201dSApple OSS Distributions 			emit_long = segment_long_idx == num_segments_long;
2876*fdd8201dSApple OSS Distributions 
2877*fdd8201dSApple OSS Distributions 			if (os_unlikely(emit_page)) {
2878*fdd8201dSApple OSS Distributions #if __LP64__
2879*fdd8201dSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2880*fdd8201dSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2881*fdd8201dSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2882*fdd8201dSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2883*fdd8201dSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2884*fdd8201dSApple OSS Distributions #else
2885*fdd8201dSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2886*fdd8201dSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2887*fdd8201dSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2888*fdd8201dSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2889*fdd8201dSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2890*fdd8201dSApple OSS Distributions #endif
2891*fdd8201dSApple OSS Distributions 			}
2892*fdd8201dSApple OSS Distributions 
2893*fdd8201dSApple OSS Distributions 			if (emit_long) {
2894*fdd8201dSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2895*fdd8201dSApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2896*fdd8201dSApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2897*fdd8201dSApple OSS Distributions 			}
2898*fdd8201dSApple OSS Distributions 		}
2899*fdd8201dSApple OSS Distributions 
2900*fdd8201dSApple OSS Distributions 		if (os_unlikely(emit_page)) {
2901*fdd8201dSApple OSS Distributions 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2902*fdd8201dSApple OSS Distributions 			segment_page_idx = 0;
2903*fdd8201dSApple OSS Distributions 		}
2904*fdd8201dSApple OSS Distributions 
2905*fdd8201dSApple OSS Distributions 		if (os_unlikely(emit_long)) {
2906*fdd8201dSApple OSS Distributions 			memset(segments_long, 0, sizeof(segments_long));
2907*fdd8201dSApple OSS Distributions 			segment_long_idx = 0;
2908*fdd8201dSApple OSS Distributions 		}
2909*fdd8201dSApple OSS Distributions 	}
2910*fdd8201dSApple OSS Distributions 
2911*fdd8201dSApple OSS Distributions 	if (segment_page_idx != 0) {
2912*fdd8201dSApple OSS Distributions 		assert(segment_long_idx == 0);
2913*fdd8201dSApple OSS Distributions #if __LP64__
2914*fdd8201dSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2915*fdd8201dSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2916*fdd8201dSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2917*fdd8201dSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2918*fdd8201dSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2919*fdd8201dSApple OSS Distributions #else
2920*fdd8201dSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2921*fdd8201dSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[1]),
2922*fdd8201dSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[2]),
2923*fdd8201dSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[3]),
2924*fdd8201dSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[4]));
2925*fdd8201dSApple OSS Distributions #endif
2926*fdd8201dSApple OSS Distributions 	} else if (segment_long_idx != 0) {
2927*fdd8201dSApple OSS Distributions 		assert(segment_page_idx == 0);
2928*fdd8201dSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2929*fdd8201dSApple OSS Distributions 		    segments_long[0].address, segments_long[0].length,
2930*fdd8201dSApple OSS Distributions 		    segments_long[1].address, segments_long[1].length);
2931*fdd8201dSApple OSS Distributions 	}
2932*fdd8201dSApple OSS Distributions 
2933*fdd8201dSApple OSS Distributions 	return kIOReturnSuccess;
2934*fdd8201dSApple OSS Distributions }
2935*fdd8201dSApple OSS Distributions 
2936*fdd8201dSApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2937*fdd8201dSApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2938*fdd8201dSApple OSS Distributions {
2939*fdd8201dSApple OSS Distributions 	_kernelTag = (vm_tag_t) kernelTag;
2940*fdd8201dSApple OSS Distributions 	_userTag   = (vm_tag_t) userTag;
2941*fdd8201dSApple OSS Distributions }
2942*fdd8201dSApple OSS Distributions 
2943*fdd8201dSApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2944*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2945*fdd8201dSApple OSS Distributions {
2946*fdd8201dSApple OSS Distributions 	if (vm_kernel_map_is_kernel(map)) {
2947*fdd8201dSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2948*fdd8201dSApple OSS Distributions 			return (uint32_t) _kernelTag;
2949*fdd8201dSApple OSS Distributions 		}
2950*fdd8201dSApple OSS Distributions 	} else {
2951*fdd8201dSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _userTag) {
2952*fdd8201dSApple OSS Distributions 			return (uint32_t) _userTag;
2953*fdd8201dSApple OSS Distributions 		}
2954*fdd8201dSApple OSS Distributions 	}
2955*fdd8201dSApple OSS Distributions 	return IOMemoryTag(map);
2956*fdd8201dSApple OSS Distributions }
2957*fdd8201dSApple OSS Distributions 
2958*fdd8201dSApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2959*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2960*fdd8201dSApple OSS Distributions {
2961*fdd8201dSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
2962*fdd8201dSApple OSS Distributions 	DMACommandOps params;
2963*fdd8201dSApple OSS Distributions 	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2964*fdd8201dSApple OSS Distributions 	ioGMDData *dataP;
2965*fdd8201dSApple OSS Distributions 
2966*fdd8201dSApple OSS Distributions 	params = (op & ~kIOMDDMACommandOperationMask & op);
2967*fdd8201dSApple OSS Distributions 	op &= kIOMDDMACommandOperationMask;
2968*fdd8201dSApple OSS Distributions 
2969*fdd8201dSApple OSS Distributions 	if (kIOMDDMAMap == op) {
2970*fdd8201dSApple OSS Distributions 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
2971*fdd8201dSApple OSS Distributions 			return kIOReturnUnderrun;
2972*fdd8201dSApple OSS Distributions 		}
2973*fdd8201dSApple OSS Distributions 
2974*fdd8201dSApple OSS Distributions 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2975*fdd8201dSApple OSS Distributions 
2976*fdd8201dSApple OSS Distributions 		if (!_memoryEntries
2977*fdd8201dSApple OSS Distributions 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2978*fdd8201dSApple OSS Distributions 			return kIOReturnNoMemory;
2979*fdd8201dSApple OSS Distributions 		}
2980*fdd8201dSApple OSS Distributions 
2981*fdd8201dSApple OSS Distributions 		if (_memoryEntries && data->fMapper) {
2982*fdd8201dSApple OSS Distributions 			bool remap, keepMap;
2983*fdd8201dSApple OSS Distributions 			dataP = getDataP(_memoryEntries);
2984*fdd8201dSApple OSS Distributions 
2985*fdd8201dSApple OSS Distributions 			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2986*fdd8201dSApple OSS Distributions 				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2987*fdd8201dSApple OSS Distributions 			}
2988*fdd8201dSApple OSS Distributions 			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2989*fdd8201dSApple OSS Distributions 				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
2990*fdd8201dSApple OSS Distributions 			}
2991*fdd8201dSApple OSS Distributions 
2992*fdd8201dSApple OSS Distributions 			keepMap = (data->fMapper == gIOSystemMapper);
2993*fdd8201dSApple OSS Distributions 			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2994*fdd8201dSApple OSS Distributions 
2995*fdd8201dSApple OSS Distributions 			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2996*fdd8201dSApple OSS Distributions 				IOLockLock(_prepareLock);
2997*fdd8201dSApple OSS Distributions 			}
2998*fdd8201dSApple OSS Distributions 
2999*fdd8201dSApple OSS Distributions 			remap = (!keepMap);
3000*fdd8201dSApple OSS Distributions 			remap |= (dataP->fDMAMapNumAddressBits < 64)
3001*fdd8201dSApple OSS Distributions 			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3002*fdd8201dSApple OSS Distributions 			remap |= (dataP->fDMAMapAlignment > page_size);
3003*fdd8201dSApple OSS Distributions 
3004*fdd8201dSApple OSS Distributions 			if (remap || !dataP->fMappedBaseValid) {
3005*fdd8201dSApple OSS Distributions 				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3006*fdd8201dSApple OSS Distributions 				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3007*fdd8201dSApple OSS Distributions 					dataP->fMappedBase      = data->fAlloc;
3008*fdd8201dSApple OSS Distributions 					dataP->fMappedBaseValid = true;
3009*fdd8201dSApple OSS Distributions 					dataP->fMappedLength    = data->fAllocLength;
3010*fdd8201dSApple OSS Distributions 					data->fAllocLength      = 0;    // IOMD owns the alloc now
3011*fdd8201dSApple OSS Distributions 				}
3012*fdd8201dSApple OSS Distributions 			} else {
3013*fdd8201dSApple OSS Distributions 				data->fAlloc = dataP->fMappedBase;
3014*fdd8201dSApple OSS Distributions 				data->fAllocLength = 0;         // give out IOMD map
3015*fdd8201dSApple OSS Distributions 				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3016*fdd8201dSApple OSS Distributions 			}
3017*fdd8201dSApple OSS Distributions 
3018*fdd8201dSApple OSS Distributions 			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3019*fdd8201dSApple OSS Distributions 				IOLockUnlock(_prepareLock);
3020*fdd8201dSApple OSS Distributions 			}
3021*fdd8201dSApple OSS Distributions 		}
3022*fdd8201dSApple OSS Distributions 		return err;
3023*fdd8201dSApple OSS Distributions 	}
3024*fdd8201dSApple OSS Distributions 	if (kIOMDDMAUnmap == op) {
3025*fdd8201dSApple OSS Distributions 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
3026*fdd8201dSApple OSS Distributions 			return kIOReturnUnderrun;
3027*fdd8201dSApple OSS Distributions 		}
3028*fdd8201dSApple OSS Distributions 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3029*fdd8201dSApple OSS Distributions 
3030*fdd8201dSApple OSS Distributions 		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3031*fdd8201dSApple OSS Distributions 
3032*fdd8201dSApple OSS Distributions 		return kIOReturnSuccess;
3033*fdd8201dSApple OSS Distributions 	}
3034*fdd8201dSApple OSS Distributions 
3035*fdd8201dSApple OSS Distributions 	if (kIOMDAddDMAMapSpec == op) {
3036*fdd8201dSApple OSS Distributions 		if (dataSize < sizeof(IODMAMapSpecification)) {
3037*fdd8201dSApple OSS Distributions 			return kIOReturnUnderrun;
3038*fdd8201dSApple OSS Distributions 		}
3039*fdd8201dSApple OSS Distributions 
3040*fdd8201dSApple OSS Distributions 		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3041*fdd8201dSApple OSS Distributions 
3042*fdd8201dSApple OSS Distributions 		if (!_memoryEntries
3043*fdd8201dSApple OSS Distributions 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3044*fdd8201dSApple OSS Distributions 			return kIOReturnNoMemory;
3045*fdd8201dSApple OSS Distributions 		}
3046*fdd8201dSApple OSS Distributions 
3047*fdd8201dSApple OSS Distributions 		if (_memoryEntries) {
3048*fdd8201dSApple OSS Distributions 			dataP = getDataP(_memoryEntries);
3049*fdd8201dSApple OSS Distributions 			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3050*fdd8201dSApple OSS Distributions 				dataP->fDMAMapNumAddressBits = data->numAddressBits;
3051*fdd8201dSApple OSS Distributions 			}
3052*fdd8201dSApple OSS Distributions 			if (data->alignment > dataP->fDMAMapAlignment) {
3053*fdd8201dSApple OSS Distributions 				dataP->fDMAMapAlignment = data->alignment;
3054*fdd8201dSApple OSS Distributions 			}
3055*fdd8201dSApple OSS Distributions 		}
3056*fdd8201dSApple OSS Distributions 		return kIOReturnSuccess;
3057*fdd8201dSApple OSS Distributions 	}
3058*fdd8201dSApple OSS Distributions 
3059*fdd8201dSApple OSS Distributions 	if (kIOMDGetCharacteristics == op) {
3060*fdd8201dSApple OSS Distributions 		if (dataSize < sizeof(IOMDDMACharacteristics)) {
3061*fdd8201dSApple OSS Distributions 			return kIOReturnUnderrun;
3062*fdd8201dSApple OSS Distributions 		}
3063*fdd8201dSApple OSS Distributions 
3064*fdd8201dSApple OSS Distributions 		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3065*fdd8201dSApple OSS Distributions 		data->fLength = _length;
3066*fdd8201dSApple OSS Distributions 		data->fSGCount = _rangesCount;
3067*fdd8201dSApple OSS Distributions 		data->fPages = _pages;
3068*fdd8201dSApple OSS Distributions 		data->fDirection = getDirection();
3069*fdd8201dSApple OSS Distributions 		if (!_wireCount) {
3070*fdd8201dSApple OSS Distributions 			data->fIsPrepared = false;
3071*fdd8201dSApple OSS Distributions 		} else {
3072*fdd8201dSApple OSS Distributions 			data->fIsPrepared = true;
3073*fdd8201dSApple OSS Distributions 			data->fHighestPage = _highestPage;
3074*fdd8201dSApple OSS Distributions 			if (_memoryEntries) {
3075*fdd8201dSApple OSS Distributions 				dataP = getDataP(_memoryEntries);
3076*fdd8201dSApple OSS Distributions 				ioPLBlock *ioplList = getIOPLList(dataP);
3077*fdd8201dSApple OSS Distributions 				UInt count = getNumIOPL(_memoryEntries, dataP);
3078*fdd8201dSApple OSS Distributions 				if (count == 1) {
3079*fdd8201dSApple OSS Distributions 					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3080*fdd8201dSApple OSS Distributions 				}
3081*fdd8201dSApple OSS Distributions 			}
3082*fdd8201dSApple OSS Distributions 		}
3083*fdd8201dSApple OSS Distributions 
3084*fdd8201dSApple OSS Distributions 		return kIOReturnSuccess;
3085*fdd8201dSApple OSS Distributions 	} else if (kIOMDDMAActive == op) {
3086*fdd8201dSApple OSS Distributions 		if (params) {
3087*fdd8201dSApple OSS Distributions 			int16_t prior;
3088*fdd8201dSApple OSS Distributions 			prior = OSAddAtomic16(1, &md->_dmaReferences);
3089*fdd8201dSApple OSS Distributions 			if (!prior) {
3090*fdd8201dSApple OSS Distributions 				md->_mapName = NULL;
3091*fdd8201dSApple OSS Distributions 			}
3092*fdd8201dSApple OSS Distributions 		} else {
3093*fdd8201dSApple OSS Distributions 			if (md->_dmaReferences) {
3094*fdd8201dSApple OSS Distributions 				OSAddAtomic16(-1, &md->_dmaReferences);
3095*fdd8201dSApple OSS Distributions 			} else {
3096*fdd8201dSApple OSS Distributions 				panic("_dmaReferences underflow");
3097*fdd8201dSApple OSS Distributions 			}
3098*fdd8201dSApple OSS Distributions 		}
3099*fdd8201dSApple OSS Distributions 	} else if (kIOMDWalkSegments != op) {
3100*fdd8201dSApple OSS Distributions 		return kIOReturnBadArgument;
3101*fdd8201dSApple OSS Distributions 	}
3102*fdd8201dSApple OSS Distributions 
3103*fdd8201dSApple OSS Distributions 	// Get the next segment
3104*fdd8201dSApple OSS Distributions 	struct InternalState {
3105*fdd8201dSApple OSS Distributions 		IOMDDMAWalkSegmentArgs fIO;
3106*fdd8201dSApple OSS Distributions 		mach_vm_size_t fOffset2Index;
3107*fdd8201dSApple OSS Distributions 		mach_vm_size_t fNextOffset;
3108*fdd8201dSApple OSS Distributions 		UInt fIndex;
3109*fdd8201dSApple OSS Distributions 	} *isP;
3110*fdd8201dSApple OSS Distributions 
3111*fdd8201dSApple OSS Distributions 	// Find the next segment
3112*fdd8201dSApple OSS Distributions 	if (dataSize < sizeof(*isP)) {
3113*fdd8201dSApple OSS Distributions 		return kIOReturnUnderrun;
3114*fdd8201dSApple OSS Distributions 	}
3115*fdd8201dSApple OSS Distributions 
3116*fdd8201dSApple OSS Distributions 	isP = (InternalState *) vData;
3117*fdd8201dSApple OSS Distributions 	uint64_t offset = isP->fIO.fOffset;
3118*fdd8201dSApple OSS Distributions 	uint8_t mapped = isP->fIO.fMapped;
3119*fdd8201dSApple OSS Distributions 	uint64_t mappedBase;
3120*fdd8201dSApple OSS Distributions 
3121*fdd8201dSApple OSS Distributions 	if (mapped && (kIOMemoryRemote & _flags)) {
3122*fdd8201dSApple OSS Distributions 		return kIOReturnNotAttached;
3123*fdd8201dSApple OSS Distributions 	}
3124*fdd8201dSApple OSS Distributions 
3125*fdd8201dSApple OSS Distributions 	if (IOMapper::gSystem && mapped
3126*fdd8201dSApple OSS Distributions 	    && (!(kIOMemoryHostOnly & _flags))
3127*fdd8201dSApple OSS Distributions 	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3128*fdd8201dSApple OSS Distributions //	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3129*fdd8201dSApple OSS Distributions 		if (!_memoryEntries
3130*fdd8201dSApple OSS Distributions 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3131*fdd8201dSApple OSS Distributions 			return kIOReturnNoMemory;
3132*fdd8201dSApple OSS Distributions 		}
3133*fdd8201dSApple OSS Distributions 
3134*fdd8201dSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
3135*fdd8201dSApple OSS Distributions 		if (dataP->fMapper) {
3136*fdd8201dSApple OSS Distributions 			IODMAMapSpecification mapSpec;
3137*fdd8201dSApple OSS Distributions 			bzero(&mapSpec, sizeof(mapSpec));
3138*fdd8201dSApple OSS Distributions 			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3139*fdd8201dSApple OSS Distributions 			mapSpec.alignment = dataP->fDMAMapAlignment;
3140*fdd8201dSApple OSS Distributions 			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3141*fdd8201dSApple OSS Distributions 			if (kIOReturnSuccess != err) {
3142*fdd8201dSApple OSS Distributions 				return err;
3143*fdd8201dSApple OSS Distributions 			}
3144*fdd8201dSApple OSS Distributions 			dataP->fMappedBaseValid = true;
3145*fdd8201dSApple OSS Distributions 		}
3146*fdd8201dSApple OSS Distributions 	}
3147*fdd8201dSApple OSS Distributions 
3148*fdd8201dSApple OSS Distributions 	if (mapped) {
3149*fdd8201dSApple OSS Distributions 		if (IOMapper::gSystem
3150*fdd8201dSApple OSS Distributions 		    && (!(kIOMemoryHostOnly & _flags))
3151*fdd8201dSApple OSS Distributions 		    && _memoryEntries
3152*fdd8201dSApple OSS Distributions 		    && (dataP = getDataP(_memoryEntries))
3153*fdd8201dSApple OSS Distributions 		    && dataP->fMappedBaseValid) {
3154*fdd8201dSApple OSS Distributions 			mappedBase = dataP->fMappedBase;
3155*fdd8201dSApple OSS Distributions 		} else {
3156*fdd8201dSApple OSS Distributions 			mapped = 0;
3157*fdd8201dSApple OSS Distributions 		}
3158*fdd8201dSApple OSS Distributions 	}
3159*fdd8201dSApple OSS Distributions 
3160*fdd8201dSApple OSS Distributions 	if (offset >= _length) {
3161*fdd8201dSApple OSS Distributions 		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3162*fdd8201dSApple OSS Distributions 	}
3163*fdd8201dSApple OSS Distributions 
3164*fdd8201dSApple OSS Distributions 	// Validate the previous offset
3165*fdd8201dSApple OSS Distributions 	UInt ind;
3166*fdd8201dSApple OSS Distributions 	mach_vm_size_t off2Ind = isP->fOffset2Index;
3167*fdd8201dSApple OSS Distributions 	if (!params
3168*fdd8201dSApple OSS Distributions 	    && offset
3169*fdd8201dSApple OSS Distributions 	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
3170*fdd8201dSApple OSS Distributions 		ind = isP->fIndex;
3171*fdd8201dSApple OSS Distributions 	} else {
3172*fdd8201dSApple OSS Distributions 		ind = off2Ind = 0; // Start from beginning
3173*fdd8201dSApple OSS Distributions 	}
3174*fdd8201dSApple OSS Distributions 	mach_vm_size_t length;
3175*fdd8201dSApple OSS Distributions 	UInt64 address;
3176*fdd8201dSApple OSS Distributions 
3177*fdd8201dSApple OSS Distributions 	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3178*fdd8201dSApple OSS Distributions 		// Physical address based memory descriptor
3179*fdd8201dSApple OSS Distributions 		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3180*fdd8201dSApple OSS Distributions 
3181*fdd8201dSApple OSS Distributions 		// Find the range after the one that contains the offset
3182*fdd8201dSApple OSS Distributions 		mach_vm_size_t len;
3183*fdd8201dSApple OSS Distributions 		for (len = 0; off2Ind <= offset; ind++) {
3184*fdd8201dSApple OSS Distributions 			len = physP[ind].length;
3185*fdd8201dSApple OSS Distributions 			off2Ind += len;
3186*fdd8201dSApple OSS Distributions 		}
3187*fdd8201dSApple OSS Distributions 
3188*fdd8201dSApple OSS Distributions 		// Calculate length within range and starting address
3189*fdd8201dSApple OSS Distributions 		length   = off2Ind - offset;
3190*fdd8201dSApple OSS Distributions 		address  = physP[ind - 1].address + len - length;
3191*fdd8201dSApple OSS Distributions 
3192*fdd8201dSApple OSS Distributions 		if (true && mapped) {
3193*fdd8201dSApple OSS Distributions 			address = mappedBase + offset;
3194*fdd8201dSApple OSS Distributions 		} else {
3195*fdd8201dSApple OSS Distributions 			// see how far we can coalesce ranges
3196*fdd8201dSApple OSS Distributions 			while (ind < _rangesCount && address + length == physP[ind].address) {
3197*fdd8201dSApple OSS Distributions 				len = physP[ind].length;
3198*fdd8201dSApple OSS Distributions 				length += len;
3199*fdd8201dSApple OSS Distributions 				off2Ind += len;
3200*fdd8201dSApple OSS Distributions 				ind++;
3201*fdd8201dSApple OSS Distributions 			}
3202*fdd8201dSApple OSS Distributions 		}
3203*fdd8201dSApple OSS Distributions 
3204*fdd8201dSApple OSS Distributions 		// correct contiguous check overshoot
3205*fdd8201dSApple OSS Distributions 		ind--;
3206*fdd8201dSApple OSS Distributions 		off2Ind -= len;
3207*fdd8201dSApple OSS Distributions 	}
3208*fdd8201dSApple OSS Distributions #ifndef __LP64__
3209*fdd8201dSApple OSS Distributions 	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3210*fdd8201dSApple OSS Distributions 		// Physical address based memory descriptor
3211*fdd8201dSApple OSS Distributions 		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3212*fdd8201dSApple OSS Distributions 
3213*fdd8201dSApple OSS Distributions 		// Find the range after the one that contains the offset
3214*fdd8201dSApple OSS Distributions 		mach_vm_size_t len;
3215*fdd8201dSApple OSS Distributions 		for (len = 0; off2Ind <= offset; ind++) {
3216*fdd8201dSApple OSS Distributions 			len = physP[ind].length;
3217*fdd8201dSApple OSS Distributions 			off2Ind += len;
3218*fdd8201dSApple OSS Distributions 		}
3219*fdd8201dSApple OSS Distributions 
3220*fdd8201dSApple OSS Distributions 		// Calculate length within range and starting address
3221*fdd8201dSApple OSS Distributions 		length   = off2Ind - offset;
3222*fdd8201dSApple OSS Distributions 		address  = physP[ind - 1].address + len - length;
3223*fdd8201dSApple OSS Distributions 
3224*fdd8201dSApple OSS Distributions 		if (true && mapped) {
3225*fdd8201dSApple OSS Distributions 			address = mappedBase + offset;
3226*fdd8201dSApple OSS Distributions 		} else {
3227*fdd8201dSApple OSS Distributions 			// see how far we can coalesce ranges
3228*fdd8201dSApple OSS Distributions 			while (ind < _rangesCount && address + length == physP[ind].address) {
3229*fdd8201dSApple OSS Distributions 				len = physP[ind].length;
3230*fdd8201dSApple OSS Distributions 				length += len;
3231*fdd8201dSApple OSS Distributions 				off2Ind += len;
3232*fdd8201dSApple OSS Distributions 				ind++;
3233*fdd8201dSApple OSS Distributions 			}
3234*fdd8201dSApple OSS Distributions 		}
3235*fdd8201dSApple OSS Distributions 		// correct contiguous check overshoot
3236*fdd8201dSApple OSS Distributions 		ind--;
3237*fdd8201dSApple OSS Distributions 		off2Ind -= len;
3238*fdd8201dSApple OSS Distributions 	}
3239*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
3240*fdd8201dSApple OSS Distributions 	else {
3241*fdd8201dSApple OSS Distributions 		do {
3242*fdd8201dSApple OSS Distributions 			if (!_wireCount) {
3243*fdd8201dSApple OSS Distributions 				panic("IOGMD: not wired for the IODMACommand");
3244*fdd8201dSApple OSS Distributions 			}
3245*fdd8201dSApple OSS Distributions 
3246*fdd8201dSApple OSS Distributions 			assert(_memoryEntries);
3247*fdd8201dSApple OSS Distributions 
3248*fdd8201dSApple OSS Distributions 			dataP = getDataP(_memoryEntries);
3249*fdd8201dSApple OSS Distributions 			const ioPLBlock *ioplList = getIOPLList(dataP);
3250*fdd8201dSApple OSS Distributions 			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3251*fdd8201dSApple OSS Distributions 			upl_page_info_t *pageList = getPageList(dataP);
3252*fdd8201dSApple OSS Distributions 
3253*fdd8201dSApple OSS Distributions 			assert(numIOPLs > 0);
3254*fdd8201dSApple OSS Distributions 
3255*fdd8201dSApple OSS Distributions 			// Scan through iopl info blocks looking for block containing offset
3256*fdd8201dSApple OSS Distributions 			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3257*fdd8201dSApple OSS Distributions 				ind++;
3258*fdd8201dSApple OSS Distributions 			}
3259*fdd8201dSApple OSS Distributions 
3260*fdd8201dSApple OSS Distributions 			// Go back to actual range as search goes past it
3261*fdd8201dSApple OSS Distributions 			ioPLBlock ioplInfo = ioplList[ind - 1];
3262*fdd8201dSApple OSS Distributions 			off2Ind = ioplInfo.fIOMDOffset;
3263*fdd8201dSApple OSS Distributions 
3264*fdd8201dSApple OSS Distributions 			if (ind < numIOPLs) {
3265*fdd8201dSApple OSS Distributions 				length = ioplList[ind].fIOMDOffset;
3266*fdd8201dSApple OSS Distributions 			} else {
3267*fdd8201dSApple OSS Distributions 				length = _length;
3268*fdd8201dSApple OSS Distributions 			}
3269*fdd8201dSApple OSS Distributions 			length -= offset;       // Remainder within iopl
3270*fdd8201dSApple OSS Distributions 
3271*fdd8201dSApple OSS Distributions 			// Subtract offset till this iopl in total list
3272*fdd8201dSApple OSS Distributions 			offset -= off2Ind;
3273*fdd8201dSApple OSS Distributions 
3274*fdd8201dSApple OSS Distributions 			// If a mapped address is requested and this is a pre-mapped IOPL
3275*fdd8201dSApple OSS Distributions 			// then just need to compute an offset relative to the mapped base.
3276*fdd8201dSApple OSS Distributions 			if (mapped) {
3277*fdd8201dSApple OSS Distributions 				offset += (ioplInfo.fPageOffset & PAGE_MASK);
3278*fdd8201dSApple OSS Distributions 				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3279*fdd8201dSApple OSS Distributions 				continue; // Done leave do/while(false) now
3280*fdd8201dSApple OSS Distributions 			}
3281*fdd8201dSApple OSS Distributions 
3282*fdd8201dSApple OSS Distributions 			// The offset is rebased into the current iopl.
3283*fdd8201dSApple OSS Distributions 			// Now add the iopl 1st page offset.
3284*fdd8201dSApple OSS Distributions 			offset += ioplInfo.fPageOffset;
3285*fdd8201dSApple OSS Distributions 
3286*fdd8201dSApple OSS Distributions 			// For external UPLs the fPageInfo field points directly to
3287*fdd8201dSApple OSS Distributions 			// the upl's upl_page_info_t array.
3288*fdd8201dSApple OSS Distributions 			if (ioplInfo.fFlags & kIOPLExternUPL) {
3289*fdd8201dSApple OSS Distributions 				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3290*fdd8201dSApple OSS Distributions 			} else {
3291*fdd8201dSApple OSS Distributions 				pageList = &pageList[ioplInfo.fPageInfo];
3292*fdd8201dSApple OSS Distributions 			}
3293*fdd8201dSApple OSS Distributions 
3294*fdd8201dSApple OSS Distributions 			// Check for direct device non-paged memory
3295*fdd8201dSApple OSS Distributions 			if (ioplInfo.fFlags & kIOPLOnDevice) {
3296*fdd8201dSApple OSS Distributions 				address = ptoa_64(pageList->phys_addr) + offset;
3297*fdd8201dSApple OSS Distributions 				continue; // Done leave do/while(false) now
3298*fdd8201dSApple OSS Distributions 			}
3299*fdd8201dSApple OSS Distributions 
3300*fdd8201dSApple OSS Distributions 			// Now we need compute the index into the pageList
3301*fdd8201dSApple OSS Distributions 			UInt pageInd = atop_32(offset);
3302*fdd8201dSApple OSS Distributions 			offset &= PAGE_MASK;
3303*fdd8201dSApple OSS Distributions 
3304*fdd8201dSApple OSS Distributions 			// Compute the starting address of this segment
3305*fdd8201dSApple OSS Distributions 			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3306*fdd8201dSApple OSS Distributions 			if (!pageAddr) {
3307*fdd8201dSApple OSS Distributions 				panic("!pageList phys_addr");
3308*fdd8201dSApple OSS Distributions 			}
3309*fdd8201dSApple OSS Distributions 
3310*fdd8201dSApple OSS Distributions 			address = ptoa_64(pageAddr) + offset;
3311*fdd8201dSApple OSS Distributions 
3312*fdd8201dSApple OSS Distributions 			// length is currently set to the length of the remainider of the iopl.
3313*fdd8201dSApple OSS Distributions 			// We need to check that the remainder of the iopl is contiguous.
3314*fdd8201dSApple OSS Distributions 			// This is indicated by pageList[ind].phys_addr being sequential.
3315*fdd8201dSApple OSS Distributions 			IOByteCount contigLength = PAGE_SIZE - offset;
3316*fdd8201dSApple OSS Distributions 			while (contigLength < length
3317*fdd8201dSApple OSS Distributions 			    && ++pageAddr == pageList[++pageInd].phys_addr) {
3318*fdd8201dSApple OSS Distributions 				contigLength += PAGE_SIZE;
3319*fdd8201dSApple OSS Distributions 			}
3320*fdd8201dSApple OSS Distributions 
3321*fdd8201dSApple OSS Distributions 			if (contigLength < length) {
3322*fdd8201dSApple OSS Distributions 				length = contigLength;
3323*fdd8201dSApple OSS Distributions 			}
3324*fdd8201dSApple OSS Distributions 
3325*fdd8201dSApple OSS Distributions 
3326*fdd8201dSApple OSS Distributions 			assert(address);
3327*fdd8201dSApple OSS Distributions 			assert(length);
3328*fdd8201dSApple OSS Distributions 		} while (false);
3329*fdd8201dSApple OSS Distributions 	}
3330*fdd8201dSApple OSS Distributions 
3331*fdd8201dSApple OSS Distributions 	// Update return values and state
3332*fdd8201dSApple OSS Distributions 	isP->fIO.fIOVMAddr = address;
3333*fdd8201dSApple OSS Distributions 	isP->fIO.fLength   = length;
3334*fdd8201dSApple OSS Distributions 	isP->fIndex        = ind;
3335*fdd8201dSApple OSS Distributions 	isP->fOffset2Index = off2Ind;
3336*fdd8201dSApple OSS Distributions 	isP->fNextOffset   = isP->fIO.fOffset + length;
3337*fdd8201dSApple OSS Distributions 
3338*fdd8201dSApple OSS Distributions 	return kIOReturnSuccess;
3339*fdd8201dSApple OSS Distributions }
3340*fdd8201dSApple OSS Distributions 
3341*fdd8201dSApple OSS Distributions addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	// Return the address of the memory at `offset`, reporting through
	// *lengthOfSegment how many contiguous bytes are available there.
	// `options` selects the address space: _kIOMemorySourceSegment walks the
	// descriptor's source ranges directly; otherwise the DMA walk machinery
	// resolves a physical (or system-mapper-translated) segment.
	// Returns 0 (with *lengthOfSegment == 0) when offset >= _length or no
	// segment can be resolved.
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	// Out-of-range offsets report "no segment".
	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		// Coalesce immediately-adjacent ranges into one logical segment.
		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Physical-segment path: ask the DMA segment walker for the segment
		// starting at `offset`.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		// Mapped lookup unless the caller asked for raw physical addresses
		// (kIOMemoryMapperNone) or the memory is host-only/remote.
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				// Caller wants the true physical address but the walker
				// returned a mapper (IOVM) address: translate it, then extend
				// the segment a page at a time while the translation remains
				// physically contiguous, clamped to the original length.
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// A zero address means no segment; report a zero length as well.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3447*fdd8201dSApple OSS Distributions 
3448*fdd8201dSApple OSS Distributions #ifndef __LP64__
3449*fdd8201dSApple OSS Distributions #pragma clang diagnostic push
3450*fdd8201dSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3451*fdd8201dSApple OSS Distributions 
3452*fdd8201dSApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3453*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3454*fdd8201dSApple OSS Distributions {
3455*fdd8201dSApple OSS Distributions 	addr64_t address = 0;
3456*fdd8201dSApple OSS Distributions 
3457*fdd8201dSApple OSS Distributions 	if (options & _kIOMemorySourceSegment) {
3458*fdd8201dSApple OSS Distributions 		address = getSourceSegment(offset, lengthOfSegment);
3459*fdd8201dSApple OSS Distributions 	} else if (options & kIOMemoryMapperNone) {
3460*fdd8201dSApple OSS Distributions 		address = getPhysicalSegment64(offset, lengthOfSegment);
3461*fdd8201dSApple OSS Distributions 	} else {
3462*fdd8201dSApple OSS Distributions 		address = getPhysicalSegment(offset, lengthOfSegment);
3463*fdd8201dSApple OSS Distributions 	}
3464*fdd8201dSApple OSS Distributions 
3465*fdd8201dSApple OSS Distributions 	return address;
3466*fdd8201dSApple OSS Distributions }
3467*fdd8201dSApple OSS Distributions #pragma clang diagnostic pop
3468*fdd8201dSApple OSS Distributions 
3469*fdd8201dSApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3470*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3471*fdd8201dSApple OSS Distributions {
3472*fdd8201dSApple OSS Distributions 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3473*fdd8201dSApple OSS Distributions }
3474*fdd8201dSApple OSS Distributions 
3475*fdd8201dSApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3476*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3477*fdd8201dSApple OSS Distributions {
3478*fdd8201dSApple OSS Distributions 	addr64_t    address = 0;
3479*fdd8201dSApple OSS Distributions 	IOByteCount length  = 0;
3480*fdd8201dSApple OSS Distributions 
3481*fdd8201dSApple OSS Distributions 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3482*fdd8201dSApple OSS Distributions 
3483*fdd8201dSApple OSS Distributions 	if (lengthOfSegment) {
3484*fdd8201dSApple OSS Distributions 		length = *lengthOfSegment;
3485*fdd8201dSApple OSS Distributions 	}
3486*fdd8201dSApple OSS Distributions 
3487*fdd8201dSApple OSS Distributions 	if ((address + length) > 0x100000000ULL) {
3488*fdd8201dSApple OSS Distributions 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3489*fdd8201dSApple OSS Distributions 		    address, (long) length, (getMetaClass())->getClassName());
3490*fdd8201dSApple OSS Distributions 	}
3491*fdd8201dSApple OSS Distributions 
3492*fdd8201dSApple OSS Distributions 	return (IOPhysicalAddress) address;
3493*fdd8201dSApple OSS Distributions }
3494*fdd8201dSApple OSS Distributions 
3495*fdd8201dSApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3496*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3497*fdd8201dSApple OSS Distributions {
3498*fdd8201dSApple OSS Distributions 	IOPhysicalAddress phys32;
3499*fdd8201dSApple OSS Distributions 	IOByteCount       length;
3500*fdd8201dSApple OSS Distributions 	addr64_t          phys64;
3501*fdd8201dSApple OSS Distributions 	IOMapper *        mapper = NULL;
3502*fdd8201dSApple OSS Distributions 
3503*fdd8201dSApple OSS Distributions 	phys32 = getPhysicalSegment(offset, lengthOfSegment);
3504*fdd8201dSApple OSS Distributions 	if (!phys32) {
3505*fdd8201dSApple OSS Distributions 		return 0;
3506*fdd8201dSApple OSS Distributions 	}
3507*fdd8201dSApple OSS Distributions 
3508*fdd8201dSApple OSS Distributions 	if (gIOSystemMapper) {
3509*fdd8201dSApple OSS Distributions 		mapper = gIOSystemMapper;
3510*fdd8201dSApple OSS Distributions 	}
3511*fdd8201dSApple OSS Distributions 
3512*fdd8201dSApple OSS Distributions 	if (mapper) {
3513*fdd8201dSApple OSS Distributions 		IOByteCount origLen;
3514*fdd8201dSApple OSS Distributions 
3515*fdd8201dSApple OSS Distributions 		phys64 = mapper->mapToPhysicalAddress(phys32);
3516*fdd8201dSApple OSS Distributions 		origLen = *lengthOfSegment;
3517*fdd8201dSApple OSS Distributions 		length = page_size - (phys64 & (page_size - 1));
3518*fdd8201dSApple OSS Distributions 		while ((length < origLen)
3519*fdd8201dSApple OSS Distributions 		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3520*fdd8201dSApple OSS Distributions 			length += page_size;
3521*fdd8201dSApple OSS Distributions 		}
3522*fdd8201dSApple OSS Distributions 		if (length > origLen) {
3523*fdd8201dSApple OSS Distributions 			length = origLen;
3524*fdd8201dSApple OSS Distributions 		}
3525*fdd8201dSApple OSS Distributions 
3526*fdd8201dSApple OSS Distributions 		*lengthOfSegment = length;
3527*fdd8201dSApple OSS Distributions 	} else {
3528*fdd8201dSApple OSS Distributions 		phys64 = (addr64_t) phys32;
3529*fdd8201dSApple OSS Distributions 	}
3530*fdd8201dSApple OSS Distributions 
3531*fdd8201dSApple OSS Distributions 	return phys64;
3532*fdd8201dSApple OSS Distributions }
3533*fdd8201dSApple OSS Distributions 
3534*fdd8201dSApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3535*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3536*fdd8201dSApple OSS Distributions {
3537*fdd8201dSApple OSS Distributions 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3538*fdd8201dSApple OSS Distributions }
3539*fdd8201dSApple OSS Distributions 
3540*fdd8201dSApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3541*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3542*fdd8201dSApple OSS Distributions {
3543*fdd8201dSApple OSS Distributions 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3544*fdd8201dSApple OSS Distributions }
3545*fdd8201dSApple OSS Distributions 
3546*fdd8201dSApple OSS Distributions #pragma clang diagnostic push
3547*fdd8201dSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3548*fdd8201dSApple OSS Distributions 
3549*fdd8201dSApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3550*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3551*fdd8201dSApple OSS Distributions     IOByteCount * lengthOfSegment)
3552*fdd8201dSApple OSS Distributions {
3553*fdd8201dSApple OSS Distributions 	if (_task == kernel_task) {
3554*fdd8201dSApple OSS Distributions 		return (void *) getSourceSegment(offset, lengthOfSegment);
3555*fdd8201dSApple OSS Distributions 	} else {
3556*fdd8201dSApple OSS Distributions 		panic("IOGMD::getVirtualSegment deprecated");
3557*fdd8201dSApple OSS Distributions 	}
3558*fdd8201dSApple OSS Distributions 
3559*fdd8201dSApple OSS Distributions 	return NULL;
3560*fdd8201dSApple OSS Distributions }
3561*fdd8201dSApple OSS Distributions #pragma clang diagnostic pop
3562*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
3563*fdd8201dSApple OSS Distributions 
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// Multiplexed back end for IODMACommand: decode the operation code,
	// validate vData's size for that operation, and dispatch.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Split parameter bits from the operation code proper.
	// NOTE(review): (op & ~mask & op) reduces to (op & ~mask), and `params`
	// is never read below — presumably kept for symmetry with subclass
	// overrides; confirm before simplifying.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		// Report length/direction; segment count is unknown here.
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		// Resolve the segment at fOffset, coalescing physically adjacent
		// sub-segments into one result.
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// This base implementation only handles unmapped (raw physical)
		// walks; mapped walks must be handled by a subclass.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): err from dmaUnmap() is computed but success is
		// returned unconditionally — looks like best-effort unmap semantics;
		// confirm before propagating err.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3635*fdd8201dSApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	// Change the purgeability state of this descriptor's memory, optionally
	// returning the previous state through *oldState.
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	// Remote memory has no local VM object to control.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		// A backing memory reference exists: the superclass operates on it.
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable kernel buffers need their memory reference
				// (created at prepare time) to be controlled.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0);

			// Translate the IOKit purgeable state into VM control/state values.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Convert the VM's previous state back to IOKit encoding.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3699*fdd8201dSApple OSS Distributions 
3700*fdd8201dSApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3701*fdd8201dSApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3702*fdd8201dSApple OSS Distributions     IOOptionBits * oldState )
3703*fdd8201dSApple OSS Distributions {
3704*fdd8201dSApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3705*fdd8201dSApple OSS Distributions 
3706*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3707*fdd8201dSApple OSS Distributions 		LOCK;
3708*fdd8201dSApple OSS Distributions 	}
3709*fdd8201dSApple OSS Distributions 	if (_memRef) {
3710*fdd8201dSApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3711*fdd8201dSApple OSS Distributions 	}
3712*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3713*fdd8201dSApple OSS Distributions 		UNLOCK;
3714*fdd8201dSApple OSS Distributions 	}
3715*fdd8201dSApple OSS Distributions 
3716*fdd8201dSApple OSS Distributions 	return err;
3717*fdd8201dSApple OSS Distributions }
3718*fdd8201dSApple OSS Distributions 
3719*fdd8201dSApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3720*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3721*fdd8201dSApple OSS Distributions     int newLedgerTag,
3722*fdd8201dSApple OSS Distributions     IOOptionBits newLedgerOptions )
3723*fdd8201dSApple OSS Distributions {
3724*fdd8201dSApple OSS Distributions 	IOReturn      err = kIOReturnSuccess;
3725*fdd8201dSApple OSS Distributions 
3726*fdd8201dSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3727*fdd8201dSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3728*fdd8201dSApple OSS Distributions 		return kIOReturnNotAttached;
3729*fdd8201dSApple OSS Distributions 	}
3730*fdd8201dSApple OSS Distributions 
3731*fdd8201dSApple OSS Distributions 	if (iokit_iomd_setownership_enabled == FALSE) {
3732*fdd8201dSApple OSS Distributions 		return kIOReturnUnsupported;
3733*fdd8201dSApple OSS Distributions 	}
3734*fdd8201dSApple OSS Distributions 
3735*fdd8201dSApple OSS Distributions 	if (_memRef) {
3736*fdd8201dSApple OSS Distributions 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3737*fdd8201dSApple OSS Distributions 	} else {
3738*fdd8201dSApple OSS Distributions 		err = kIOReturnUnsupported;
3739*fdd8201dSApple OSS Distributions 	}
3740*fdd8201dSApple OSS Distributions 
3741*fdd8201dSApple OSS Distributions 	return err;
3742*fdd8201dSApple OSS Distributions }
3743*fdd8201dSApple OSS Distributions 
3744*fdd8201dSApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3745*fdd8201dSApple OSS Distributions IOMemoryDescriptor::setOwnership( task_t newOwner,
3746*fdd8201dSApple OSS Distributions     int newLedgerTag,
3747*fdd8201dSApple OSS Distributions     IOOptionBits newLedgerOptions )
3748*fdd8201dSApple OSS Distributions {
3749*fdd8201dSApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3750*fdd8201dSApple OSS Distributions 
3751*fdd8201dSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3752*fdd8201dSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3753*fdd8201dSApple OSS Distributions 		return kIOReturnNotAttached;
3754*fdd8201dSApple OSS Distributions 	}
3755*fdd8201dSApple OSS Distributions 
3756*fdd8201dSApple OSS Distributions 	if (iokit_iomd_setownership_enabled == FALSE) {
3757*fdd8201dSApple OSS Distributions 		return kIOReturnUnsupported;
3758*fdd8201dSApple OSS Distributions 	}
3759*fdd8201dSApple OSS Distributions 
3760*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3761*fdd8201dSApple OSS Distributions 		LOCK;
3762*fdd8201dSApple OSS Distributions 	}
3763*fdd8201dSApple OSS Distributions 	if (_memRef) {
3764*fdd8201dSApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3765*fdd8201dSApple OSS Distributions 	} else {
3766*fdd8201dSApple OSS Distributions 		IOMultiMemoryDescriptor * mmd;
3767*fdd8201dSApple OSS Distributions 		IOSubMemoryDescriptor   * smd;
3768*fdd8201dSApple OSS Distributions 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3769*fdd8201dSApple OSS Distributions 			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3770*fdd8201dSApple OSS Distributions 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3771*fdd8201dSApple OSS Distributions 			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3772*fdd8201dSApple OSS Distributions 		}
3773*fdd8201dSApple OSS Distributions 	}
3774*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3775*fdd8201dSApple OSS Distributions 		UNLOCK;
3776*fdd8201dSApple OSS Distributions 	}
3777*fdd8201dSApple OSS Distributions 
3778*fdd8201dSApple OSS Distributions 	return err;
3779*fdd8201dSApple OSS Distributions }
3780*fdd8201dSApple OSS Distributions 
3781*fdd8201dSApple OSS Distributions 
3782*fdd8201dSApple OSS Distributions uint64_t
getDMAMapLength(uint64_t * offset)3783*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3784*fdd8201dSApple OSS Distributions {
3785*fdd8201dSApple OSS Distributions 	uint64_t length;
3786*fdd8201dSApple OSS Distributions 
3787*fdd8201dSApple OSS Distributions 	if (_memRef) {
3788*fdd8201dSApple OSS Distributions 		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3789*fdd8201dSApple OSS Distributions 	} else {
3790*fdd8201dSApple OSS Distributions 		IOByteCount       iterate, segLen;
3791*fdd8201dSApple OSS Distributions 		IOPhysicalAddress sourceAddr, sourceAlign;
3792*fdd8201dSApple OSS Distributions 
3793*fdd8201dSApple OSS Distributions 		if (kIOMemoryThreadSafe & _flags) {
3794*fdd8201dSApple OSS Distributions 			LOCK;
3795*fdd8201dSApple OSS Distributions 		}
3796*fdd8201dSApple OSS Distributions 		length = 0;
3797*fdd8201dSApple OSS Distributions 		iterate = 0;
3798*fdd8201dSApple OSS Distributions 		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3799*fdd8201dSApple OSS Distributions 			sourceAlign = (sourceAddr & page_mask);
3800*fdd8201dSApple OSS Distributions 			if (offset && !iterate) {
3801*fdd8201dSApple OSS Distributions 				*offset = sourceAlign;
3802*fdd8201dSApple OSS Distributions 			}
3803*fdd8201dSApple OSS Distributions 			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3804*fdd8201dSApple OSS Distributions 			iterate += segLen;
3805*fdd8201dSApple OSS Distributions 		}
3806*fdd8201dSApple OSS Distributions 		if (kIOMemoryThreadSafe & _flags) {
3807*fdd8201dSApple OSS Distributions 			UNLOCK;
3808*fdd8201dSApple OSS Distributions 		}
3809*fdd8201dSApple OSS Distributions 	}
3810*fdd8201dSApple OSS Distributions 
3811*fdd8201dSApple OSS Distributions 	return length;
3812*fdd8201dSApple OSS Distributions }
3813*fdd8201dSApple OSS Distributions 
3814*fdd8201dSApple OSS Distributions 
3815*fdd8201dSApple OSS Distributions IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3816*fdd8201dSApple OSS Distributions IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3817*fdd8201dSApple OSS Distributions     IOByteCount * dirtyPageCount )
3818*fdd8201dSApple OSS Distributions {
3819*fdd8201dSApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3820*fdd8201dSApple OSS Distributions 
3821*fdd8201dSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3822*fdd8201dSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3823*fdd8201dSApple OSS Distributions 		return kIOReturnNotAttached;
3824*fdd8201dSApple OSS Distributions 	}
3825*fdd8201dSApple OSS Distributions 
3826*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3827*fdd8201dSApple OSS Distributions 		LOCK;
3828*fdd8201dSApple OSS Distributions 	}
3829*fdd8201dSApple OSS Distributions 	if (_memRef) {
3830*fdd8201dSApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3831*fdd8201dSApple OSS Distributions 	} else {
3832*fdd8201dSApple OSS Distributions 		IOMultiMemoryDescriptor * mmd;
3833*fdd8201dSApple OSS Distributions 		IOSubMemoryDescriptor   * smd;
3834*fdd8201dSApple OSS Distributions 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3835*fdd8201dSApple OSS Distributions 			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3836*fdd8201dSApple OSS Distributions 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3837*fdd8201dSApple OSS Distributions 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3838*fdd8201dSApple OSS Distributions 		}
3839*fdd8201dSApple OSS Distributions 	}
3840*fdd8201dSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3841*fdd8201dSApple OSS Distributions 		UNLOCK;
3842*fdd8201dSApple OSS Distributions 	}
3843*fdd8201dSApple OSS Distributions 
3844*fdd8201dSApple OSS Distributions 	return err;
3845*fdd8201dSApple OSS Distributions }
3846*fdd8201dSApple OSS Distributions 
3847*fdd8201dSApple OSS Distributions 
3848*fdd8201dSApple OSS Distributions #if defined(__arm__) || defined(__arm64__)
3849*fdd8201dSApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3850*fdd8201dSApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3851*fdd8201dSApple OSS Distributions #else /* defined(__arm__) || defined(__arm64__) */
3852*fdd8201dSApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3853*fdd8201dSApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3854*fdd8201dSApple OSS Distributions #endif /* defined(__arm__) || defined(__arm64__) */
3855*fdd8201dSApple OSS Distributions 
3856*fdd8201dSApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3857*fdd8201dSApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3858*fdd8201dSApple OSS Distributions {
3859*fdd8201dSApple OSS Distributions 	ppnum_t page, end;
3860*fdd8201dSApple OSS Distributions 
3861*fdd8201dSApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3862*fdd8201dSApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3863*fdd8201dSApple OSS Distributions 	for (; page < end; page++) {
3864*fdd8201dSApple OSS Distributions 		pmap_clear_noencrypt(page);
3865*fdd8201dSApple OSS Distributions 	}
3866*fdd8201dSApple OSS Distributions }
3867*fdd8201dSApple OSS Distributions 
3868*fdd8201dSApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3869*fdd8201dSApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3870*fdd8201dSApple OSS Distributions {
3871*fdd8201dSApple OSS Distributions 	ppnum_t page, end;
3872*fdd8201dSApple OSS Distributions 
3873*fdd8201dSApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3874*fdd8201dSApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3875*fdd8201dSApple OSS Distributions 	for (; page < end; page++) {
3876*fdd8201dSApple OSS Distributions 		pmap_set_noencrypt(page);
3877*fdd8201dSApple OSS Distributions 	}
3878*fdd8201dSApple OSS Distributions }
3879*fdd8201dSApple OSS Distributions 
/*
 * Apply a cache/encryption maintenance operation to the physical pages
 * backing [offset, offset + length) of this descriptor.
 *
 * options selects the operation:
 *   kIOMemoryIncoherentIOFlush / kIOMemoryIncoherentIOStore - data-cache
 *     maintenance (arm uses the extended 4-argument flavor; on
 *     __ARM_COHERENT_IO__ builds a single sentinel call is made and the
 *     routine returns immediately);
 *   kIOMemorySetEncrypted / kIOMemoryClearEncrypted - per-page pmap
 *     no-encrypt attribute updates via SetEncryptOp/ClearEncryptOp.
 *
 * Returns kIOReturnUnsupported for an unrecognized option,
 * kIOReturnNotAttached for remote descriptors, kIOReturnUnderrun if a
 * physical segment lookup fails before the whole range is covered,
 * else kIOReturnSuccess.
 */
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	// Classic 2-argument operation (pa, byte count).
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm__) || defined(__arm64__)
	// Extended arm flavor also takes the bytes still to go and an
	// out-result that, when nonzero, means "whole range handled".
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	// Remote (other-owner) memory cannot be operated on locally.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm__) || defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// IO is cache-coherent: one sentinel call, no per-segment work.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm__) || defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm__) || defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// IO is cache-coherent: one sentinel call, no per-segment work.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm__) || defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	// No handler selected => unknown/unsupported option for this build.
#if defined(__arm__) || defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm__) || defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the request to the descriptor's extent.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// The handlers take an unsigned int count; cap to the largest
		// page-multiple that fits.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// NOTE(review): this clamp permanently shrinks `remaining` (it is
		// also the loop counter), apparently to keep the func_ext argument
		// within unsigned int range — confirm intent if touching this.
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm__) || defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// Nonzero result: the extended op covered everything; stop.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm__) || defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm__) || defined(__arm64__) */

		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
3994*fdd8201dSApple OSS Distributions 
3995*fdd8201dSApple OSS Distributions /*
3996*fdd8201dSApple OSS Distributions  *
3997*fdd8201dSApple OSS Distributions  */
3998*fdd8201dSApple OSS Distributions 
3999*fdd8201dSApple OSS Distributions #if defined(__i386__) || defined(__x86_64__)
4000*fdd8201dSApple OSS Distributions 
4001*fdd8201dSApple OSS Distributions extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4002*fdd8201dSApple OSS Distributions 
4003*fdd8201dSApple OSS Distributions /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4004*fdd8201dSApple OSS Distributions  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4005*fdd8201dSApple OSS Distributions  * kernel non-text data -- should we just add another range instead?
4006*fdd8201dSApple OSS Distributions  */
4007*fdd8201dSApple OSS Distributions #define io_kernel_static_start  vm_kernel_stext
4008*fdd8201dSApple OSS Distributions #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4009*fdd8201dSApple OSS Distributions 
4010*fdd8201dSApple OSS Distributions #elif defined(__arm__) || defined(__arm64__)
4011*fdd8201dSApple OSS Distributions 
4012*fdd8201dSApple OSS Distributions extern vm_offset_t              static_memory_end;
4013*fdd8201dSApple OSS Distributions 
4014*fdd8201dSApple OSS Distributions #if defined(__arm64__)
4015*fdd8201dSApple OSS Distributions #define io_kernel_static_start vm_kext_base
4016*fdd8201dSApple OSS Distributions #else /* defined(__arm64__) */
4017*fdd8201dSApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4018*fdd8201dSApple OSS Distributions #endif /* defined(__arm64__) */
4019*fdd8201dSApple OSS Distributions 
4020*fdd8201dSApple OSS Distributions #define io_kernel_static_end    static_memory_end
4021*fdd8201dSApple OSS Distributions 
4022*fdd8201dSApple OSS Distributions #else
4023*fdd8201dSApple OSS Distributions #error io_kernel_static_end is undefined for this architecture
4024*fdd8201dSApple OSS Distributions #endif
4025*fdd8201dSApple OSS Distributions 
4026*fdd8201dSApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4027*fdd8201dSApple OSS Distributions io_get_kernel_static_upl(
4028*fdd8201dSApple OSS Distributions 	vm_map_t                /* map */,
4029*fdd8201dSApple OSS Distributions 	uintptr_t               offset,
4030*fdd8201dSApple OSS Distributions 	upl_size_t              *upl_size,
4031*fdd8201dSApple OSS Distributions 	unsigned int            *page_offset,
4032*fdd8201dSApple OSS Distributions 	upl_t                   *upl,
4033*fdd8201dSApple OSS Distributions 	upl_page_info_array_t   page_list,
4034*fdd8201dSApple OSS Distributions 	unsigned int            *count,
4035*fdd8201dSApple OSS Distributions 	ppnum_t                 *highest_page)
4036*fdd8201dSApple OSS Distributions {
4037*fdd8201dSApple OSS Distributions 	unsigned int pageCount, page;
4038*fdd8201dSApple OSS Distributions 	ppnum_t phys;
4039*fdd8201dSApple OSS Distributions 	ppnum_t highestPage = 0;
4040*fdd8201dSApple OSS Distributions 
4041*fdd8201dSApple OSS Distributions 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4042*fdd8201dSApple OSS Distributions 	if (pageCount > *count) {
4043*fdd8201dSApple OSS Distributions 		pageCount = *count;
4044*fdd8201dSApple OSS Distributions 	}
4045*fdd8201dSApple OSS Distributions 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4046*fdd8201dSApple OSS Distributions 
4047*fdd8201dSApple OSS Distributions 	*upl = NULL;
4048*fdd8201dSApple OSS Distributions 	*page_offset = ((unsigned int) page_mask & offset);
4049*fdd8201dSApple OSS Distributions 
4050*fdd8201dSApple OSS Distributions 	for (page = 0; page < pageCount; page++) {
4051*fdd8201dSApple OSS Distributions 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4052*fdd8201dSApple OSS Distributions 		if (!phys) {
4053*fdd8201dSApple OSS Distributions 			break;
4054*fdd8201dSApple OSS Distributions 		}
4055*fdd8201dSApple OSS Distributions 		page_list[page].phys_addr = phys;
4056*fdd8201dSApple OSS Distributions 		page_list[page].free_when_done = 0;
4057*fdd8201dSApple OSS Distributions 		page_list[page].absent    = 0;
4058*fdd8201dSApple OSS Distributions 		page_list[page].dirty     = 0;
4059*fdd8201dSApple OSS Distributions 		page_list[page].precious  = 0;
4060*fdd8201dSApple OSS Distributions 		page_list[page].device    = 0;
4061*fdd8201dSApple OSS Distributions 		if (phys > highestPage) {
4062*fdd8201dSApple OSS Distributions 			highestPage = phys;
4063*fdd8201dSApple OSS Distributions 		}
4064*fdd8201dSApple OSS Distributions 	}
4065*fdd8201dSApple OSS Distributions 
4066*fdd8201dSApple OSS Distributions 	*highest_page = highestPage;
4067*fdd8201dSApple OSS Distributions 
4068*fdd8201dSApple OSS Distributions 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4069*fdd8201dSApple OSS Distributions }
4070*fdd8201dSApple OSS Distributions 
4071*fdd8201dSApple OSS Distributions IOReturn
wireVirtual(IODirection forDirection)4072*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4073*fdd8201dSApple OSS Distributions {
4074*fdd8201dSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4075*fdd8201dSApple OSS Distributions 	IOReturn error = kIOReturnSuccess;
4076*fdd8201dSApple OSS Distributions 	ioGMDData *dataP;
4077*fdd8201dSApple OSS Distributions 	upl_page_info_array_t pageInfo;
4078*fdd8201dSApple OSS Distributions 	ppnum_t mapBase;
4079*fdd8201dSApple OSS Distributions 	vm_tag_t tag = VM_KERN_MEMORY_NONE;
4080*fdd8201dSApple OSS Distributions 	mach_vm_size_t numBytesWired = 0;
4081*fdd8201dSApple OSS Distributions 
4082*fdd8201dSApple OSS Distributions 	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4083*fdd8201dSApple OSS Distributions 
4084*fdd8201dSApple OSS Distributions 	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4085*fdd8201dSApple OSS Distributions 		forDirection = (IODirection) (forDirection | getDirection());
4086*fdd8201dSApple OSS Distributions 	}
4087*fdd8201dSApple OSS Distributions 
4088*fdd8201dSApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4089*fdd8201dSApple OSS Distributions 	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4090*fdd8201dSApple OSS Distributions 	switch (kIODirectionOutIn & forDirection) {
4091*fdd8201dSApple OSS Distributions 	case kIODirectionOut:
4092*fdd8201dSApple OSS Distributions 		// Pages do not need to be marked as dirty on commit
4093*fdd8201dSApple OSS Distributions 		uplFlags = UPL_COPYOUT_FROM;
4094*fdd8201dSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess;
4095*fdd8201dSApple OSS Distributions 		break;
4096*fdd8201dSApple OSS Distributions 
4097*fdd8201dSApple OSS Distributions 	case kIODirectionIn:
4098*fdd8201dSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapWriteAccess;
4099*fdd8201dSApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4100*fdd8201dSApple OSS Distributions 		break;
4101*fdd8201dSApple OSS Distributions 
4102*fdd8201dSApple OSS Distributions 	default:
4103*fdd8201dSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4104*fdd8201dSApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4105*fdd8201dSApple OSS Distributions 		break;
4106*fdd8201dSApple OSS Distributions 	}
4107*fdd8201dSApple OSS Distributions 
4108*fdd8201dSApple OSS Distributions 	if (_wireCount) {
4109*fdd8201dSApple OSS Distributions 		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4110*fdd8201dSApple OSS Distributions 			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4111*fdd8201dSApple OSS Distributions 			    (size_t)VM_KERNEL_ADDRPERM(this));
4112*fdd8201dSApple OSS Distributions 			error = kIOReturnNotWritable;
4113*fdd8201dSApple OSS Distributions 		}
4114*fdd8201dSApple OSS Distributions 	} else {
4115*fdd8201dSApple OSS Distributions 		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4116*fdd8201dSApple OSS Distributions 		IOMapper *mapper;
4117*fdd8201dSApple OSS Distributions 
4118*fdd8201dSApple OSS Distributions 		mapper = dataP->fMapper;
4119*fdd8201dSApple OSS Distributions 		dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4120*fdd8201dSApple OSS Distributions 
4121*fdd8201dSApple OSS Distributions 		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4122*fdd8201dSApple OSS Distributions 		tag = _kernelTag;
4123*fdd8201dSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE == tag) {
4124*fdd8201dSApple OSS Distributions 			tag = IOMemoryTag(kernel_map);
4125*fdd8201dSApple OSS Distributions 		}
4126*fdd8201dSApple OSS Distributions 
4127*fdd8201dSApple OSS Distributions 		if (kIODirectionPrepareToPhys32 & forDirection) {
4128*fdd8201dSApple OSS Distributions 			if (!mapper) {
4129*fdd8201dSApple OSS Distributions 				uplFlags |= UPL_NEED_32BIT_ADDR;
4130*fdd8201dSApple OSS Distributions 			}
4131*fdd8201dSApple OSS Distributions 			if (dataP->fDMAMapNumAddressBits > 32) {
4132*fdd8201dSApple OSS Distributions 				dataP->fDMAMapNumAddressBits = 32;
4133*fdd8201dSApple OSS Distributions 			}
4134*fdd8201dSApple OSS Distributions 		}
4135*fdd8201dSApple OSS Distributions 		if (kIODirectionPrepareNoFault    & forDirection) {
4136*fdd8201dSApple OSS Distributions 			uplFlags |= UPL_REQUEST_NO_FAULT;
4137*fdd8201dSApple OSS Distributions 		}
4138*fdd8201dSApple OSS Distributions 		if (kIODirectionPrepareNoZeroFill & forDirection) {
4139*fdd8201dSApple OSS Distributions 			uplFlags |= UPL_NOZEROFILLIO;
4140*fdd8201dSApple OSS Distributions 		}
4141*fdd8201dSApple OSS Distributions 		if (kIODirectionPrepareNonCoherent & forDirection) {
4142*fdd8201dSApple OSS Distributions 			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4143*fdd8201dSApple OSS Distributions 		}
4144*fdd8201dSApple OSS Distributions 
4145*fdd8201dSApple OSS Distributions 		mapBase = 0;
4146*fdd8201dSApple OSS Distributions 
4147*fdd8201dSApple OSS Distributions 		// Note that appendBytes(NULL) zeros the data up to the desired length
4148*fdd8201dSApple OSS Distributions 		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4149*fdd8201dSApple OSS Distributions 		if (uplPageSize > ((unsigned int)uplPageSize)) {
4150*fdd8201dSApple OSS Distributions 			error = kIOReturnNoMemory;
4151*fdd8201dSApple OSS Distributions 			traceInterval.setEndArg2(error);
4152*fdd8201dSApple OSS Distributions 			return error;
4153*fdd8201dSApple OSS Distributions 		}
4154*fdd8201dSApple OSS Distributions 		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4155*fdd8201dSApple OSS Distributions 			error = kIOReturnNoMemory;
4156*fdd8201dSApple OSS Distributions 			traceInterval.setEndArg2(error);
4157*fdd8201dSApple OSS Distributions 			return error;
4158*fdd8201dSApple OSS Distributions 		}
4159*fdd8201dSApple OSS Distributions 		dataP = NULL;
4160*fdd8201dSApple OSS Distributions 
4161*fdd8201dSApple OSS Distributions 		// Find the appropriate vm_map for the given task
4162*fdd8201dSApple OSS Distributions 		vm_map_t curMap;
4163*fdd8201dSApple OSS Distributions 		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4164*fdd8201dSApple OSS Distributions 			curMap = NULL;
4165*fdd8201dSApple OSS Distributions 		} else {
4166*fdd8201dSApple OSS Distributions 			curMap = get_task_map(_task);
4167*fdd8201dSApple OSS Distributions 		}
4168*fdd8201dSApple OSS Distributions 
4169*fdd8201dSApple OSS Distributions 		// Iterate over the vector of virtual ranges
4170*fdd8201dSApple OSS Distributions 		Ranges vec = _ranges;
4171*fdd8201dSApple OSS Distributions 		unsigned int pageIndex  = 0;
4172*fdd8201dSApple OSS Distributions 		IOByteCount mdOffset    = 0;
4173*fdd8201dSApple OSS Distributions 		ppnum_t highestPage     = 0;
4174*fdd8201dSApple OSS Distributions 		bool         byteAlignUPL;
4175*fdd8201dSApple OSS Distributions 
4176*fdd8201dSApple OSS Distributions 		IOMemoryEntry * memRefEntry = NULL;
4177*fdd8201dSApple OSS Distributions 		if (_memRef) {
4178*fdd8201dSApple OSS Distributions 			memRefEntry = &_memRef->entries[0];
4179*fdd8201dSApple OSS Distributions 			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4180*fdd8201dSApple OSS Distributions 		} else {
4181*fdd8201dSApple OSS Distributions 			byteAlignUPL = true;
4182*fdd8201dSApple OSS Distributions 		}
4183*fdd8201dSApple OSS Distributions 
4184*fdd8201dSApple OSS Distributions 		for (UInt range = 0; mdOffset < _length; range++) {
4185*fdd8201dSApple OSS Distributions 			ioPLBlock iopl;
4186*fdd8201dSApple OSS Distributions 			mach_vm_address_t startPage, startPageOffset;
4187*fdd8201dSApple OSS Distributions 			mach_vm_size_t    numBytes;
4188*fdd8201dSApple OSS Distributions 			ppnum_t highPage = 0;
4189*fdd8201dSApple OSS Distributions 
4190*fdd8201dSApple OSS Distributions 			if (_memRef) {
4191*fdd8201dSApple OSS Distributions 				if (range >= _memRef->count) {
4192*fdd8201dSApple OSS Distributions 					panic("memRefEntry");
4193*fdd8201dSApple OSS Distributions 				}
4194*fdd8201dSApple OSS Distributions 				memRefEntry = &_memRef->entries[range];
4195*fdd8201dSApple OSS Distributions 				numBytes    = memRefEntry->size;
4196*fdd8201dSApple OSS Distributions 				startPage   = -1ULL;
4197*fdd8201dSApple OSS Distributions 				if (byteAlignUPL) {
4198*fdd8201dSApple OSS Distributions 					startPageOffset = 0;
4199*fdd8201dSApple OSS Distributions 				} else {
4200*fdd8201dSApple OSS Distributions 					startPageOffset = (memRefEntry->start & PAGE_MASK);
4201*fdd8201dSApple OSS Distributions 				}
4202*fdd8201dSApple OSS Distributions 			} else {
4203*fdd8201dSApple OSS Distributions 				// Get the startPage address and length of vec[range]
4204*fdd8201dSApple OSS Distributions 				getAddrLenForInd(startPage, numBytes, type, vec, range);
4205*fdd8201dSApple OSS Distributions 				if (byteAlignUPL) {
4206*fdd8201dSApple OSS Distributions 					startPageOffset = 0;
4207*fdd8201dSApple OSS Distributions 				} else {
4208*fdd8201dSApple OSS Distributions 					startPageOffset = startPage & PAGE_MASK;
4209*fdd8201dSApple OSS Distributions 					startPage = trunc_page_64(startPage);
4210*fdd8201dSApple OSS Distributions 				}
4211*fdd8201dSApple OSS Distributions 			}
4212*fdd8201dSApple OSS Distributions 			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4213*fdd8201dSApple OSS Distributions 			numBytes += startPageOffset;
4214*fdd8201dSApple OSS Distributions 
4215*fdd8201dSApple OSS Distributions 			if (mapper) {
4216*fdd8201dSApple OSS Distributions 				iopl.fMappedPage = mapBase + pageIndex;
4217*fdd8201dSApple OSS Distributions 			} else {
4218*fdd8201dSApple OSS Distributions 				iopl.fMappedPage = 0;
4219*fdd8201dSApple OSS Distributions 			}
4220*fdd8201dSApple OSS Distributions 
4221*fdd8201dSApple OSS Distributions 			// Iterate over the current range, creating UPLs
4222*fdd8201dSApple OSS Distributions 			while (numBytes) {
4223*fdd8201dSApple OSS Distributions 				vm_address_t kernelStart = (vm_address_t) startPage;
4224*fdd8201dSApple OSS Distributions 				vm_map_t theMap;
4225*fdd8201dSApple OSS Distributions 				if (curMap) {
4226*fdd8201dSApple OSS Distributions 					theMap = curMap;
4227*fdd8201dSApple OSS Distributions 				} else if (_memRef) {
4228*fdd8201dSApple OSS Distributions 					theMap = NULL;
4229*fdd8201dSApple OSS Distributions 				} else {
4230*fdd8201dSApple OSS Distributions 					assert(_task == kernel_task);
4231*fdd8201dSApple OSS Distributions 					theMap = IOPageableMapForAddress(kernelStart);
4232*fdd8201dSApple OSS Distributions 				}
4233*fdd8201dSApple OSS Distributions 
4234*fdd8201dSApple OSS Distributions 				// ioplFlags is an in/out parameter
4235*fdd8201dSApple OSS Distributions 				upl_control_flags_t ioplFlags = uplFlags;
4236*fdd8201dSApple OSS Distributions 				dataP = getDataP(_memoryEntries);
4237*fdd8201dSApple OSS Distributions 				pageInfo = getPageList(dataP);
4238*fdd8201dSApple OSS Distributions 				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4239*fdd8201dSApple OSS Distributions 
4240*fdd8201dSApple OSS Distributions 				mach_vm_size_t ioplPhysSize;
4241*fdd8201dSApple OSS Distributions 				upl_size_t     ioplSize;
4242*fdd8201dSApple OSS Distributions 				unsigned int   numPageInfo;
4243*fdd8201dSApple OSS Distributions 
4244*fdd8201dSApple OSS Distributions 				if (_memRef) {
4245*fdd8201dSApple OSS Distributions 					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4246*fdd8201dSApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4247*fdd8201dSApple OSS Distributions 				} else {
4248*fdd8201dSApple OSS Distributions 					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4249*fdd8201dSApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4250*fdd8201dSApple OSS Distributions 				}
4251*fdd8201dSApple OSS Distributions 				if (error != KERN_SUCCESS) {
4252*fdd8201dSApple OSS Distributions 					if (_memRef) {
4253*fdd8201dSApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4254*fdd8201dSApple OSS Distributions 					} else {
4255*fdd8201dSApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4256*fdd8201dSApple OSS Distributions 					}
4257*fdd8201dSApple OSS Distributions 					printf("entry size error %d\n", error);
4258*fdd8201dSApple OSS Distributions 					goto abortExit;
4259*fdd8201dSApple OSS Distributions 				}
4260*fdd8201dSApple OSS Distributions 				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4261*fdd8201dSApple OSS Distributions 				numPageInfo = atop_32(ioplPhysSize);
4262*fdd8201dSApple OSS Distributions 				if (byteAlignUPL) {
4263*fdd8201dSApple OSS Distributions 					if (numBytes > ioplPhysSize) {
4264*fdd8201dSApple OSS Distributions 						ioplSize = ((typeof(ioplSize))ioplPhysSize);
4265*fdd8201dSApple OSS Distributions 					} else {
4266*fdd8201dSApple OSS Distributions 						ioplSize = ((typeof(ioplSize))numBytes);
4267*fdd8201dSApple OSS Distributions 					}
4268*fdd8201dSApple OSS Distributions 				} else {
4269*fdd8201dSApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ioplPhysSize);
4270*fdd8201dSApple OSS Distributions 				}
4271*fdd8201dSApple OSS Distributions 
4272*fdd8201dSApple OSS Distributions 				if (_memRef) {
4273*fdd8201dSApple OSS Distributions 					memory_object_offset_t entryOffset;
4274*fdd8201dSApple OSS Distributions 
4275*fdd8201dSApple OSS Distributions 					entryOffset = mdOffset;
4276*fdd8201dSApple OSS Distributions 					if (byteAlignUPL) {
4277*fdd8201dSApple OSS Distributions 						entryOffset = (entryOffset - memRefEntry->offset);
4278*fdd8201dSApple OSS Distributions 					} else {
4279*fdd8201dSApple OSS Distributions 						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4280*fdd8201dSApple OSS Distributions 					}
4281*fdd8201dSApple OSS Distributions 					if (ioplSize > (memRefEntry->size - entryOffset)) {
4282*fdd8201dSApple OSS Distributions 						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4283*fdd8201dSApple OSS Distributions 					}
4284*fdd8201dSApple OSS Distributions 					error = memory_object_iopl_request(memRefEntry->entry,
4285*fdd8201dSApple OSS Distributions 					    entryOffset,
4286*fdd8201dSApple OSS Distributions 					    &ioplSize,
4287*fdd8201dSApple OSS Distributions 					    &iopl.fIOPL,
4288*fdd8201dSApple OSS Distributions 					    baseInfo,
4289*fdd8201dSApple OSS Distributions 					    &numPageInfo,
4290*fdd8201dSApple OSS Distributions 					    &ioplFlags,
4291*fdd8201dSApple OSS Distributions 					    tag);
4292*fdd8201dSApple OSS Distributions 				} else if ((theMap == kernel_map)
4293*fdd8201dSApple OSS Distributions 				    && (kernelStart >= io_kernel_static_start)
4294*fdd8201dSApple OSS Distributions 				    && (kernelStart < io_kernel_static_end)) {
4295*fdd8201dSApple OSS Distributions 					error = io_get_kernel_static_upl(theMap,
4296*fdd8201dSApple OSS Distributions 					    kernelStart,
4297*fdd8201dSApple OSS Distributions 					    &ioplSize,
4298*fdd8201dSApple OSS Distributions 					    &iopl.fPageOffset,
4299*fdd8201dSApple OSS Distributions 					    &iopl.fIOPL,
4300*fdd8201dSApple OSS Distributions 					    baseInfo,
4301*fdd8201dSApple OSS Distributions 					    &numPageInfo,
4302*fdd8201dSApple OSS Distributions 					    &highPage);
4303*fdd8201dSApple OSS Distributions 				} else {
4304*fdd8201dSApple OSS Distributions 					assert(theMap);
4305*fdd8201dSApple OSS Distributions 					error = vm_map_create_upl(theMap,
4306*fdd8201dSApple OSS Distributions 					    startPage,
4307*fdd8201dSApple OSS Distributions 					    (upl_size_t*)&ioplSize,
4308*fdd8201dSApple OSS Distributions 					    &iopl.fIOPL,
4309*fdd8201dSApple OSS Distributions 					    baseInfo,
4310*fdd8201dSApple OSS Distributions 					    &numPageInfo,
4311*fdd8201dSApple OSS Distributions 					    &ioplFlags,
4312*fdd8201dSApple OSS Distributions 					    tag);
4313*fdd8201dSApple OSS Distributions 				}
4314*fdd8201dSApple OSS Distributions 
4315*fdd8201dSApple OSS Distributions 				if (error != KERN_SUCCESS) {
4316*fdd8201dSApple OSS Distributions 					traceInterval.setEndArg2(error);
4317*fdd8201dSApple OSS Distributions 					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4318*fdd8201dSApple OSS Distributions 					goto abortExit;
4319*fdd8201dSApple OSS Distributions 				}
4320*fdd8201dSApple OSS Distributions 
4321*fdd8201dSApple OSS Distributions 				assert(ioplSize);
4322*fdd8201dSApple OSS Distributions 
4323*fdd8201dSApple OSS Distributions 				if (iopl.fIOPL) {
4324*fdd8201dSApple OSS Distributions 					highPage = upl_get_highest_page(iopl.fIOPL);
4325*fdd8201dSApple OSS Distributions 				}
4326*fdd8201dSApple OSS Distributions 				if (highPage > highestPage) {
4327*fdd8201dSApple OSS Distributions 					highestPage = highPage;
4328*fdd8201dSApple OSS Distributions 				}
4329*fdd8201dSApple OSS Distributions 
4330*fdd8201dSApple OSS Distributions 				if (baseInfo->device) {
4331*fdd8201dSApple OSS Distributions 					numPageInfo = 1;
4332*fdd8201dSApple OSS Distributions 					iopl.fFlags = kIOPLOnDevice;
4333*fdd8201dSApple OSS Distributions 				} else {
4334*fdd8201dSApple OSS Distributions 					iopl.fFlags = 0;
4335*fdd8201dSApple OSS Distributions 				}
4336*fdd8201dSApple OSS Distributions 
4337*fdd8201dSApple OSS Distributions 				if (byteAlignUPL) {
4338*fdd8201dSApple OSS Distributions 					if (iopl.fIOPL) {
4339*fdd8201dSApple OSS Distributions 						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4340*fdd8201dSApple OSS Distributions 						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4341*fdd8201dSApple OSS Distributions 					}
4342*fdd8201dSApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4343*fdd8201dSApple OSS Distributions 						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4344*fdd8201dSApple OSS Distributions 						startPage -= iopl.fPageOffset;
4345*fdd8201dSApple OSS Distributions 					}
4346*fdd8201dSApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4347*fdd8201dSApple OSS Distributions 					numBytes += iopl.fPageOffset;
4348*fdd8201dSApple OSS Distributions 				}
4349*fdd8201dSApple OSS Distributions 
4350*fdd8201dSApple OSS Distributions 				iopl.fIOMDOffset = mdOffset;
4351*fdd8201dSApple OSS Distributions 				iopl.fPageInfo = pageIndex;
4352*fdd8201dSApple OSS Distributions 
4353*fdd8201dSApple OSS Distributions 				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4354*fdd8201dSApple OSS Distributions 					// Clean up partial created and unsaved iopl
4355*fdd8201dSApple OSS Distributions 					if (iopl.fIOPL) {
4356*fdd8201dSApple OSS Distributions 						upl_abort(iopl.fIOPL, 0);
4357*fdd8201dSApple OSS Distributions 						upl_deallocate(iopl.fIOPL);
4358*fdd8201dSApple OSS Distributions 					}
4359*fdd8201dSApple OSS Distributions 					error = kIOReturnNoMemory;
4360*fdd8201dSApple OSS Distributions 					traceInterval.setEndArg2(error);
4361*fdd8201dSApple OSS Distributions 					goto abortExit;
4362*fdd8201dSApple OSS Distributions 				}
4363*fdd8201dSApple OSS Distributions 				dataP = NULL;
4364*fdd8201dSApple OSS Distributions 
4365*fdd8201dSApple OSS Distributions 				// Check for a multiple iopl's in one virtual range
4366*fdd8201dSApple OSS Distributions 				pageIndex += numPageInfo;
4367*fdd8201dSApple OSS Distributions 				mdOffset -= iopl.fPageOffset;
4368*fdd8201dSApple OSS Distributions 				numBytesWired += ioplSize;
4369*fdd8201dSApple OSS Distributions 				if (ioplSize < numBytes) {
4370*fdd8201dSApple OSS Distributions 					numBytes -= ioplSize;
4371*fdd8201dSApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4372*fdd8201dSApple OSS Distributions 						startPage += ioplSize;
4373*fdd8201dSApple OSS Distributions 					}
4374*fdd8201dSApple OSS Distributions 					mdOffset += ioplSize;
4375*fdd8201dSApple OSS Distributions 					iopl.fPageOffset = 0;
4376*fdd8201dSApple OSS Distributions 					if (mapper) {
4377*fdd8201dSApple OSS Distributions 						iopl.fMappedPage = mapBase + pageIndex;
4378*fdd8201dSApple OSS Distributions 					}
4379*fdd8201dSApple OSS Distributions 				} else {
4380*fdd8201dSApple OSS Distributions 					mdOffset += numBytes;
4381*fdd8201dSApple OSS Distributions 					break;
4382*fdd8201dSApple OSS Distributions 				}
4383*fdd8201dSApple OSS Distributions 			}
4384*fdd8201dSApple OSS Distributions 		}
4385*fdd8201dSApple OSS Distributions 
4386*fdd8201dSApple OSS Distributions 		_highestPage = highestPage;
4387*fdd8201dSApple OSS Distributions 		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4388*fdd8201dSApple OSS Distributions 
4389*fdd8201dSApple OSS Distributions 		if (UPL_COPYOUT_FROM & uplFlags) {
4390*fdd8201dSApple OSS Distributions 			_flags |= kIOMemoryPreparedReadOnly;
4391*fdd8201dSApple OSS Distributions 		}
4392*fdd8201dSApple OSS Distributions 		traceInterval.setEndCodes(numBytesWired, error);
4393*fdd8201dSApple OSS Distributions 	}
4394*fdd8201dSApple OSS Distributions 
4395*fdd8201dSApple OSS Distributions #if IOTRACKING
4396*fdd8201dSApple OSS Distributions 	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4397*fdd8201dSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4398*fdd8201dSApple OSS Distributions 		if (!dataP->fWireTracking.link.next) {
4399*fdd8201dSApple OSS Distributions 			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4400*fdd8201dSApple OSS Distributions 		}
4401*fdd8201dSApple OSS Distributions 	}
4402*fdd8201dSApple OSS Distributions #endif /* IOTRACKING */
4403*fdd8201dSApple OSS Distributions 
4404*fdd8201dSApple OSS Distributions 	return error;
4405*fdd8201dSApple OSS Distributions 
4406*fdd8201dSApple OSS Distributions abortExit:
4407*fdd8201dSApple OSS Distributions 	{
4408*fdd8201dSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4409*fdd8201dSApple OSS Distributions 		UInt done = getNumIOPL(_memoryEntries, dataP);
4410*fdd8201dSApple OSS Distributions 		ioPLBlock *ioplList = getIOPLList(dataP);
4411*fdd8201dSApple OSS Distributions 
4412*fdd8201dSApple OSS Distributions 		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4413*fdd8201dSApple OSS Distributions 			if (ioplList[ioplIdx].fIOPL) {
4414*fdd8201dSApple OSS Distributions 				upl_abort(ioplList[ioplIdx].fIOPL, 0);
4415*fdd8201dSApple OSS Distributions 				upl_deallocate(ioplList[ioplIdx].fIOPL);
4416*fdd8201dSApple OSS Distributions 			}
4417*fdd8201dSApple OSS Distributions 		}
4418*fdd8201dSApple OSS Distributions 		_memoryEntries->setLength(computeDataSize(0, 0));
4419*fdd8201dSApple OSS Distributions 	}
4420*fdd8201dSApple OSS Distributions 
4421*fdd8201dSApple OSS Distributions 	if (error == KERN_FAILURE) {
4422*fdd8201dSApple OSS Distributions 		error = kIOReturnCannotWire;
4423*fdd8201dSApple OSS Distributions 	} else if (error == KERN_MEMORY_ERROR) {
4424*fdd8201dSApple OSS Distributions 		error = kIOReturnNoResources;
4425*fdd8201dSApple OSS Distributions 	}
4426*fdd8201dSApple OSS Distributions 
4427*fdd8201dSApple OSS Distributions 	return error;
4428*fdd8201dSApple OSS Distributions }
4429*fdd8201dSApple OSS Distributions 
4430*fdd8201dSApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4431*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4432*fdd8201dSApple OSS Distributions {
4433*fdd8201dSApple OSS Distributions 	ioGMDData * dataP;
4434*fdd8201dSApple OSS Distributions 
4435*fdd8201dSApple OSS Distributions 	if (size > UINT_MAX) {
4436*fdd8201dSApple OSS Distributions 		return false;
4437*fdd8201dSApple OSS Distributions 	}
4438*fdd8201dSApple OSS Distributions 	if (!_memoryEntries) {
4439*fdd8201dSApple OSS Distributions 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4440*fdd8201dSApple OSS Distributions 		if (!_memoryEntries) {
4441*fdd8201dSApple OSS Distributions 			return false;
4442*fdd8201dSApple OSS Distributions 		}
4443*fdd8201dSApple OSS Distributions 	} else if (!_memoryEntries->initWithCapacity(size)) {
4444*fdd8201dSApple OSS Distributions 		return false;
4445*fdd8201dSApple OSS Distributions 	}
4446*fdd8201dSApple OSS Distributions 
4447*fdd8201dSApple OSS Distributions 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4448*fdd8201dSApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4449*fdd8201dSApple OSS Distributions 
4450*fdd8201dSApple OSS Distributions 	if (mapper == kIOMapperWaitSystem) {
4451*fdd8201dSApple OSS Distributions 		IOMapper::checkForSystemMapper();
4452*fdd8201dSApple OSS Distributions 		mapper = IOMapper::gSystem;
4453*fdd8201dSApple OSS Distributions 	}
4454*fdd8201dSApple OSS Distributions 	dataP->fMapper               = mapper;
4455*fdd8201dSApple OSS Distributions 	dataP->fPageCnt              = 0;
4456*fdd8201dSApple OSS Distributions 	dataP->fMappedBase           = 0;
4457*fdd8201dSApple OSS Distributions 	dataP->fDMAMapNumAddressBits = 64;
4458*fdd8201dSApple OSS Distributions 	dataP->fDMAMapAlignment      = 0;
4459*fdd8201dSApple OSS Distributions 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4460*fdd8201dSApple OSS Distributions 	dataP->fCompletionError      = false;
4461*fdd8201dSApple OSS Distributions 	dataP->fMappedBaseValid      = false;
4462*fdd8201dSApple OSS Distributions 
4463*fdd8201dSApple OSS Distributions 	return true;
4464*fdd8201dSApple OSS Distributions }
4465*fdd8201dSApple OSS Distributions 
4466*fdd8201dSApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4467*fdd8201dSApple OSS Distributions IOMemoryDescriptor::dmaMap(
4468*fdd8201dSApple OSS Distributions 	IOMapper                    * mapper,
4469*fdd8201dSApple OSS Distributions 	IOMemoryDescriptor          * memory,
4470*fdd8201dSApple OSS Distributions 	IODMACommand                * command,
4471*fdd8201dSApple OSS Distributions 	const IODMAMapSpecification * mapSpec,
4472*fdd8201dSApple OSS Distributions 	uint64_t                      offset,
4473*fdd8201dSApple OSS Distributions 	uint64_t                      length,
4474*fdd8201dSApple OSS Distributions 	uint64_t                    * mapAddress,
4475*fdd8201dSApple OSS Distributions 	uint64_t                    * mapLength)
4476*fdd8201dSApple OSS Distributions {
4477*fdd8201dSApple OSS Distributions 	IOReturn err;
4478*fdd8201dSApple OSS Distributions 	uint32_t mapOptions;
4479*fdd8201dSApple OSS Distributions 
4480*fdd8201dSApple OSS Distributions 	mapOptions = 0;
4481*fdd8201dSApple OSS Distributions 	mapOptions |= kIODMAMapReadAccess;
4482*fdd8201dSApple OSS Distributions 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4483*fdd8201dSApple OSS Distributions 		mapOptions |= kIODMAMapWriteAccess;
4484*fdd8201dSApple OSS Distributions 	}
4485*fdd8201dSApple OSS Distributions 
4486*fdd8201dSApple OSS Distributions 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4487*fdd8201dSApple OSS Distributions 	    mapSpec, command, NULL, mapAddress, mapLength);
4488*fdd8201dSApple OSS Distributions 
4489*fdd8201dSApple OSS Distributions 	if (kIOReturnSuccess == err) {
4490*fdd8201dSApple OSS Distributions 		dmaMapRecord(mapper, command, *mapLength);
4491*fdd8201dSApple OSS Distributions 	}
4492*fdd8201dSApple OSS Distributions 
4493*fdd8201dSApple OSS Distributions 	return err;
4494*fdd8201dSApple OSS Distributions }
4495*fdd8201dSApple OSS Distributions 
4496*fdd8201dSApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4497*fdd8201dSApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4498*fdd8201dSApple OSS Distributions 	IOMapper                    * mapper,
4499*fdd8201dSApple OSS Distributions 	IODMACommand                * command,
4500*fdd8201dSApple OSS Distributions 	uint64_t                      mapLength)
4501*fdd8201dSApple OSS Distributions {
4502*fdd8201dSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4503*fdd8201dSApple OSS Distributions 	kern_allocation_name_t alloc;
4504*fdd8201dSApple OSS Distributions 	int16_t                prior;
4505*fdd8201dSApple OSS Distributions 
4506*fdd8201dSApple OSS Distributions 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4507*fdd8201dSApple OSS Distributions 		kern_allocation_update_size(mapper->fAllocName, mapLength);
4508*fdd8201dSApple OSS Distributions 	}
4509*fdd8201dSApple OSS Distributions 
4510*fdd8201dSApple OSS Distributions 	if (!command) {
4511*fdd8201dSApple OSS Distributions 		return;
4512*fdd8201dSApple OSS Distributions 	}
4513*fdd8201dSApple OSS Distributions 	prior = OSAddAtomic16(1, &_dmaReferences);
4514*fdd8201dSApple OSS Distributions 	if (!prior) {
4515*fdd8201dSApple OSS Distributions 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4516*fdd8201dSApple OSS Distributions 			_mapName  = alloc;
4517*fdd8201dSApple OSS Distributions 			mapLength = _length;
4518*fdd8201dSApple OSS Distributions 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4519*fdd8201dSApple OSS Distributions 		} else {
4520*fdd8201dSApple OSS Distributions 			_mapName = NULL;
4521*fdd8201dSApple OSS Distributions 		}
4522*fdd8201dSApple OSS Distributions 	}
4523*fdd8201dSApple OSS Distributions }
4524*fdd8201dSApple OSS Distributions 
/*
 * dmaUnmap
 *
 * Tear down a DMA mapping previously made by dmaMap()/dmaMapRecord():
 * drop the per-descriptor DMA reference (if made via an IODMACommand),
 * unmap through the mapper, and reverse the allocation accounting.
 */
IOReturn
IOMemoryDescriptor::dmaUnmap(
	IOMapper                    * mapper,
	IODMACommand                * command,
	uint64_t                      offset,
	uint64_t                      mapAddress,
	uint64_t                      mapLength)
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
	IOReturn ret;
	kern_allocation_name_t alloc;
	kern_allocation_name_t mapName;
	int16_t prior;

	mapName = NULL;
	prior = 0;
	if (command) {
		// Only IODMACommand mappings were counted in dmaMapRecord();
		// capture the recorded name before dropping the reference.
		mapName = _mapName;
		if (_dmaReferences) {
			prior = OSAddAtomic16(-1, &_dmaReferences);
		} else {
			panic("_dmaReferences underflow");
		}
	}

	// Nothing was mapped; the reference drop above still applies.
	if (!mapLength) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

	if ((alloc = mapper->fAllocName)) {
		// Reverse the per-mapping size accounting.
		kern_allocation_update_size(alloc, -mapLength);
		// prior == 1 means this was the last outstanding reference:
		// reverse the whole-descriptor subtotal charged on first map.
		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
			mapLength = _length;
			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
		}
	}

	traceInterval.setEndArg1(ret);
	return ret;
}
4568*fdd8201dSApple OSS Distributions 
/*
 * dmaMap (IOGeneralMemoryDescriptor override)
 *
 * Map this descriptor for DMA.  When the descriptor is wired and the
 * request covers the whole descriptor, the already-built page list in
 * _memoryEntries is handed to the mapper directly; otherwise the
 * request falls through to the generic IOMemoryDescriptor::dmaMap().
 */
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn          err = kIOReturnSuccess;
	ioGMDData *       dataP;
	IOOptionBits      type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	// Host-only memory is never mapped for devices; report success
	// with a zero address.
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Physical descriptors, or partial-range requests, use the
	// generic path which does not rely on a wired page list.
	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t          mapOptions = 0;

		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// Whole pages, starting page-aligned: the page list fully
		// occupies its pages.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		// Propagate the read/write access recorded at wire time.
		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList      = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		// Record for allocation accounting, as the generic path does.
		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4642*fdd8201dSApple OSS Distributions 
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
4652*fdd8201dSApple OSS Distributions 
4653*fdd8201dSApple OSS Distributions IOReturn
prepare(IODirection forDirection)4654*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4655*fdd8201dSApple OSS Distributions {
4656*fdd8201dSApple OSS Distributions 	IOReturn     error    = kIOReturnSuccess;
4657*fdd8201dSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4658*fdd8201dSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4659*fdd8201dSApple OSS Distributions 
4660*fdd8201dSApple OSS Distributions 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4661*fdd8201dSApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnSuccess);
4662*fdd8201dSApple OSS Distributions 		return kIOReturnSuccess;
4663*fdd8201dSApple OSS Distributions 	}
4664*fdd8201dSApple OSS Distributions 
4665*fdd8201dSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
4666*fdd8201dSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
4667*fdd8201dSApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnNotAttached);
4668*fdd8201dSApple OSS Distributions 		return kIOReturnNotAttached;
4669*fdd8201dSApple OSS Distributions 	}
4670*fdd8201dSApple OSS Distributions 
4671*fdd8201dSApple OSS Distributions 	if (_prepareLock) {
4672*fdd8201dSApple OSS Distributions 		IOLockLock(_prepareLock);
4673*fdd8201dSApple OSS Distributions 	}
4674*fdd8201dSApple OSS Distributions 
4675*fdd8201dSApple OSS Distributions 	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4676*fdd8201dSApple OSS Distributions 		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4677*fdd8201dSApple OSS Distributions 			error = kIOReturnNotReady;
4678*fdd8201dSApple OSS Distributions 			goto finish;
4679*fdd8201dSApple OSS Distributions 		}
4680*fdd8201dSApple OSS Distributions 		error = wireVirtual(forDirection);
4681*fdd8201dSApple OSS Distributions 	}
4682*fdd8201dSApple OSS Distributions 
4683*fdd8201dSApple OSS Distributions 	if (kIOReturnSuccess == error) {
4684*fdd8201dSApple OSS Distributions 		if (1 == ++_wireCount) {
4685*fdd8201dSApple OSS Distributions 			if (kIOMemoryClearEncrypt & _flags) {
4686*fdd8201dSApple OSS Distributions 				performOperation(kIOMemoryClearEncrypted, 0, _length);
4687*fdd8201dSApple OSS Distributions 			}
4688*fdd8201dSApple OSS Distributions 
4689*fdd8201dSApple OSS Distributions 			ktraceEmitPhysicalSegments();
4690*fdd8201dSApple OSS Distributions 		}
4691*fdd8201dSApple OSS Distributions 	}
4692*fdd8201dSApple OSS Distributions 
4693*fdd8201dSApple OSS Distributions finish:
4694*fdd8201dSApple OSS Distributions 
4695*fdd8201dSApple OSS Distributions 	if (_prepareLock) {
4696*fdd8201dSApple OSS Distributions 		IOLockUnlock(_prepareLock);
4697*fdd8201dSApple OSS Distributions 	}
4698*fdd8201dSApple OSS Distributions 	traceInterval.setEndArg1(error);
4699*fdd8201dSApple OSS Distributions 
4700*fdd8201dSApple OSS Distributions 	return error;
4701*fdd8201dSApple OSS Distributions }
4702*fdd8201dSApple OSS Distributions 
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
 */
4711*fdd8201dSApple OSS Distributions 
/*
 * complete (IOGeneralMemoryDescriptor override)
 *
 * Undo one prepare(): decrement the wire count and, on the last
 * complete (or when kIODirectionCompleteWithDataValid is passed),
 * commit or abort the wired UPLs and release the DMA mapping.
 * Always returns kIOReturnSuccess for non-physical descriptors.
 */
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData  * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical descriptors are never wired by prepare(); nothing to undo.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// complete() without a matching prepare() is a caller bug.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		// Remember a failed transfer so the final complete aborts
		// (rather than commits) the UPLs.
		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		// Re-encrypt on the last complete if prepare() cleared encryption.
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					// Mark the pages valid without unwiring; the
					// descriptor remains prepared.
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Last complete: fully tear down the wiring.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					// Externally supplied UPL: just drop our reference.
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Truncate _memoryEntries back to the bare header.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4816*fdd8201dSApple OSS Distributions 
4817*fdd8201dSApple OSS Distributions IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4818*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::doMap(
4819*fdd8201dSApple OSS Distributions 	vm_map_t                __addressMap,
4820*fdd8201dSApple OSS Distributions 	IOVirtualAddress *      __address,
4821*fdd8201dSApple OSS Distributions 	IOOptionBits            options,
4822*fdd8201dSApple OSS Distributions 	IOByteCount             __offset,
4823*fdd8201dSApple OSS Distributions 	IOByteCount             __length )
4824*fdd8201dSApple OSS Distributions {
4825*fdd8201dSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4826*fdd8201dSApple OSS Distributions 	traceInterval.setEndArg1(kIOReturnSuccess);
4827*fdd8201dSApple OSS Distributions #ifndef __LP64__
4828*fdd8201dSApple OSS Distributions 	if (!(kIOMap64Bit & options)) {
4829*fdd8201dSApple OSS Distributions 		panic("IOGeneralMemoryDescriptor::doMap !64bit");
4830*fdd8201dSApple OSS Distributions 	}
4831*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
4832*fdd8201dSApple OSS Distributions 
4833*fdd8201dSApple OSS Distributions 	kern_return_t  err;
4834*fdd8201dSApple OSS Distributions 
4835*fdd8201dSApple OSS Distributions 	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
4836*fdd8201dSApple OSS Distributions 	mach_vm_size_t offset  = mapping->fOffset + __offset;
4837*fdd8201dSApple OSS Distributions 	mach_vm_size_t length  = mapping->fLength;
4838*fdd8201dSApple OSS Distributions 
4839*fdd8201dSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4840*fdd8201dSApple OSS Distributions 	Ranges vec = _ranges;
4841*fdd8201dSApple OSS Distributions 
4842*fdd8201dSApple OSS Distributions 	mach_vm_address_t range0Addr = 0;
4843*fdd8201dSApple OSS Distributions 	mach_vm_size_t    range0Len = 0;
4844*fdd8201dSApple OSS Distributions 
4845*fdd8201dSApple OSS Distributions 	if ((offset >= _length) || ((offset + length) > _length)) {
4846*fdd8201dSApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnBadArgument);
4847*fdd8201dSApple OSS Distributions 		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4848*fdd8201dSApple OSS Distributions 		// assert(offset == 0 && _length == 0 && length == 0);
4849*fdd8201dSApple OSS Distributions 		return kIOReturnBadArgument;
4850*fdd8201dSApple OSS Distributions 	}
4851*fdd8201dSApple OSS Distributions 
4852*fdd8201dSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
4853*fdd8201dSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
4854*fdd8201dSApple OSS Distributions 		return 0;
4855*fdd8201dSApple OSS Distributions 	}
4856*fdd8201dSApple OSS Distributions 
4857*fdd8201dSApple OSS Distributions 	if (vec.v) {
4858*fdd8201dSApple OSS Distributions 		getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
4859*fdd8201dSApple OSS Distributions 	}
4860*fdd8201dSApple OSS Distributions 
4861*fdd8201dSApple OSS Distributions 	// mapping source == dest? (could be much better)
4862*fdd8201dSApple OSS Distributions 	if (_task
4863*fdd8201dSApple OSS Distributions 	    && (mapping->fAddressTask == _task)
4864*fdd8201dSApple OSS Distributions 	    && (mapping->fAddressMap == get_task_map(_task))
4865*fdd8201dSApple OSS Distributions 	    && (options & kIOMapAnywhere)
4866*fdd8201dSApple OSS Distributions 	    && (!(kIOMapUnique & options))
4867*fdd8201dSApple OSS Distributions 	    && (!(kIOMapGuardedMask & options))
4868*fdd8201dSApple OSS Distributions 	    && (1 == _rangesCount)
4869*fdd8201dSApple OSS Distributions 	    && (0 == offset)
4870*fdd8201dSApple OSS Distributions 	    && range0Addr
4871*fdd8201dSApple OSS Distributions 	    && (length <= range0Len)) {
4872*fdd8201dSApple OSS Distributions 		mapping->fAddress = range0Addr;
4873*fdd8201dSApple OSS Distributions 		mapping->fOptions |= kIOMapStatic;
4874*fdd8201dSApple OSS Distributions 
4875*fdd8201dSApple OSS Distributions 		return kIOReturnSuccess;
4876*fdd8201dSApple OSS Distributions 	}
4877*fdd8201dSApple OSS Distributions 
4878*fdd8201dSApple OSS Distributions 	if (!_memRef) {
4879*fdd8201dSApple OSS Distributions 		IOOptionBits createOptions = 0;
4880*fdd8201dSApple OSS Distributions 		if (!(kIOMapReadOnly & options)) {
4881*fdd8201dSApple OSS Distributions 			createOptions |= kIOMemoryReferenceWrite;
4882*fdd8201dSApple OSS Distributions #if DEVELOPMENT || DEBUG
4883*fdd8201dSApple OSS Distributions 			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4884*fdd8201dSApple OSS Distributions 			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4885*fdd8201dSApple OSS Distributions 				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4886*fdd8201dSApple OSS Distributions 			}
4887*fdd8201dSApple OSS Distributions #endif
4888*fdd8201dSApple OSS Distributions 		}
4889*fdd8201dSApple OSS Distributions 		err = memoryReferenceCreate(createOptions, &_memRef);
4890*fdd8201dSApple OSS Distributions 		if (kIOReturnSuccess != err) {
4891*fdd8201dSApple OSS Distributions 			traceInterval.setEndArg1(err);
4892*fdd8201dSApple OSS Distributions 			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4893*fdd8201dSApple OSS Distributions 			return err;
4894*fdd8201dSApple OSS Distributions 		}
4895*fdd8201dSApple OSS Distributions 	}
4896*fdd8201dSApple OSS Distributions 
4897*fdd8201dSApple OSS Distributions 	memory_object_t pager;
4898*fdd8201dSApple OSS Distributions 	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4899*fdd8201dSApple OSS Distributions 
4900*fdd8201dSApple OSS Distributions 	// <upl_transpose //
4901*fdd8201dSApple OSS Distributions 	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4902*fdd8201dSApple OSS Distributions 		do{
4903*fdd8201dSApple OSS Distributions 			upl_t               redirUPL2;
4904*fdd8201dSApple OSS Distributions 			upl_size_t          size;
4905*fdd8201dSApple OSS Distributions 			upl_control_flags_t flags;
4906*fdd8201dSApple OSS Distributions 			unsigned int        lock_count;
4907*fdd8201dSApple OSS Distributions 
4908*fdd8201dSApple OSS Distributions 			if (!_memRef || (1 != _memRef->count)) {
4909*fdd8201dSApple OSS Distributions 				err = kIOReturnNotReadable;
4910*fdd8201dSApple OSS Distributions 				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4911*fdd8201dSApple OSS Distributions 				break;
4912*fdd8201dSApple OSS Distributions 			}
4913*fdd8201dSApple OSS Distributions 
4914*fdd8201dSApple OSS Distributions 			size = (upl_size_t) round_page(mapping->fLength);
4915*fdd8201dSApple OSS Distributions 			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4916*fdd8201dSApple OSS Distributions 			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4917*fdd8201dSApple OSS Distributions 
4918*fdd8201dSApple OSS Distributions 			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
4919*fdd8201dSApple OSS Distributions 			    NULL, NULL,
4920*fdd8201dSApple OSS Distributions 			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
4921*fdd8201dSApple OSS Distributions 				redirUPL2 = NULL;
4922*fdd8201dSApple OSS Distributions 			}
4923*fdd8201dSApple OSS Distributions 
4924*fdd8201dSApple OSS Distributions 			for (lock_count = 0;
4925*fdd8201dSApple OSS Distributions 			    IORecursiveLockHaveLock(gIOMemoryLock);
4926*fdd8201dSApple OSS Distributions 			    lock_count++) {
4927*fdd8201dSApple OSS Distributions 				UNLOCK;
4928*fdd8201dSApple OSS Distributions 			}
4929*fdd8201dSApple OSS Distributions 			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
4930*fdd8201dSApple OSS Distributions 			for (;
4931*fdd8201dSApple OSS Distributions 			    lock_count;
4932*fdd8201dSApple OSS Distributions 			    lock_count--) {
4933*fdd8201dSApple OSS Distributions 				LOCK;
4934*fdd8201dSApple OSS Distributions 			}
4935*fdd8201dSApple OSS Distributions 
4936*fdd8201dSApple OSS Distributions 			if (kIOReturnSuccess != err) {
4937*fdd8201dSApple OSS Distributions 				IOLog("upl_transpose(%x)\n", err);
4938*fdd8201dSApple OSS Distributions 				err = kIOReturnSuccess;
4939*fdd8201dSApple OSS Distributions 			}
4940*fdd8201dSApple OSS Distributions 
4941*fdd8201dSApple OSS Distributions 			if (redirUPL2) {
4942*fdd8201dSApple OSS Distributions 				upl_commit(redirUPL2, NULL, 0);
4943*fdd8201dSApple OSS Distributions 				upl_deallocate(redirUPL2);
4944*fdd8201dSApple OSS Distributions 				redirUPL2 = NULL;
4945*fdd8201dSApple OSS Distributions 			}
4946*fdd8201dSApple OSS Distributions 			{
4947*fdd8201dSApple OSS Distributions 				// swap the memEntries since they now refer to different vm_objects
4948*fdd8201dSApple OSS Distributions 				IOMemoryReference * me = _memRef;
4949*fdd8201dSApple OSS Distributions 				_memRef = mapping->fMemory->_memRef;
4950*fdd8201dSApple OSS Distributions 				mapping->fMemory->_memRef = me;
4951*fdd8201dSApple OSS Distributions 			}
4952*fdd8201dSApple OSS Distributions 			if (pager) {
4953*fdd8201dSApple OSS Distributions 				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
4954*fdd8201dSApple OSS Distributions 			}
4955*fdd8201dSApple OSS Distributions 		}while (false);
4956*fdd8201dSApple OSS Distributions 	}
4957*fdd8201dSApple OSS Distributions 	// upl_transpose> //
4958*fdd8201dSApple OSS Distributions 	else {
4959*fdd8201dSApple OSS Distributions 		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4960*fdd8201dSApple OSS Distributions 		if (err) {
4961*fdd8201dSApple OSS Distributions 			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
4962*fdd8201dSApple OSS Distributions 		}
4963*fdd8201dSApple OSS Distributions #if IOTRACKING
4964*fdd8201dSApple OSS Distributions 		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
4965*fdd8201dSApple OSS Distributions 			// only dram maps in the default on developement case
4966*fdd8201dSApple OSS Distributions 			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4967*fdd8201dSApple OSS Distributions 		}
4968*fdd8201dSApple OSS Distributions #endif /* IOTRACKING */
4969*fdd8201dSApple OSS Distributions 		if ((err == KERN_SUCCESS) && pager) {
4970*fdd8201dSApple OSS Distributions 			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4971*fdd8201dSApple OSS Distributions 
4972*fdd8201dSApple OSS Distributions 			if (err != KERN_SUCCESS) {
4973*fdd8201dSApple OSS Distributions 				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4974*fdd8201dSApple OSS Distributions 			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4975*fdd8201dSApple OSS Distributions 				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4976*fdd8201dSApple OSS Distributions 			}
4977*fdd8201dSApple OSS Distributions 		}
4978*fdd8201dSApple OSS Distributions 	}
4979*fdd8201dSApple OSS Distributions 
4980*fdd8201dSApple OSS Distributions 	traceInterval.setEndArg1(err);
4981*fdd8201dSApple OSS Distributions 	if (err) {
4982*fdd8201dSApple OSS Distributions 		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4983*fdd8201dSApple OSS Distributions 	}
4984*fdd8201dSApple OSS Distributions 	return err;
4985*fdd8201dSApple OSS Distributions }
4986*fdd8201dSApple OSS Distributions 
4987*fdd8201dSApple OSS Distributions #if IOTRACKING
4988*fdd8201dSApple OSS Distributions IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking,task_t * task,mach_vm_address_t * address,mach_vm_size_t * size)4989*fdd8201dSApple OSS Distributions IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
4990*fdd8201dSApple OSS Distributions     mach_vm_address_t * address, mach_vm_size_t * size)
4991*fdd8201dSApple OSS Distributions {
4992*fdd8201dSApple OSS Distributions #define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
4993*fdd8201dSApple OSS Distributions 
4994*fdd8201dSApple OSS Distributions 	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
4995*fdd8201dSApple OSS Distributions 
4996*fdd8201dSApple OSS Distributions 	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
4997*fdd8201dSApple OSS Distributions 		return kIOReturnNotReady;
4998*fdd8201dSApple OSS Distributions 	}
4999*fdd8201dSApple OSS Distributions 
5000*fdd8201dSApple OSS Distributions 	*task    = map->fAddressTask;
5001*fdd8201dSApple OSS Distributions 	*address = map->fAddress;
5002*fdd8201dSApple OSS Distributions 	*size    = map->fLength;
5003*fdd8201dSApple OSS Distributions 
5004*fdd8201dSApple OSS Distributions 	return kIOReturnSuccess;
5005*fdd8201dSApple OSS Distributions }
5006*fdd8201dSApple OSS Distributions #endif /* IOTRACKING */
5007*fdd8201dSApple OSS Distributions 
5008*fdd8201dSApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5009*fdd8201dSApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5010*fdd8201dSApple OSS Distributions 	vm_map_t                addressMap,
5011*fdd8201dSApple OSS Distributions 	IOVirtualAddress        __address,
5012*fdd8201dSApple OSS Distributions 	IOByteCount             __length )
5013*fdd8201dSApple OSS Distributions {
5014*fdd8201dSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5015*fdd8201dSApple OSS Distributions 	IOReturn ret;
5016*fdd8201dSApple OSS Distributions 	ret = super::doUnmap(addressMap, __address, __length);
5017*fdd8201dSApple OSS Distributions 	traceInterval.setEndArg1(ret);
5018*fdd8201dSApple OSS Distributions 	return ret;
5019*fdd8201dSApple OSS Distributions }
5020*fdd8201dSApple OSS Distributions 
5021*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5022*fdd8201dSApple OSS Distributions 
5023*fdd8201dSApple OSS Distributions #undef super
5024*fdd8201dSApple OSS Distributions #define super OSObject
5025*fdd8201dSApple OSS Distributions 
5026*fdd8201dSApple OSS Distributions OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )
5027*fdd8201dSApple OSS Distributions 
5028*fdd8201dSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
5029*fdd8201dSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
5030*fdd8201dSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
5031*fdd8201dSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
5032*fdd8201dSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
5033*fdd8201dSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
5034*fdd8201dSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
5035*fdd8201dSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5036*fdd8201dSApple OSS Distributions 
5037*fdd8201dSApple OSS Distributions /* ex-inline function implementation */
5038*fdd8201dSApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()5039*fdd8201dSApple OSS Distributions IOMemoryMap::getPhysicalAddress()
5040*fdd8201dSApple OSS Distributions {
5041*fdd8201dSApple OSS Distributions 	return getPhysicalSegment( 0, NULL );
5042*fdd8201dSApple OSS Distributions }
5043*fdd8201dSApple OSS Distributions 
5044*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5045*fdd8201dSApple OSS Distributions 
5046*fdd8201dSApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5047*fdd8201dSApple OSS Distributions IOMemoryMap::init(
5048*fdd8201dSApple OSS Distributions 	task_t                  intoTask,
5049*fdd8201dSApple OSS Distributions 	mach_vm_address_t       toAddress,
5050*fdd8201dSApple OSS Distributions 	IOOptionBits            _options,
5051*fdd8201dSApple OSS Distributions 	mach_vm_size_t          _offset,
5052*fdd8201dSApple OSS Distributions 	mach_vm_size_t          _length )
5053*fdd8201dSApple OSS Distributions {
5054*fdd8201dSApple OSS Distributions 	if (!intoTask) {
5055*fdd8201dSApple OSS Distributions 		return false;
5056*fdd8201dSApple OSS Distributions 	}
5057*fdd8201dSApple OSS Distributions 
5058*fdd8201dSApple OSS Distributions 	if (!super::init()) {
5059*fdd8201dSApple OSS Distributions 		return false;
5060*fdd8201dSApple OSS Distributions 	}
5061*fdd8201dSApple OSS Distributions 
5062*fdd8201dSApple OSS Distributions 	fAddressMap  = get_task_map(intoTask);
5063*fdd8201dSApple OSS Distributions 	if (!fAddressMap) {
5064*fdd8201dSApple OSS Distributions 		return false;
5065*fdd8201dSApple OSS Distributions 	}
5066*fdd8201dSApple OSS Distributions 	vm_map_reference(fAddressMap);
5067*fdd8201dSApple OSS Distributions 
5068*fdd8201dSApple OSS Distributions 	fAddressTask = intoTask;
5069*fdd8201dSApple OSS Distributions 	fOptions     = _options;
5070*fdd8201dSApple OSS Distributions 	fLength      = _length;
5071*fdd8201dSApple OSS Distributions 	fOffset      = _offset;
5072*fdd8201dSApple OSS Distributions 	fAddress     = toAddress;
5073*fdd8201dSApple OSS Distributions 
5074*fdd8201dSApple OSS Distributions 	return true;
5075*fdd8201dSApple OSS Distributions }
5076*fdd8201dSApple OSS Distributions 
5077*fdd8201dSApple OSS Distributions bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5078*fdd8201dSApple OSS Distributions IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5079*fdd8201dSApple OSS Distributions {
5080*fdd8201dSApple OSS Distributions 	if (!_memory) {
5081*fdd8201dSApple OSS Distributions 		return false;
5082*fdd8201dSApple OSS Distributions 	}
5083*fdd8201dSApple OSS Distributions 
5084*fdd8201dSApple OSS Distributions 	if (!fSuperMap) {
5085*fdd8201dSApple OSS Distributions 		if ((_offset + fLength) > _memory->getLength()) {
5086*fdd8201dSApple OSS Distributions 			return false;
5087*fdd8201dSApple OSS Distributions 		}
5088*fdd8201dSApple OSS Distributions 		fOffset = _offset;
5089*fdd8201dSApple OSS Distributions 	}
5090*fdd8201dSApple OSS Distributions 
5091*fdd8201dSApple OSS Distributions 
5092*fdd8201dSApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5093*fdd8201dSApple OSS Distributions 	if (fMemory) {
5094*fdd8201dSApple OSS Distributions 		if (fMemory != _memory) {
5095*fdd8201dSApple OSS Distributions 			fMemory->removeMapping(this);
5096*fdd8201dSApple OSS Distributions 		}
5097*fdd8201dSApple OSS Distributions 	}
5098*fdd8201dSApple OSS Distributions 	fMemory = os::move(tempval);
5099*fdd8201dSApple OSS Distributions 
5100*fdd8201dSApple OSS Distributions 	return true;
5101*fdd8201dSApple OSS Distributions }
5102*fdd8201dSApple OSS Distributions 
5103*fdd8201dSApple OSS Distributions IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5104*fdd8201dSApple OSS Distributions IOMemoryDescriptor::doMap(
5105*fdd8201dSApple OSS Distributions 	vm_map_t                __addressMap,
5106*fdd8201dSApple OSS Distributions 	IOVirtualAddress *      __address,
5107*fdd8201dSApple OSS Distributions 	IOOptionBits            options,
5108*fdd8201dSApple OSS Distributions 	IOByteCount             __offset,
5109*fdd8201dSApple OSS Distributions 	IOByteCount             __length )
5110*fdd8201dSApple OSS Distributions {
5111*fdd8201dSApple OSS Distributions 	return kIOReturnUnsupported;
5112*fdd8201dSApple OSS Distributions }
5113*fdd8201dSApple OSS Distributions 
5114*fdd8201dSApple OSS Distributions IOReturn
handleFault(void * _pager,mach_vm_size_t sourceOffset,mach_vm_size_t length)5115*fdd8201dSApple OSS Distributions IOMemoryDescriptor::handleFault(
5116*fdd8201dSApple OSS Distributions 	void *                  _pager,
5117*fdd8201dSApple OSS Distributions 	mach_vm_size_t          sourceOffset,
5118*fdd8201dSApple OSS Distributions 	mach_vm_size_t          length)
5119*fdd8201dSApple OSS Distributions {
5120*fdd8201dSApple OSS Distributions 	if (kIOMemoryRedirected & _flags) {
5121*fdd8201dSApple OSS Distributions #if DEBUG
5122*fdd8201dSApple OSS Distributions 		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
5123*fdd8201dSApple OSS Distributions #endif
5124*fdd8201dSApple OSS Distributions 		do {
5125*fdd8201dSApple OSS Distributions 			SLEEP;
5126*fdd8201dSApple OSS Distributions 		} while (kIOMemoryRedirected & _flags);
5127*fdd8201dSApple OSS Distributions 	}
5128*fdd8201dSApple OSS Distributions 	return kIOReturnSuccess;
5129*fdd8201dSApple OSS Distributions }
5130*fdd8201dSApple OSS Distributions 
5131*fdd8201dSApple OSS Distributions IOReturn
populateDevicePager(void * _pager,vm_map_t addressMap,mach_vm_address_t address,mach_vm_size_t sourceOffset,mach_vm_size_t length,IOOptionBits options)5132*fdd8201dSApple OSS Distributions IOMemoryDescriptor::populateDevicePager(
5133*fdd8201dSApple OSS Distributions 	void *                  _pager,
5134*fdd8201dSApple OSS Distributions 	vm_map_t                addressMap,
5135*fdd8201dSApple OSS Distributions 	mach_vm_address_t       address,
5136*fdd8201dSApple OSS Distributions 	mach_vm_size_t          sourceOffset,
5137*fdd8201dSApple OSS Distributions 	mach_vm_size_t          length,
5138*fdd8201dSApple OSS Distributions 	IOOptionBits            options )
5139*fdd8201dSApple OSS Distributions {
5140*fdd8201dSApple OSS Distributions 	IOReturn            err = kIOReturnSuccess;
5141*fdd8201dSApple OSS Distributions 	memory_object_t     pager = (memory_object_t) _pager;
5142*fdd8201dSApple OSS Distributions 	mach_vm_size_t      size;
5143*fdd8201dSApple OSS Distributions 	mach_vm_size_t      bytes;
5144*fdd8201dSApple OSS Distributions 	mach_vm_size_t      page;
5145*fdd8201dSApple OSS Distributions 	mach_vm_size_t      pageOffset;
5146*fdd8201dSApple OSS Distributions 	mach_vm_size_t      pagerOffset;
5147*fdd8201dSApple OSS Distributions 	IOPhysicalLength    segLen, chunk;
5148*fdd8201dSApple OSS Distributions 	addr64_t            physAddr;
5149*fdd8201dSApple OSS Distributions 	IOOptionBits        type;
5150*fdd8201dSApple OSS Distributions 
5151*fdd8201dSApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
5152*fdd8201dSApple OSS Distributions 
5153*fdd8201dSApple OSS Distributions 	if (reserved->dp.pagerContig) {
5154*fdd8201dSApple OSS Distributions 		sourceOffset = 0;
5155*fdd8201dSApple OSS Distributions 		pagerOffset  = 0;
5156*fdd8201dSApple OSS Distributions 	}
5157*fdd8201dSApple OSS Distributions 
5158*fdd8201dSApple OSS Distributions 	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
5159*fdd8201dSApple OSS Distributions 	assert( physAddr );
5160*fdd8201dSApple OSS Distributions 	pageOffset = physAddr - trunc_page_64( physAddr );
5161*fdd8201dSApple OSS Distributions 	pagerOffset = sourceOffset;
5162*fdd8201dSApple OSS Distributions 
5163*fdd8201dSApple OSS Distributions 	size = length + pageOffset;
5164*fdd8201dSApple OSS Distributions 	physAddr -= pageOffset;
5165*fdd8201dSApple OSS Distributions 
5166*fdd8201dSApple OSS Distributions 	segLen += pageOffset;
5167*fdd8201dSApple OSS Distributions 	bytes = size;
5168*fdd8201dSApple OSS Distributions 	do{
5169*fdd8201dSApple OSS Distributions 		// in the middle of the loop only map whole pages
5170*fdd8201dSApple OSS Distributions 		if (segLen >= bytes) {
5171*fdd8201dSApple OSS Distributions 			segLen = bytes;
5172*fdd8201dSApple OSS Distributions 		} else if (segLen != trunc_page_64(segLen)) {
5173*fdd8201dSApple OSS Distributions 			err = kIOReturnVMError;
5174*fdd8201dSApple OSS Distributions 		}
5175*fdd8201dSApple OSS Distributions 		if (physAddr != trunc_page_64(physAddr)) {
5176*fdd8201dSApple OSS Distributions 			err = kIOReturnBadArgument;
5177*fdd8201dSApple OSS Distributions 		}
5178*fdd8201dSApple OSS Distributions 
5179*fdd8201dSApple OSS Distributions 		if (kIOReturnSuccess != err) {
5180*fdd8201dSApple OSS Distributions 			break;
5181*fdd8201dSApple OSS Distributions 		}
5182*fdd8201dSApple OSS Distributions 
5183*fdd8201dSApple OSS Distributions #if DEBUG || DEVELOPMENT
5184*fdd8201dSApple OSS Distributions 		if ((kIOMemoryTypeUPL != type)
5185*fdd8201dSApple OSS Distributions 		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
5186*fdd8201dSApple OSS Distributions 			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
5187*fdd8201dSApple OSS Distributions 			    physAddr, (uint64_t)segLen);
5188*fdd8201dSApple OSS Distributions 		}
5189*fdd8201dSApple OSS Distributions #endif /* DEBUG || DEVELOPMENT */
5190*fdd8201dSApple OSS Distributions 
5191*fdd8201dSApple OSS Distributions 		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
5192*fdd8201dSApple OSS Distributions 		for (page = 0;
5193*fdd8201dSApple OSS Distributions 		    (page < segLen) && (KERN_SUCCESS == err);
5194*fdd8201dSApple OSS Distributions 		    page += chunk) {
5195*fdd8201dSApple OSS Distributions 			err = device_pager_populate_object(pager, pagerOffset,
5196*fdd8201dSApple OSS Distributions 			    (ppnum_t)(atop_64(physAddr + page)), chunk);
5197*fdd8201dSApple OSS Distributions 			pagerOffset += chunk;
5198*fdd8201dSApple OSS Distributions 		}
5199*fdd8201dSApple OSS Distributions 
5200*fdd8201dSApple OSS Distributions 		assert(KERN_SUCCESS == err);
5201*fdd8201dSApple OSS Distributions 		if (err) {
5202*fdd8201dSApple OSS Distributions 			break;
5203*fdd8201dSApple OSS Distributions 		}
5204*fdd8201dSApple OSS Distributions 
5205*fdd8201dSApple OSS Distributions 		// This call to vm_fault causes an early pmap level resolution
5206*fdd8201dSApple OSS Distributions 		// of the mappings created above for kernel mappings, since
5207*fdd8201dSApple OSS Distributions 		// faulting in later can't take place from interrupt level.
5208*fdd8201dSApple OSS Distributions 		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
5209*fdd8201dSApple OSS Distributions 			err = vm_fault(addressMap,
5210*fdd8201dSApple OSS Distributions 			    (vm_map_offset_t)trunc_page_64(address),
5211*fdd8201dSApple OSS Distributions 			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
5212*fdd8201dSApple OSS Distributions 			    FALSE, VM_KERN_MEMORY_NONE,
5213*fdd8201dSApple OSS Distributions 			    THREAD_UNINT, NULL,
5214*fdd8201dSApple OSS Distributions 			    (vm_map_offset_t)0);
5215*fdd8201dSApple OSS Distributions 
5216*fdd8201dSApple OSS Distributions 			if (KERN_SUCCESS != err) {
5217*fdd8201dSApple OSS Distributions 				break;
5218*fdd8201dSApple OSS Distributions 			}
5219*fdd8201dSApple OSS Distributions 		}
5220*fdd8201dSApple OSS Distributions 
5221*fdd8201dSApple OSS Distributions 		sourceOffset += segLen - pageOffset;
5222*fdd8201dSApple OSS Distributions 		address += segLen;
5223*fdd8201dSApple OSS Distributions 		bytes -= segLen;
5224*fdd8201dSApple OSS Distributions 		pageOffset = 0;
5225*fdd8201dSApple OSS Distributions 	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
5226*fdd8201dSApple OSS Distributions 
5227*fdd8201dSApple OSS Distributions 	if (bytes) {
5228*fdd8201dSApple OSS Distributions 		err = kIOReturnBadArgument;
5229*fdd8201dSApple OSS Distributions 	}
5230*fdd8201dSApple OSS Distributions 
5231*fdd8201dSApple OSS Distributions 	return err;
5232*fdd8201dSApple OSS Distributions }
5233*fdd8201dSApple OSS Distributions 
5234*fdd8201dSApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5235*fdd8201dSApple OSS Distributions IOMemoryDescriptor::doUnmap(
5236*fdd8201dSApple OSS Distributions 	vm_map_t                addressMap,
5237*fdd8201dSApple OSS Distributions 	IOVirtualAddress        __address,
5238*fdd8201dSApple OSS Distributions 	IOByteCount             __length )
5239*fdd8201dSApple OSS Distributions {
5240*fdd8201dSApple OSS Distributions 	IOReturn          err;
5241*fdd8201dSApple OSS Distributions 	IOMemoryMap *     mapping;
5242*fdd8201dSApple OSS Distributions 	mach_vm_address_t address;
5243*fdd8201dSApple OSS Distributions 	mach_vm_size_t    length;
5244*fdd8201dSApple OSS Distributions 
5245*fdd8201dSApple OSS Distributions 	if (__length) {
5246*fdd8201dSApple OSS Distributions 		panic("doUnmap");
5247*fdd8201dSApple OSS Distributions 	}
5248*fdd8201dSApple OSS Distributions 
5249*fdd8201dSApple OSS Distributions 	mapping = (IOMemoryMap *) __address;
5250*fdd8201dSApple OSS Distributions 	addressMap = mapping->fAddressMap;
5251*fdd8201dSApple OSS Distributions 	address    = mapping->fAddress;
5252*fdd8201dSApple OSS Distributions 	length     = mapping->fLength;
5253*fdd8201dSApple OSS Distributions 
5254*fdd8201dSApple OSS Distributions 	if (kIOMapOverwrite & mapping->fOptions) {
5255*fdd8201dSApple OSS Distributions 		err = KERN_SUCCESS;
5256*fdd8201dSApple OSS Distributions 	} else {
5257*fdd8201dSApple OSS Distributions 		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
5258*fdd8201dSApple OSS Distributions 			addressMap = IOPageableMapForAddress( address );
5259*fdd8201dSApple OSS Distributions 		}
5260*fdd8201dSApple OSS Distributions #if DEBUG
5261*fdd8201dSApple OSS Distributions 		if (kIOLogMapping & gIOKitDebug) {
5262*fdd8201dSApple OSS Distributions 			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
5263*fdd8201dSApple OSS Distributions 			    addressMap, address, length );
5264*fdd8201dSApple OSS Distributions 		}
5265*fdd8201dSApple OSS Distributions #endif
5266*fdd8201dSApple OSS Distributions 		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
5267*fdd8201dSApple OSS Distributions 		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
5268*fdd8201dSApple OSS Distributions 			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
5269*fdd8201dSApple OSS Distributions 		}
5270*fdd8201dSApple OSS Distributions 	}
5271*fdd8201dSApple OSS Distributions 
5272*fdd8201dSApple OSS Distributions #if IOTRACKING
5273*fdd8201dSApple OSS Distributions 	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
5274*fdd8201dSApple OSS Distributions #endif /* IOTRACKING */
5275*fdd8201dSApple OSS Distributions 
5276*fdd8201dSApple OSS Distributions 	return err;
5277*fdd8201dSApple OSS Distributions }
5278*fdd8201dSApple OSS Distributions 
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	// Toggle the "redirected" state of this descriptor and propagate the
	// change to every live mapping.  While redirected, faults on the
	// descriptor's mappings are held off; callers blocked on the redirect
	// state are woken when it is cleared.
	IOReturn            err = kIOReturnSuccess;
	IOMemoryMap *       mapping = NULL;
	OSSharedPtr<OSIterator>        iter;

	LOCK;

	// Record the new state first so that mappings created concurrently
	// observe it.
	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t   pager;

			// The device pager (if any) lives in the reserved area.
			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// When globally un-redirecting (no safeTask) a pager-backed
				// kernel mapping, eagerly re-populate its pages rather than
				// waiting for faults.
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	// Wake any threads sleeping until the redirect is lifted.
	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5333*fdd8201dSApple OSS Distributions 
5334*fdd8201dSApple OSS Distributions IOReturn
redirect(task_t safeTask,bool doRedirect)5335*fdd8201dSApple OSS Distributions IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
5336*fdd8201dSApple OSS Distributions {
5337*fdd8201dSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
5338*fdd8201dSApple OSS Distributions 
5339*fdd8201dSApple OSS Distributions 	if (fSuperMap) {
5340*fdd8201dSApple OSS Distributions //        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
5341*fdd8201dSApple OSS Distributions 	} else {
5342*fdd8201dSApple OSS Distributions 		LOCK;
5343*fdd8201dSApple OSS Distributions 
5344*fdd8201dSApple OSS Distributions 		do{
5345*fdd8201dSApple OSS Distributions 			if (!fAddress) {
5346*fdd8201dSApple OSS Distributions 				break;
5347*fdd8201dSApple OSS Distributions 			}
5348*fdd8201dSApple OSS Distributions 			if (!fAddressMap) {
5349*fdd8201dSApple OSS Distributions 				break;
5350*fdd8201dSApple OSS Distributions 			}
5351*fdd8201dSApple OSS Distributions 
5352*fdd8201dSApple OSS Distributions 			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
5353*fdd8201dSApple OSS Distributions 			    && (0 == (fOptions & kIOMapStatic))) {
5354*fdd8201dSApple OSS Distributions 				IOUnmapPages( fAddressMap, fAddress, fLength );
5355*fdd8201dSApple OSS Distributions 				err = kIOReturnSuccess;
5356*fdd8201dSApple OSS Distributions #if DEBUG
5357*fdd8201dSApple OSS Distributions 				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
5358*fdd8201dSApple OSS Distributions #endif
5359*fdd8201dSApple OSS Distributions 			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
5360*fdd8201dSApple OSS Distributions 				IOOptionBits newMode;
5361*fdd8201dSApple OSS Distributions 				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
5362*fdd8201dSApple OSS Distributions 				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
5363*fdd8201dSApple OSS Distributions 			}
5364*fdd8201dSApple OSS Distributions 		}while (false);
5365*fdd8201dSApple OSS Distributions 		UNLOCK;
5366*fdd8201dSApple OSS Distributions 	}
5367*fdd8201dSApple OSS Distributions 
5368*fdd8201dSApple OSS Distributions 	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5369*fdd8201dSApple OSS Distributions 	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
5370*fdd8201dSApple OSS Distributions 	    && safeTask
5371*fdd8201dSApple OSS Distributions 	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
5372*fdd8201dSApple OSS Distributions 		fMemory->redirect(safeTask, doRedirect);
5373*fdd8201dSApple OSS Distributions 	}
5374*fdd8201dSApple OSS Distributions 
5375*fdd8201dSApple OSS Distributions 	return err;
5376*fdd8201dSApple OSS Distributions }
5377*fdd8201dSApple OSS Distributions 
5378*fdd8201dSApple OSS Distributions IOReturn
unmap(void)5379*fdd8201dSApple OSS Distributions IOMemoryMap::unmap( void )
5380*fdd8201dSApple OSS Distributions {
5381*fdd8201dSApple OSS Distributions 	IOReturn    err;
5382*fdd8201dSApple OSS Distributions 
5383*fdd8201dSApple OSS Distributions 	LOCK;
5384*fdd8201dSApple OSS Distributions 
5385*fdd8201dSApple OSS Distributions 	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5386*fdd8201dSApple OSS Distributions 	    && (0 == (kIOMapStatic & fOptions))) {
5387*fdd8201dSApple OSS Distributions 		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5388*fdd8201dSApple OSS Distributions 	} else {
5389*fdd8201dSApple OSS Distributions 		err = kIOReturnSuccess;
5390*fdd8201dSApple OSS Distributions 	}
5391*fdd8201dSApple OSS Distributions 
5392*fdd8201dSApple OSS Distributions 	if (fAddressMap) {
5393*fdd8201dSApple OSS Distributions 		vm_map_deallocate(fAddressMap);
5394*fdd8201dSApple OSS Distributions 		fAddressMap = NULL;
5395*fdd8201dSApple OSS Distributions 	}
5396*fdd8201dSApple OSS Distributions 
5397*fdd8201dSApple OSS Distributions 	fAddress = 0;
5398*fdd8201dSApple OSS Distributions 
5399*fdd8201dSApple OSS Distributions 	UNLOCK;
5400*fdd8201dSApple OSS Distributions 
5401*fdd8201dSApple OSS Distributions 	return err;
5402*fdd8201dSApple OSS Distributions }
5403*fdd8201dSApple OSS Distributions 
5404*fdd8201dSApple OSS Distributions void
taskDied(void)5405*fdd8201dSApple OSS Distributions IOMemoryMap::taskDied( void )
5406*fdd8201dSApple OSS Distributions {
5407*fdd8201dSApple OSS Distributions 	LOCK;
5408*fdd8201dSApple OSS Distributions 	if (fUserClientUnmap) {
5409*fdd8201dSApple OSS Distributions 		unmap();
5410*fdd8201dSApple OSS Distributions 	}
5411*fdd8201dSApple OSS Distributions #if IOTRACKING
5412*fdd8201dSApple OSS Distributions 	else {
5413*fdd8201dSApple OSS Distributions 		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
5414*fdd8201dSApple OSS Distributions 	}
5415*fdd8201dSApple OSS Distributions #endif /* IOTRACKING */
5416*fdd8201dSApple OSS Distributions 
5417*fdd8201dSApple OSS Distributions 	if (fAddressMap) {
5418*fdd8201dSApple OSS Distributions 		vm_map_deallocate(fAddressMap);
5419*fdd8201dSApple OSS Distributions 		fAddressMap = NULL;
5420*fdd8201dSApple OSS Distributions 	}
5421*fdd8201dSApple OSS Distributions 	fAddressTask = NULL;
5422*fdd8201dSApple OSS Distributions 	fAddress     = 0;
5423*fdd8201dSApple OSS Distributions 	UNLOCK;
5424*fdd8201dSApple OSS Distributions }
5425*fdd8201dSApple OSS Distributions 
5426*fdd8201dSApple OSS Distributions IOReturn
userClientUnmap(void)5427*fdd8201dSApple OSS Distributions IOMemoryMap::userClientUnmap( void )
5428*fdd8201dSApple OSS Distributions {
5429*fdd8201dSApple OSS Distributions 	fUserClientUnmap = true;
5430*fdd8201dSApple OSS Distributions 	return kIOReturnSuccess;
5431*fdd8201dSApple OSS Distributions }
5432*fdd8201dSApple OSS Distributions 
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptors _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these mappings
// are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// freeWhen == 2: free() fires while the descriptor's _mappings set
	// still holds its reference; the lock serializes against concurrent
	// retain/release on the same mapping.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5444*fdd8201dSApple OSS Distributions 
void
IOMemoryMap::free()
{
	// Final teardown: unmap first (while fMemory is still valid), then
	// detach from the owning descriptor, parent submap, and any pending
	// redirect UPL.
	unmap();

	if (fMemory) {
		LOCK;
		// Remove ourselves from the descriptor's _mappings set under the
		// global lock, then drop our reference on the descriptor.
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// A leftover redirect UPL still holds pages blocked; commit and
	// release it so they are usable again.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5468*fdd8201dSApple OSS Distributions 
// Accessor: length in bytes of the mapped range.
IOByteCount
IOMemoryMap::getLength()
{
	return fLength;
}
5474*fdd8201dSApple OSS Distributions 
// Legacy accessor for the mapped virtual address.  On 32-bit builds this
// warns when the address may not fit in a 32-bit IOVirtualAddress.
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// NOTE(review): return value intentionally discarded — the call
		// appears to exist only to trigger the parent's 64-bit-map check;
		// fAddress is returned below in either case.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		// Address may be truncated when returned as a 32-bit value.
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5490*fdd8201dSApple OSS Distributions 
#ifndef __LP64__
// Full-width accessors for 32-bit builds (on LP64 these are provided
// elsewhere): the mapped address and size as 64-bit quantities.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5504*fdd8201dSApple OSS Distributions 
5505*fdd8201dSApple OSS Distributions 
5506*fdd8201dSApple OSS Distributions task_t
getAddressTask()5507*fdd8201dSApple OSS Distributions IOMemoryMap::getAddressTask()
5508*fdd8201dSApple OSS Distributions {
5509*fdd8201dSApple OSS Distributions 	if (fSuperMap) {
5510*fdd8201dSApple OSS Distributions 		return fSuperMap->getAddressTask();
5511*fdd8201dSApple OSS Distributions 	} else {
5512*fdd8201dSApple OSS Distributions 		return fAddressTask;
5513*fdd8201dSApple OSS Distributions 	}
5514*fdd8201dSApple OSS Distributions }
5515*fdd8201dSApple OSS Distributions 
5516*fdd8201dSApple OSS Distributions IOOptionBits
getMapOptions()5517*fdd8201dSApple OSS Distributions IOMemoryMap::getMapOptions()
5518*fdd8201dSApple OSS Distributions {
5519*fdd8201dSApple OSS Distributions 	return fOptions;
5520*fdd8201dSApple OSS Distributions }
5521*fdd8201dSApple OSS Distributions 
// Accessor: borrowed (non-retained) pointer to the backing descriptor.
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
5527*fdd8201dSApple OSS Distributions 
5528*fdd8201dSApple OSS Distributions IOMemoryMap *
copyCompatible(IOMemoryMap * newMapping)5529*fdd8201dSApple OSS Distributions IOMemoryMap::copyCompatible(
5530*fdd8201dSApple OSS Distributions 	IOMemoryMap * newMapping )
5531*fdd8201dSApple OSS Distributions {
5532*fdd8201dSApple OSS Distributions 	task_t              task      = newMapping->getAddressTask();
5533*fdd8201dSApple OSS Distributions 	mach_vm_address_t   toAddress = newMapping->fAddress;
5534*fdd8201dSApple OSS Distributions 	IOOptionBits        _options  = newMapping->fOptions;
5535*fdd8201dSApple OSS Distributions 	mach_vm_size_t      _offset   = newMapping->fOffset;
5536*fdd8201dSApple OSS Distributions 	mach_vm_size_t      _length   = newMapping->fLength;
5537*fdd8201dSApple OSS Distributions 
5538*fdd8201dSApple OSS Distributions 	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5539*fdd8201dSApple OSS Distributions 		return NULL;
5540*fdd8201dSApple OSS Distributions 	}
5541*fdd8201dSApple OSS Distributions 	if ((fOptions ^ _options) & kIOMapReadOnly) {
5542*fdd8201dSApple OSS Distributions 		return NULL;
5543*fdd8201dSApple OSS Distributions 	}
5544*fdd8201dSApple OSS Distributions 	if ((fOptions ^ _options) & kIOMapGuardedMask) {
5545*fdd8201dSApple OSS Distributions 		return NULL;
5546*fdd8201dSApple OSS Distributions 	}
5547*fdd8201dSApple OSS Distributions 	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5548*fdd8201dSApple OSS Distributions 	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
5549*fdd8201dSApple OSS Distributions 		return NULL;
5550*fdd8201dSApple OSS Distributions 	}
5551*fdd8201dSApple OSS Distributions 
5552*fdd8201dSApple OSS Distributions 	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5553*fdd8201dSApple OSS Distributions 		return NULL;
5554*fdd8201dSApple OSS Distributions 	}
5555*fdd8201dSApple OSS Distributions 
5556*fdd8201dSApple OSS Distributions 	if (_offset < fOffset) {
5557*fdd8201dSApple OSS Distributions 		return NULL;
5558*fdd8201dSApple OSS Distributions 	}
5559*fdd8201dSApple OSS Distributions 
5560*fdd8201dSApple OSS Distributions 	_offset -= fOffset;
5561*fdd8201dSApple OSS Distributions 
5562*fdd8201dSApple OSS Distributions 	if ((_offset + _length) > fLength) {
5563*fdd8201dSApple OSS Distributions 		return NULL;
5564*fdd8201dSApple OSS Distributions 	}
5565*fdd8201dSApple OSS Distributions 
5566*fdd8201dSApple OSS Distributions 	if ((fLength == _length) && (!_offset)) {
5567*fdd8201dSApple OSS Distributions 		retain();
5568*fdd8201dSApple OSS Distributions 		newMapping = this;
5569*fdd8201dSApple OSS Distributions 	} else {
5570*fdd8201dSApple OSS Distributions 		newMapping->fSuperMap.reset(this, OSRetain);
5571*fdd8201dSApple OSS Distributions 		newMapping->fOffset   = fOffset + _offset;
5572*fdd8201dSApple OSS Distributions 		newMapping->fAddress  = fAddress + _offset;
5573*fdd8201dSApple OSS Distributions 	}
5574*fdd8201dSApple OSS Distributions 
5575*fdd8201dSApple OSS Distributions 	return newMapping;
5576*fdd8201dSApple OSS Distributions }
5577*fdd8201dSApple OSS Distributions 
5578*fdd8201dSApple OSS Distributions IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5579*fdd8201dSApple OSS Distributions IOMemoryMap::wireRange(
5580*fdd8201dSApple OSS Distributions 	uint32_t                options,
5581*fdd8201dSApple OSS Distributions 	mach_vm_size_t          offset,
5582*fdd8201dSApple OSS Distributions 	mach_vm_size_t          length)
5583*fdd8201dSApple OSS Distributions {
5584*fdd8201dSApple OSS Distributions 	IOReturn kr;
5585*fdd8201dSApple OSS Distributions 	mach_vm_address_t start = trunc_page_64(fAddress + offset);
5586*fdd8201dSApple OSS Distributions 	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
5587*fdd8201dSApple OSS Distributions 	vm_prot_t prot;
5588*fdd8201dSApple OSS Distributions 
5589*fdd8201dSApple OSS Distributions 	prot = (kIODirectionOutIn & options);
5590*fdd8201dSApple OSS Distributions 	if (prot) {
5591*fdd8201dSApple OSS Distributions 		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5592*fdd8201dSApple OSS Distributions 	} else {
5593*fdd8201dSApple OSS Distributions 		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5594*fdd8201dSApple OSS Distributions 	}
5595*fdd8201dSApple OSS Distributions 
5596*fdd8201dSApple OSS Distributions 	return kr;
5597*fdd8201dSApple OSS Distributions }
5598*fdd8201dSApple OSS Distributions 
5599*fdd8201dSApple OSS Distributions 
// Return the physical address backing `_offset` within this mapping, by
// delegating to the descriptor with the mapping's own offset added.
// *_length receives the contiguous run length at that address.
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress   address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5619*fdd8201dSApple OSS Distributions 
5620*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5621*fdd8201dSApple OSS Distributions 
5622*fdd8201dSApple OSS Distributions #undef super
5623*fdd8201dSApple OSS Distributions #define super OSObject
5624*fdd8201dSApple OSS Distributions 
5625*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5626*fdd8201dSApple OSS Distributions 
5627*fdd8201dSApple OSS Distributions void
initialize(void)5628*fdd8201dSApple OSS Distributions IOMemoryDescriptor::initialize( void )
5629*fdd8201dSApple OSS Distributions {
5630*fdd8201dSApple OSS Distributions 	if (NULL == gIOMemoryLock) {
5631*fdd8201dSApple OSS Distributions 		gIOMemoryLock = IORecursiveLockAlloc();
5632*fdd8201dSApple OSS Distributions 	}
5633*fdd8201dSApple OSS Distributions 
5634*fdd8201dSApple OSS Distributions 	gIOLastPage = IOGetLastPageNumber();
5635*fdd8201dSApple OSS Distributions }
5636*fdd8201dSApple OSS Distributions 
5637*fdd8201dSApple OSS Distributions void
free(void)5638*fdd8201dSApple OSS Distributions IOMemoryDescriptor::free( void )
5639*fdd8201dSApple OSS Distributions {
5640*fdd8201dSApple OSS Distributions 	if (_mappings) {
5641*fdd8201dSApple OSS Distributions 		_mappings.reset();
5642*fdd8201dSApple OSS Distributions 	}
5643*fdd8201dSApple OSS Distributions 
5644*fdd8201dSApple OSS Distributions 	if (reserved) {
5645*fdd8201dSApple OSS Distributions 		cleanKernelReserved(reserved);
5646*fdd8201dSApple OSS Distributions 		IOFreeType(reserved, IOMemoryDescriptorReserved);
5647*fdd8201dSApple OSS Distributions 		reserved = NULL;
5648*fdd8201dSApple OSS Distributions 	}
5649*fdd8201dSApple OSS Distributions 	super::free();
5650*fdd8201dSApple OSS Distributions }
5651*fdd8201dSApple OSS Distributions 
5652*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5653*fdd8201dSApple OSS Distributions IOMemoryDescriptor::setMapping(
5654*fdd8201dSApple OSS Distributions 	task_t                  intoTask,
5655*fdd8201dSApple OSS Distributions 	IOVirtualAddress        mapAddress,
5656*fdd8201dSApple OSS Distributions 	IOOptionBits            options )
5657*fdd8201dSApple OSS Distributions {
5658*fdd8201dSApple OSS Distributions 	return createMappingInTask( intoTask, mapAddress,
5659*fdd8201dSApple OSS Distributions 	           options | kIOMapStatic,
5660*fdd8201dSApple OSS Distributions 	           0, getLength());
5661*fdd8201dSApple OSS Distributions }
5662*fdd8201dSApple OSS Distributions 
5663*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5664*fdd8201dSApple OSS Distributions IOMemoryDescriptor::map(
5665*fdd8201dSApple OSS Distributions 	IOOptionBits            options )
5666*fdd8201dSApple OSS Distributions {
5667*fdd8201dSApple OSS Distributions 	return createMappingInTask( kernel_task, 0,
5668*fdd8201dSApple OSS Distributions 	           options | kIOMapAnywhere,
5669*fdd8201dSApple OSS Distributions 	           0, getLength());
5670*fdd8201dSApple OSS Distributions }
5671*fdd8201dSApple OSS Distributions 
#ifndef __LP64__
// Legacy 32-bit mapping entry point.  Fixed-address requests into a
// 64-bit task are rejected (the 32-bit atAddress cannot express them);
// callers should use createMappingInTask() instead.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5690*fdd8201dSApple OSS Distributions 
5691*fdd8201dSApple OSS Distributions OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5692*fdd8201dSApple OSS Distributions IOMemoryDescriptor::createMappingInTask(
5693*fdd8201dSApple OSS Distributions 	task_t                  intoTask,
5694*fdd8201dSApple OSS Distributions 	mach_vm_address_t       atAddress,
5695*fdd8201dSApple OSS Distributions 	IOOptionBits            options,
5696*fdd8201dSApple OSS Distributions 	mach_vm_size_t          offset,
5697*fdd8201dSApple OSS Distributions 	mach_vm_size_t          length)
5698*fdd8201dSApple OSS Distributions {
5699*fdd8201dSApple OSS Distributions 	IOMemoryMap * result;
5700*fdd8201dSApple OSS Distributions 	IOMemoryMap * mapping;
5701*fdd8201dSApple OSS Distributions 
5702*fdd8201dSApple OSS Distributions 	if (0 == length) {
5703*fdd8201dSApple OSS Distributions 		length = getLength();
5704*fdd8201dSApple OSS Distributions 	}
5705*fdd8201dSApple OSS Distributions 
5706*fdd8201dSApple OSS Distributions 	mapping = new IOMemoryMap;
5707*fdd8201dSApple OSS Distributions 
5708*fdd8201dSApple OSS Distributions 	if (mapping
5709*fdd8201dSApple OSS Distributions 	    && !mapping->init( intoTask, atAddress,
5710*fdd8201dSApple OSS Distributions 	    options, offset, length )) {
5711*fdd8201dSApple OSS Distributions 		mapping->release();
5712*fdd8201dSApple OSS Distributions 		mapping = NULL;
5713*fdd8201dSApple OSS Distributions 	}
5714*fdd8201dSApple OSS Distributions 
5715*fdd8201dSApple OSS Distributions 	if (mapping) {
5716*fdd8201dSApple OSS Distributions 		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5717*fdd8201dSApple OSS Distributions 	} else {
5718*fdd8201dSApple OSS Distributions 		result = nullptr;
5719*fdd8201dSApple OSS Distributions 	}
5720*fdd8201dSApple OSS Distributions 
5721*fdd8201dSApple OSS Distributions #if DEBUG
5722*fdd8201dSApple OSS Distributions 	if (!result) {
5723*fdd8201dSApple OSS Distributions 		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5724*fdd8201dSApple OSS Distributions 		    this, atAddress, (uint32_t) options, offset, length);
5725*fdd8201dSApple OSS Distributions 	}
5726*fdd8201dSApple OSS Distributions #endif
5727*fdd8201dSApple OSS Distributions 
5728*fdd8201dSApple OSS Distributions 	// already retained through makeMapping
5729*fdd8201dSApple OSS Distributions 	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5730*fdd8201dSApple OSS Distributions 
5731*fdd8201dSApple OSS Distributions 	return retval;
5732*fdd8201dSApple OSS Distributions }
5733*fdd8201dSApple OSS Distributions 
#ifndef __LP64__ // there is only a 64 bit version for LP64
// Binary-compatibility shim: widen the 32-bit offset and forward to the
// mach_vm_size_t overload.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5743*fdd8201dSApple OSS Distributions 
5744*fdd8201dSApple OSS Distributions IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,mach_vm_size_t offset)5745*fdd8201dSApple OSS Distributions IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5746*fdd8201dSApple OSS Distributions     IOOptionBits         options,
5747*fdd8201dSApple OSS Distributions     mach_vm_size_t       offset)
5748*fdd8201dSApple OSS Distributions {
5749*fdd8201dSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
5750*fdd8201dSApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> physMem;
5751*fdd8201dSApple OSS Distributions 
5752*fdd8201dSApple OSS Distributions 	LOCK;
5753*fdd8201dSApple OSS Distributions 
5754*fdd8201dSApple OSS Distributions 	if (fAddress && fAddressMap) {
5755*fdd8201dSApple OSS Distributions 		do{
5756*fdd8201dSApple OSS Distributions 			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5757*fdd8201dSApple OSS Distributions 			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5758*fdd8201dSApple OSS Distributions 				physMem = fMemory;
5759*fdd8201dSApple OSS Distributions 			}
5760*fdd8201dSApple OSS Distributions 
5761*fdd8201dSApple OSS Distributions 			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
5762*fdd8201dSApple OSS Distributions 				upl_size_t          size = (typeof(size))round_page(fLength);
5763*fdd8201dSApple OSS Distributions 				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5764*fdd8201dSApple OSS Distributions 				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5765*fdd8201dSApple OSS Distributions 				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
5766*fdd8201dSApple OSS Distributions 				    NULL, NULL,
5767*fdd8201dSApple OSS Distributions 				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
5768*fdd8201dSApple OSS Distributions 					fRedirUPL = NULL;
5769*fdd8201dSApple OSS Distributions 				}
5770*fdd8201dSApple OSS Distributions 
5771*fdd8201dSApple OSS Distributions 				if (physMem) {
5772*fdd8201dSApple OSS Distributions 					IOUnmapPages( fAddressMap, fAddress, fLength );
5773*fdd8201dSApple OSS Distributions 					if ((false)) {
5774*fdd8201dSApple OSS Distributions 						physMem->redirect(NULL, true);
5775*fdd8201dSApple OSS Distributions 					}
5776*fdd8201dSApple OSS Distributions 				}
5777*fdd8201dSApple OSS Distributions 			}
5778*fdd8201dSApple OSS Distributions 
5779*fdd8201dSApple OSS Distributions 			if (newBackingMemory) {
5780*fdd8201dSApple OSS Distributions 				if (newBackingMemory != fMemory) {
5781*fdd8201dSApple OSS Distributions 					fOffset = 0;
5782*fdd8201dSApple OSS Distributions 					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
5783*fdd8201dSApple OSS Distributions 					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
5784*fdd8201dSApple OSS Distributions 					    offset, fLength)) {
5785*fdd8201dSApple OSS Distributions 						err = kIOReturnError;
5786*fdd8201dSApple OSS Distributions 					}
5787*fdd8201dSApple OSS Distributions 				}
5788*fdd8201dSApple OSS Distributions 				if (fRedirUPL) {
5789*fdd8201dSApple OSS Distributions 					upl_commit(fRedirUPL, NULL, 0);
5790*fdd8201dSApple OSS Distributions 					upl_deallocate(fRedirUPL);
5791*fdd8201dSApple OSS Distributions 					fRedirUPL = NULL;
5792*fdd8201dSApple OSS Distributions 				}
5793*fdd8201dSApple OSS Distributions 				if ((false) && physMem) {
5794*fdd8201dSApple OSS Distributions 					physMem->redirect(NULL, false);
5795*fdd8201dSApple OSS Distributions 				}
5796*fdd8201dSApple OSS Distributions 			}
5797*fdd8201dSApple OSS Distributions 		}while (false);
5798*fdd8201dSApple OSS Distributions 	}
5799*fdd8201dSApple OSS Distributions 
5800*fdd8201dSApple OSS Distributions 	UNLOCK;
5801*fdd8201dSApple OSS Distributions 
5802*fdd8201dSApple OSS Distributions 	return err;
5803*fdd8201dSApple OSS Distributions }
5804*fdd8201dSApple OSS Distributions 
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// Create (or locate) an IOMemoryMap for this descriptor.  In the
	// kIOMap64Bit calling convention __address carries a pre-constructed
	// IOMemoryMap object, not a virtual address; __offset is folded into
	// that map's fOffset.  On success the recorded map (possibly a
	// compatible pre-existing one, not the caller's) is returned; on
	// failure the caller's map object is released and NULL is returned.
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result  = NULL;

	// The incoming map object and its effective offset/length.
	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	// Serialize with other mapping operations on this descriptor.
	LOCK;

	// Each 'continue' below exits this do { } while (false) block.
	do{
		if (kIOMapStatic & options) {
			// Static mapping: record it as-is, no VM work required.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount       physLen;

//	    if (owner != this)		continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Physical-range descriptor: build a temporary descriptor
				// covering exactly the requested physical span, and map
				// through that instead (with offset rebased to zero).
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
					        addMapping(result);
					        result->setMemoryDescriptor(this, offset);
					        return true;
					}
					return false;
				});
			}
			if (result || (options & kIOMapReference)) {
				// Found a shared mapping (or only a reference was wanted):
				// the caller's map object is no longer needed.
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		// No compatible mapping found: map through this descriptor (or the
		// temporary physical-span descriptor built above).
		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			// doMap failed: consume the caller's map object.
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
5902*fdd8201dSApple OSS Distributions 
5903*fdd8201dSApple OSS Distributions void
addMapping(IOMemoryMap * mapping)5904*fdd8201dSApple OSS Distributions IOMemoryDescriptor::addMapping(
5905*fdd8201dSApple OSS Distributions 	IOMemoryMap * mapping )
5906*fdd8201dSApple OSS Distributions {
5907*fdd8201dSApple OSS Distributions 	if (mapping) {
5908*fdd8201dSApple OSS Distributions 		if (NULL == _mappings) {
5909*fdd8201dSApple OSS Distributions 			_mappings = OSSet::withCapacity(1);
5910*fdd8201dSApple OSS Distributions 		}
5911*fdd8201dSApple OSS Distributions 		if (_mappings) {
5912*fdd8201dSApple OSS Distributions 			_mappings->setObject( mapping );
5913*fdd8201dSApple OSS Distributions 		}
5914*fdd8201dSApple OSS Distributions 	}
5915*fdd8201dSApple OSS Distributions }
5916*fdd8201dSApple OSS Distributions 
5917*fdd8201dSApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)5918*fdd8201dSApple OSS Distributions IOMemoryDescriptor::removeMapping(
5919*fdd8201dSApple OSS Distributions 	IOMemoryMap * mapping )
5920*fdd8201dSApple OSS Distributions {
5921*fdd8201dSApple OSS Distributions 	if (_mappings) {
5922*fdd8201dSApple OSS Distributions 		_mappings->removeObject( mapping);
5923*fdd8201dSApple OSS Distributions 	}
5924*fdd8201dSApple OSS Distributions }
5925*fdd8201dSApple OSS Distributions 
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	// Store the IOMapper option bits associated with this descriptor.
	_iomapperOptions = options;
}
5931*fdd8201dSApple OSS Distributions 
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	// Return the IOMapper option bits set via setMapperOptions().
	return _iomapperOptions;
}
5937*fdd8201dSApple OSS Distributions 
5938*fdd8201dSApple OSS Distributions #ifndef __LP64__
5939*fdd8201dSApple OSS Distributions // obsolete initializers
5940*fdd8201dSApple OSS Distributions // - initWithOptions is the designated initializer
// Obsolete initializer stub: always fails; use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return false;
}
5948*fdd8201dSApple OSS Distributions 
// Obsolete initializer stub: always fails; use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    length,
    IODirection  direction,
    task_t       task)
{
	return false;
}
5957*fdd8201dSApple OSS Distributions 
// Obsolete initializer stub: always fails; use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            length,
	IODirection            direction )
{
	return false;
}
5966*fdd8201dSApple OSS Distributions 
// Obsolete initializer stub: always fails; use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}
5977*fdd8201dSApple OSS Distributions 
// Obsolete initializer stub: always fails; use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    bool             asReference)
{
	return false;
}
5986*fdd8201dSApple OSS Distributions 
// Obsolete accessor stub: virtual segments are no longer provided; always
// returns NULL.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
5993*fdd8201dSApple OSS Distributions #endif /* !__LP64__ */
5994*fdd8201dSApple OSS Distributions 
5995*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5996*fdd8201dSApple OSS Distributions 
// Serialize this descriptor's address ranges as an OSArray of
// { "address", "length" } dictionaries.  Returns false on a NULL
// serializer, any allocation failure, or if the range list changes
// while being snapshotted.
bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
	OSSharedPtr<OSObject>           values[2] = {NULL};
	OSSharedPtr<OSArray>            array;

	// Plain snapshot of one range; filled in under the lock.
	struct SerData {
		user_addr_t address;
		user_size_t length;
	};

	unsigned int index;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	// Pre-allocate the snapshot buffer before taking the lock.
	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
	if (!vcopy) {
		return false;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (vcopy.size() == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < vcopy.size(); index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index);
			vcopy[index].address = addr;
			vcopy[index].length  = len;
		}
	} else {
		// The descriptor changed out from under us.  Give up.
		UNLOCK;
		return false;
	}
	UNLOCK;

	// Build the OS objects outside the lock.  OSSharedPtr releases any
	// partially-built objects on each early-return failure path.
	for (index = 0; index < vcopy.size(); index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			return false;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			return false;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			return false;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	return array->serialize(s);
}
6071*fdd8201dSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6072*fdd8201dSApple OSS Distributions 
// Reserved vtable padding slots for IOMemoryDescriptor.  Slot 0 is in use
// on both variants; under __LP64__ slots 1-7 remain unused padding, while
// the legacy (non-LP64) build uses slots 1-7 as well.  Slots 8-15 are
// unused padding everywhere.
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6099*fdd8201dSApple OSS Distributions 
6100*fdd8201dSApple OSS Distributions /* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	// Convenience accessor: physical address of the segment at offset 0;
	// the segment length is not reported (NULL length argument).
	return getPhysicalSegment( 0, NULL );
}
6106*fdd8201dSApple OSS Distributions 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

// Factory: allocate and initialize a mixed-data container with the given
// initial capacity.  Returns NULL on allocation or initialization failure.
OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	OSSharedPtr<_IOMemoryDescriptorMixedData> instance = OSMakeShared<_IOMemoryDescriptorMixedData>();
	if (!instance) {
		return nullptr;
	}
	if (!instance->initWithCapacity(capacity)) {
		return nullptr;
	}
	return instance;
}
6118*fdd8201dSApple OSS Distributions 
// (Re)initialize the container with at least `capacity` bytes of backing
// store.  May be called on an already-populated object: the existing
// buffer is released first when it is too small, or when zero capacity
// is requested.  The logical length is always reset to zero.
// Returns false if OSObject::init() or the allocation fails.
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	// Allocate only when there is no (sufficient) buffer already present.
	if (!_data && capacity) {
		_data = IOMalloc(capacity);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6142*fdd8201dSApple OSS Distributions 
void
_IOMemoryDescriptorMixedData::free()
{
	// Release the backing allocation before tearing down the OSObject.
	freeMemory();
	OSObject::free();
}
6149*fdd8201dSApple OSS Distributions 
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	// Return the buffer to the kernel allocator and reset all bookkeeping.
	// NOTE(review): reached with _data == NULL when the object never
	// allocated (e.g. free() on an empty instance) — relies on IOFree
	// tolerating a NULL address; confirm against IOLib.
	IOFree(_data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6157*fdd8201dSApple OSS Distributions 
6158*fdd8201dSApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6159*fdd8201dSApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6160*fdd8201dSApple OSS Distributions {
6161*fdd8201dSApple OSS Distributions 	const auto oldLength = getLength();
6162*fdd8201dSApple OSS Distributions 	size_t newLength;
6163*fdd8201dSApple OSS Distributions 	if (os_add_overflow(oldLength, length, &newLength)) {
6164*fdd8201dSApple OSS Distributions 		return false;
6165*fdd8201dSApple OSS Distributions 	}
6166*fdd8201dSApple OSS Distributions 
6167*fdd8201dSApple OSS Distributions 	if (newLength > _capacity) {
6168*fdd8201dSApple OSS Distributions 		void * const newData = IOMalloc(newLength);
6169*fdd8201dSApple OSS Distributions 		if (_data) {
6170*fdd8201dSApple OSS Distributions 			bcopy(_data, newData, oldLength);
6171*fdd8201dSApple OSS Distributions 			IOFree(_data, _capacity);
6172*fdd8201dSApple OSS Distributions 		}
6173*fdd8201dSApple OSS Distributions 		_data = newData;
6174*fdd8201dSApple OSS Distributions 		_capacity = newLength;
6175*fdd8201dSApple OSS Distributions 	}
6176*fdd8201dSApple OSS Distributions 
6177*fdd8201dSApple OSS Distributions 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6178*fdd8201dSApple OSS Distributions 	if (bytes) {
6179*fdd8201dSApple OSS Distributions 		bcopy(bytes, dest, length);
6180*fdd8201dSApple OSS Distributions 	} else {
6181*fdd8201dSApple OSS Distributions 		bzero(dest, length);
6182*fdd8201dSApple OSS Distributions 	}
6183*fdd8201dSApple OSS Distributions 
6184*fdd8201dSApple OSS Distributions 	_length = newLength;
6185*fdd8201dSApple OSS Distributions 
6186*fdd8201dSApple OSS Distributions 	return true;
6187*fdd8201dSApple OSS Distributions }
6188*fdd8201dSApple OSS Distributions 
// Set the logical length, reallocating (zero-filled) when the current
// buffer is missing or too small; existing contents are preserved up to
// the old length.
// NOTE(review): the IOMallocZero() result is used unchecked — on
// allocation failure bcopy() and the assignments below would operate on
// a NULL buffer.  Verify IOMallocZero cannot fail here, or add a check;
// the void return gives no way to report failure to the caller.
void
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void * const newData = IOMallocZero(length);
		if (_data) {
			bcopy(_data, newData, _length);
			IOFree(_data, _capacity);
		}
		_data = newData;
		_capacity = length;
	}
	_length = length;
}
6203*fdd8201dSApple OSS Distributions 
6204*fdd8201dSApple OSS Distributions const void *
getBytes() const6205*fdd8201dSApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6206*fdd8201dSApple OSS Distributions {
6207*fdd8201dSApple OSS Distributions 	return _length ? _data : nullptr;
6208*fdd8201dSApple OSS Distributions }
6209*fdd8201dSApple OSS Distributions 
6210*fdd8201dSApple OSS Distributions size_t
getLength() const6211*fdd8201dSApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6212*fdd8201dSApple OSS Distributions {
6213*fdd8201dSApple OSS Distributions 	return _data ? _length : 0;
6214*fdd8201dSApple OSS Distributions }
6215