xref: /xnu-8792.41.9/iokit/Kernel/IOMemoryDescriptor.cpp (revision 5c2921b07a2480ab43ec66f5b9e41cb872bc554f)
1*5c2921b0SApple OSS Distributions /*
2*5c2921b0SApple OSS Distributions  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*5c2921b0SApple OSS Distributions  *
4*5c2921b0SApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*5c2921b0SApple OSS Distributions  *
6*5c2921b0SApple OSS Distributions  * This file contains Original Code and/or Modifications of Original Code
7*5c2921b0SApple OSS Distributions  * as defined in and that are subject to the Apple Public Source License
8*5c2921b0SApple OSS Distributions  * Version 2.0 (the 'License'). You may not use this file except in
9*5c2921b0SApple OSS Distributions  * compliance with the License. The rights granted to you under the License
10*5c2921b0SApple OSS Distributions  * may not be used to create, or enable the creation or redistribution of,
11*5c2921b0SApple OSS Distributions  * unlawful or unlicensed copies of an Apple operating system, or to
12*5c2921b0SApple OSS Distributions  * circumvent, violate, or enable the circumvention or violation of, any
13*5c2921b0SApple OSS Distributions  * terms of an Apple operating system software license agreement.
14*5c2921b0SApple OSS Distributions  *
15*5c2921b0SApple OSS Distributions  * Please obtain a copy of the License at
16*5c2921b0SApple OSS Distributions  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*5c2921b0SApple OSS Distributions  *
18*5c2921b0SApple OSS Distributions  * The Original Code and all software distributed under the License are
19*5c2921b0SApple OSS Distributions  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*5c2921b0SApple OSS Distributions  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*5c2921b0SApple OSS Distributions  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*5c2921b0SApple OSS Distributions  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*5c2921b0SApple OSS Distributions  * Please see the License for the specific language governing rights and
24*5c2921b0SApple OSS Distributions  * limitations under the License.
25*5c2921b0SApple OSS Distributions  *
26*5c2921b0SApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*5c2921b0SApple OSS Distributions  */
28*5c2921b0SApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*5c2921b0SApple OSS Distributions 
30*5c2921b0SApple OSS Distributions #include <sys/cdefs.h>
31*5c2921b0SApple OSS Distributions 
32*5c2921b0SApple OSS Distributions #include <IOKit/assert.h>
33*5c2921b0SApple OSS Distributions #include <IOKit/system.h>
34*5c2921b0SApple OSS Distributions #include <IOKit/IOLib.h>
35*5c2921b0SApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*5c2921b0SApple OSS Distributions #include <IOKit/IOMapper.h>
37*5c2921b0SApple OSS Distributions #include <IOKit/IODMACommand.h>
38*5c2921b0SApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*5c2921b0SApple OSS Distributions 
40*5c2921b0SApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*5c2921b0SApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*5c2921b0SApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*5c2921b0SApple OSS Distributions 
44*5c2921b0SApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*5c2921b0SApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*5c2921b0SApple OSS Distributions #include <libkern/OSDebug.h>
47*5c2921b0SApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*5c2921b0SApple OSS Distributions 
49*5c2921b0SApple OSS Distributions #include "IOKitKernelInternal.h"
50*5c2921b0SApple OSS Distributions 
51*5c2921b0SApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*5c2921b0SApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*5c2921b0SApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*5c2921b0SApple OSS Distributions #include <libkern/c++/OSArray.h>
55*5c2921b0SApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*5c2921b0SApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*5c2921b0SApple OSS Distributions #include <os/overflow.h>
58*5c2921b0SApple OSS Distributions #include <os/cpp_util.h>
59*5c2921b0SApple OSS Distributions #include <os/base_private.h>
60*5c2921b0SApple OSS Distributions 
61*5c2921b0SApple OSS Distributions #include <sys/uio.h>
62*5c2921b0SApple OSS Distributions 
63*5c2921b0SApple OSS Distributions __BEGIN_DECLS
64*5c2921b0SApple OSS Distributions #include <vm/pmap.h>
65*5c2921b0SApple OSS Distributions #include <vm/vm_pageout.h>
66*5c2921b0SApple OSS Distributions #include <mach/memory_object_types.h>
67*5c2921b0SApple OSS Distributions #include <device/device_port.h>
68*5c2921b0SApple OSS Distributions 
69*5c2921b0SApple OSS Distributions #include <mach/vm_prot.h>
70*5c2921b0SApple OSS Distributions #include <mach/mach_vm.h>
71*5c2921b0SApple OSS Distributions #include <mach/memory_entry.h>
72*5c2921b0SApple OSS Distributions #include <vm/vm_fault.h>
73*5c2921b0SApple OSS Distributions #include <vm/vm_protos.h>
74*5c2921b0SApple OSS Distributions 
75*5c2921b0SApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76*5c2921b0SApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
77*5c2921b0SApple OSS Distributions 
78*5c2921b0SApple OSS Distributions extern kern_return_t
79*5c2921b0SApple OSS Distributions mach_memory_entry_ownership(
80*5c2921b0SApple OSS Distributions 	ipc_port_t      entry_port,
81*5c2921b0SApple OSS Distributions 	task_t          owner,
82*5c2921b0SApple OSS Distributions 	int             ledger_tag,
83*5c2921b0SApple OSS Distributions 	int             ledger_flags);
84*5c2921b0SApple OSS Distributions 
85*5c2921b0SApple OSS Distributions __END_DECLS
86*5c2921b0SApple OSS Distributions 
// Sentinel mapper pointer. NOTE(review): presumably means "use the system
// mapper once it is available" — confirm at the use sites (not in this chunk).
#define kIOMapperWaitSystem     ((IOMapper *) 1)

// Cached system IOMapper; NULL until resolved elsewhere in this file.
static IOMapper * gIOSystemMapper = NULL;

// NOTE(review): presumably the highest valid physical page number — confirm
// where it is assigned (not visible in this chunk).
ppnum_t           gIOLastPage;

enum {
	kIOMapGuardSizeLarge = 65536    // Guard-region size used for "large" guarded mappings
};
96*5c2921b0SApple OSS Distributions 
97*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
98*5c2921b0SApple OSS Distributions 
// IOMemoryDescriptor is abstract; only subclasses are instantiated.
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

// Concrete general descriptor, zone-allocated; ZC_ZFREE_CLEARMEM requests
// that instances be zeroed when freed back to the zone.
OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
105*5c2921b0SApple OSS Distributions 
106*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
107*5c2921b0SApple OSS Distributions 
// Single recursive lock serializing IOMemoryDescriptor bookkeeping in this file.
static IORecursiveLock * gIOMemoryLock;

// Convenience wrappers around the global memory lock. SLEEP/WAKEUP use
// `this` as the wait event, so they are only usable from member functions.
#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug tracing: flip the #if to 1 to route DEBG through kprintf.
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
121*5c2921b0SApple OSS Distributions 
122*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123*5c2921b0SApple OSS Distributions 
124*5c2921b0SApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
125*5c2921b0SApple OSS Distributions // Function
126*5c2921b0SApple OSS Distributions 
// Flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,    // NOTE(review): presumably device-local memory — confirm at use sites
	kIOPLExternUPL = 0x00000002,    // UPL was supplied by the caller, not created here
};

// Initialization payload used when constructing a descriptor from a
// persistent memory reference (see IOMemoryReference below).
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;      // Source descriptor
	IOMemoryReference               * fMemRef;  // Existing reference to adopt
};
136*5c2921b0SApple OSS Distributions 
// One wired region (UPL) covering a contiguous span of the descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags (ioPLBlockFlags bits)
};
145*5c2921b0SApple OSS Distributions 
enum { kMaxWireTags = 6 };

// Header of the variable-length allocation backing an
// IOGeneralMemoryDescriptor's wiring state. The fixed fields below are
// followed by fPageCnt upl_page_info_t entries (fPageList) and then an
// array of ioPLBlock records — see getIOPLList()/getNumIOPL()/
// computeDataSize() below for the layout arithmetic.
struct ioGMDData {
	IOMapper *  fMapper;            // DMA mapper in use (may be NULL)
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;        // DMA mapping base; meaningful when fMappedBaseValid is set
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;      // Wired-memory tracking entry
#endif /* IOTRACKING */
	unsigned int      fPageCnt;     // Count of entries in fPageList
	uint8_t           fDMAMapNumAddressBits;
	unsigned char     fCompletionError:1;   // Error observed between prepare() and complete()
	unsigned char     fMappedBaseValid:1;
	unsigned char     _resv:4;
	unsigned char     fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
173*5c2921b0SApple OSS Distributions 
174*5c2921b0SApple OSS Distributions #pragma GCC visibility push(hidden)
175*5c2921b0SApple OSS Distributions 
// Growable byte buffer (hidden-visibility OSObject) used to hold the mixed
// ioGMDData/page-list/ioPLBlock data; accessed via the getDataP/getIOPLList
// macros below. Method bodies are defined elsewhere in this file.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create a buffer pre-sized to `capacity` bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	virtual bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append `length` bytes from `bytes`; returns false on failure.
	virtual bool appendBytes(const void * bytes, size_t length);
	// Set the logical length of the buffer.
	virtual void setLength(size_t length);

	virtual const void * getBytes() const;
	virtual size_t getLength() const;

private:
	void freeMemory();      // Release the backing allocation

	void *  _data = nullptr;        // Backing storage
	size_t  _length = 0;            // Bytes currently in use
	size_t  _capacity = 0;          // Bytes allocated
};
198*5c2921b0SApple OSS Distributions 
199*5c2921b0SApple OSS Distributions #pragma GCC visibility pop
200*5c2921b0SApple OSS Distributions 
// Accessors for the variable-length ioGMDData layout:
//   getDataP        - typed pointer to the ioGMDData header in the container
//   getIOPLList     - the ioPLBlock array that follows the fPageCnt page infos
//   getNumIOPL      - number of ioPLBlock records, derived from total length
//   getPageList     - first upl_page_info_t entry
//   computeDataSize - bytes needed for p page infos plus u ioPLBlocks
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

// Memory that is either host-only or remote (combined option mask).
enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
210*5c2921b0SApple OSS Distributions 
211*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212*5c2921b0SApple OSS Distributions 
213*5c2921b0SApple OSS Distributions extern "C" {
// Device-pager data callback: services a fault on pager-backed memory by
// forwarding to the owning descriptor's handleFault(). The handle is the
// descriptor's IOMemoryDescriptorReserved block.
kern_return_t
device_data_action(
	uintptr_t               device_handle,
	ipc_port_t              device_pager,
	vm_prot_t               protection,
	vm_object_offset_t      offset,
	vm_size_t               size)
{
	kern_return_t        kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	OSSharedPtr<IOMemoryDescriptor> memDesc;

	LOCK;
	if (ref->dp.memory) {
		// Retain under the lock so the descriptor cannot be freed while
		// the fault is being serviced.
		memDesc.reset(ref->dp.memory, OSRetain);
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc.reset();
	} else {
		// Descriptor has already detached from this pager.
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}
238*5c2921b0SApple OSS Distributions 
239*5c2921b0SApple OSS Distributions kern_return_t
device_close(uintptr_t device_handle)240*5c2921b0SApple OSS Distributions device_close(
241*5c2921b0SApple OSS Distributions 	uintptr_t     device_handle)
242*5c2921b0SApple OSS Distributions {
243*5c2921b0SApple OSS Distributions 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
244*5c2921b0SApple OSS Distributions 
245*5c2921b0SApple OSS Distributions 	IOFreeType( ref, IOMemoryDescriptorReserved );
246*5c2921b0SApple OSS Distributions 
247*5c2921b0SApple OSS Distributions 	return kIOReturnSuccess;
248*5c2921b0SApple OSS Distributions }
249*5c2921b0SApple OSS Distributions };      // end extern "C"
250*5c2921b0SApple OSS Distributions 
251*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
252*5c2921b0SApple OSS Distributions 
253*5c2921b0SApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
254*5c2921b0SApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
255*5c2921b0SApple OSS Distributions // checked for as a NULL reference is illegal.
256*5c2921b0SApple OSS Distributions static inline void
getAddrLenForInd(mach_vm_address_t & addr,mach_vm_size_t & len,UInt32 type,IOGeneralMemoryDescriptor::Ranges r,UInt32 ind)257*5c2921b0SApple OSS Distributions getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
258*5c2921b0SApple OSS Distributions     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
259*5c2921b0SApple OSS Distributions {
260*5c2921b0SApple OSS Distributions 	assert(kIOMemoryTypeUIO == type
261*5c2921b0SApple OSS Distributions 	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
262*5c2921b0SApple OSS Distributions 	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
263*5c2921b0SApple OSS Distributions 	if (kIOMemoryTypeUIO == type) {
264*5c2921b0SApple OSS Distributions 		user_size_t us;
265*5c2921b0SApple OSS Distributions 		user_addr_t ad;
266*5c2921b0SApple OSS Distributions 		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
267*5c2921b0SApple OSS Distributions 	}
268*5c2921b0SApple OSS Distributions #ifndef __LP64__
269*5c2921b0SApple OSS Distributions 	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
270*5c2921b0SApple OSS Distributions 		IOAddressRange cur = r.v64[ind];
271*5c2921b0SApple OSS Distributions 		addr = cur.address;
272*5c2921b0SApple OSS Distributions 		len  = cur.length;
273*5c2921b0SApple OSS Distributions 	}
274*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
275*5c2921b0SApple OSS Distributions 	else {
276*5c2921b0SApple OSS Distributions 		IOVirtualRange cur = r.v[ind];
277*5c2921b0SApple OSS Distributions 		addr = cur.address;
278*5c2921b0SApple OSS Distributions 		len  = cur.length;
279*5c2921b0SApple OSS Distributions 	}
280*5c2921b0SApple OSS Distributions }
281*5c2921b0SApple OSS Distributions 
282*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
283*5c2921b0SApple OSS Distributions 
284*5c2921b0SApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)285*5c2921b0SApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
286*5c2921b0SApple OSS Distributions {
287*5c2921b0SApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
288*5c2921b0SApple OSS Distributions 
289*5c2921b0SApple OSS Distributions 	*control = VM_PURGABLE_SET_STATE;
290*5c2921b0SApple OSS Distributions 
291*5c2921b0SApple OSS Distributions 	enum { kIOMemoryPurgeableControlMask = 15 };
292*5c2921b0SApple OSS Distributions 
293*5c2921b0SApple OSS Distributions 	switch (kIOMemoryPurgeableControlMask & newState) {
294*5c2921b0SApple OSS Distributions 	case kIOMemoryPurgeableKeepCurrent:
295*5c2921b0SApple OSS Distributions 		*control = VM_PURGABLE_GET_STATE;
296*5c2921b0SApple OSS Distributions 		break;
297*5c2921b0SApple OSS Distributions 
298*5c2921b0SApple OSS Distributions 	case kIOMemoryPurgeableNonVolatile:
299*5c2921b0SApple OSS Distributions 		*state = VM_PURGABLE_NONVOLATILE;
300*5c2921b0SApple OSS Distributions 		break;
301*5c2921b0SApple OSS Distributions 	case kIOMemoryPurgeableVolatile:
302*5c2921b0SApple OSS Distributions 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
303*5c2921b0SApple OSS Distributions 		break;
304*5c2921b0SApple OSS Distributions 	case kIOMemoryPurgeableEmpty:
305*5c2921b0SApple OSS Distributions 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
306*5c2921b0SApple OSS Distributions 		break;
307*5c2921b0SApple OSS Distributions 	default:
308*5c2921b0SApple OSS Distributions 		err = kIOReturnBadArgument;
309*5c2921b0SApple OSS Distributions 		break;
310*5c2921b0SApple OSS Distributions 	}
311*5c2921b0SApple OSS Distributions 
312*5c2921b0SApple OSS Distributions 	if (*control == VM_PURGABLE_SET_STATE) {
313*5c2921b0SApple OSS Distributions 		// let VM know this call is from the kernel and is allowed to alter
314*5c2921b0SApple OSS Distributions 		// the volatility of the memory entry even if it was created with
315*5c2921b0SApple OSS Distributions 		// MAP_MEM_PURGABLE_KERNEL_ONLY
316*5c2921b0SApple OSS Distributions 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
317*5c2921b0SApple OSS Distributions 	}
318*5c2921b0SApple OSS Distributions 
319*5c2921b0SApple OSS Distributions 	return err;
320*5c2921b0SApple OSS Distributions }
321*5c2921b0SApple OSS Distributions 
322*5c2921b0SApple OSS Distributions static IOReturn
purgeableStateBits(int * state)323*5c2921b0SApple OSS Distributions purgeableStateBits(int * state)
324*5c2921b0SApple OSS Distributions {
325*5c2921b0SApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
326*5c2921b0SApple OSS Distributions 
327*5c2921b0SApple OSS Distributions 	switch (VM_PURGABLE_STATE_MASK & *state) {
328*5c2921b0SApple OSS Distributions 	case VM_PURGABLE_NONVOLATILE:
329*5c2921b0SApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
330*5c2921b0SApple OSS Distributions 		break;
331*5c2921b0SApple OSS Distributions 	case VM_PURGABLE_VOLATILE:
332*5c2921b0SApple OSS Distributions 		*state = kIOMemoryPurgeableVolatile;
333*5c2921b0SApple OSS Distributions 		break;
334*5c2921b0SApple OSS Distributions 	case VM_PURGABLE_EMPTY:
335*5c2921b0SApple OSS Distributions 		*state = kIOMemoryPurgeableEmpty;
336*5c2921b0SApple OSS Distributions 		break;
337*5c2921b0SApple OSS Distributions 	default:
338*5c2921b0SApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
339*5c2921b0SApple OSS Distributions 		err = kIOReturnNotReady;
340*5c2921b0SApple OSS Distributions 		break;
341*5c2921b0SApple OSS Distributions 	}
342*5c2921b0SApple OSS Distributions 	return err;
343*5c2921b0SApple OSS Distributions }
344*5c2921b0SApple OSS Distributions 
// One row of the cache-mode translation table below.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* cache/pager attribute
	unsigned int object_type;   // MAP_MEM_* named-entry memory type
} iokit_memtype_entry;

// Indexed directly by the IOKit kIO*Cache option value; must stay in sync
// with that enum's ordering (designated initializers enforce the indices).
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
362*5c2921b0SApple OSS Distributions 
363*5c2921b0SApple OSS Distributions static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)364*5c2921b0SApple OSS Distributions vmProtForCacheMode(IOOptionBits cacheMode)
365*5c2921b0SApple OSS Distributions {
366*5c2921b0SApple OSS Distributions 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
367*5c2921b0SApple OSS Distributions 	vm_prot_t prot = 0;
368*5c2921b0SApple OSS Distributions 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
369*5c2921b0SApple OSS Distributions 	return prot;
370*5c2921b0SApple OSS Distributions }
371*5c2921b0SApple OSS Distributions 
372*5c2921b0SApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)373*5c2921b0SApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
374*5c2921b0SApple OSS Distributions {
375*5c2921b0SApple OSS Distributions 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
376*5c2921b0SApple OSS Distributions 	if (cacheMode == kIODefaultCache) {
377*5c2921b0SApple OSS Distributions 		return -1U;
378*5c2921b0SApple OSS Distributions 	}
379*5c2921b0SApple OSS Distributions 	return iomd_mem_types[cacheMode].wimg;
380*5c2921b0SApple OSS Distributions }
381*5c2921b0SApple OSS Distributions 
382*5c2921b0SApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)383*5c2921b0SApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
384*5c2921b0SApple OSS Distributions {
385*5c2921b0SApple OSS Distributions 	pagerFlags &= VM_WIMG_MASK;
386*5c2921b0SApple OSS Distributions 	IOOptionBits cacheMode = kIODefaultCache;
387*5c2921b0SApple OSS Distributions 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
388*5c2921b0SApple OSS Distributions 		if (iomd_mem_types[i].wimg == pagerFlags) {
389*5c2921b0SApple OSS Distributions 			cacheMode = i;
390*5c2921b0SApple OSS Distributions 			break;
391*5c2921b0SApple OSS Distributions 		}
392*5c2921b0SApple OSS Distributions 	}
393*5c2921b0SApple OSS Distributions 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
394*5c2921b0SApple OSS Distributions }
395*5c2921b0SApple OSS Distributions 
396*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
397*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
398*5c2921b0SApple OSS Distributions 
// One mach named entry (memory-entry port) plus the span it covers.
struct IOMemoryEntry {
	ipc_port_t entry;   // Send right to the named entry
	int64_t    offset;
	uint64_t   size;
	uint64_t   start;
};

// Refcounted bundle of named entries forming a memory reference.
// Allocated with a variable-length tail of `capacity` IOMemoryEntry slots
// (see memoryReferenceAlloc/memoryReferenceFree below).
struct IOMemoryReference {
	volatile SInt32             refCount;   // Released via memoryReferenceRelease
	vm_prot_t                   prot;
	uint32_t                    capacity;   // Allocated entries[] slots
	uint32_t                    count;      // entries[] slots in use
	struct IOMemoryReference  * mapRef;     // Optional chained reference, freed recursively
	IOMemoryEntry               entries[0];
};

// Option bits accepted when building a memory reference.
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};
420*5c2921b0SApple OSS Distributions 
421*5c2921b0SApple OSS Distributions SInt32 gIOMemoryReferenceCount;
422*5c2921b0SApple OSS Distributions 
423*5c2921b0SApple OSS Distributions IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)424*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
425*5c2921b0SApple OSS Distributions {
426*5c2921b0SApple OSS Distributions 	IOMemoryReference * ref;
427*5c2921b0SApple OSS Distributions 	size_t              oldCapacity;
428*5c2921b0SApple OSS Distributions 
429*5c2921b0SApple OSS Distributions 	if (realloc) {
430*5c2921b0SApple OSS Distributions 		oldCapacity = realloc->capacity;
431*5c2921b0SApple OSS Distributions 	} else {
432*5c2921b0SApple OSS Distributions 		oldCapacity = 0;
433*5c2921b0SApple OSS Distributions 	}
434*5c2921b0SApple OSS Distributions 
435*5c2921b0SApple OSS Distributions 	// Use the kalloc API instead of manually handling the reallocation
436*5c2921b0SApple OSS Distributions 	ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
437*5c2921b0SApple OSS Distributions 	    oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
438*5c2921b0SApple OSS Distributions 	if (ref) {
439*5c2921b0SApple OSS Distributions 		if (oldCapacity == 0) {
440*5c2921b0SApple OSS Distributions 			ref->refCount = 1;
441*5c2921b0SApple OSS Distributions 			OSIncrementAtomic(&gIOMemoryReferenceCount);
442*5c2921b0SApple OSS Distributions 		}
443*5c2921b0SApple OSS Distributions 		ref->capacity = capacity;
444*5c2921b0SApple OSS Distributions 	}
445*5c2921b0SApple OSS Distributions 	return ref;
446*5c2921b0SApple OSS Distributions }
447*5c2921b0SApple OSS Distributions 
448*5c2921b0SApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)449*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
450*5c2921b0SApple OSS Distributions {
451*5c2921b0SApple OSS Distributions 	IOMemoryEntry * entries;
452*5c2921b0SApple OSS Distributions 
453*5c2921b0SApple OSS Distributions 	if (ref->mapRef) {
454*5c2921b0SApple OSS Distributions 		memoryReferenceFree(ref->mapRef);
455*5c2921b0SApple OSS Distributions 		ref->mapRef = NULL;
456*5c2921b0SApple OSS Distributions 	}
457*5c2921b0SApple OSS Distributions 
458*5c2921b0SApple OSS Distributions 	entries = ref->entries + ref->count;
459*5c2921b0SApple OSS Distributions 	while (entries > &ref->entries[0]) {
460*5c2921b0SApple OSS Distributions 		entries--;
461*5c2921b0SApple OSS Distributions 		ipc_port_release_send(entries->entry);
462*5c2921b0SApple OSS Distributions 	}
463*5c2921b0SApple OSS Distributions 	kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
464*5c2921b0SApple OSS Distributions 
465*5c2921b0SApple OSS Distributions 	OSDecrementAtomic(&gIOMemoryReferenceCount);
466*5c2921b0SApple OSS Distributions }
467*5c2921b0SApple OSS Distributions 
468*5c2921b0SApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)469*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
470*5c2921b0SApple OSS Distributions {
471*5c2921b0SApple OSS Distributions 	if (1 == OSDecrementAtomic(&ref->refCount)) {
472*5c2921b0SApple OSS Distributions 		memoryReferenceFree(ref);
473*5c2921b0SApple OSS Distributions 	}
474*5c2921b0SApple OSS Distributions }
475*5c2921b0SApple OSS Distributions 
476*5c2921b0SApple OSS Distributions 
477*5c2921b0SApple OSS Distributions IOReturn
memoryReferenceCreate(IOOptionBits options,IOMemoryReference ** reference)478*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreate(
479*5c2921b0SApple OSS Distributions 	IOOptionBits         options,
480*5c2921b0SApple OSS Distributions 	IOMemoryReference ** reference)
481*5c2921b0SApple OSS Distributions {
482*5c2921b0SApple OSS Distributions 	enum { kCapacity = 4, kCapacityInc = 4 };
483*5c2921b0SApple OSS Distributions 
484*5c2921b0SApple OSS Distributions 	kern_return_t        err;
485*5c2921b0SApple OSS Distributions 	IOMemoryReference *  ref;
486*5c2921b0SApple OSS Distributions 	IOMemoryEntry *      entries;
487*5c2921b0SApple OSS Distributions 	IOMemoryEntry *      cloneEntries = NULL;
488*5c2921b0SApple OSS Distributions 	vm_map_t             map;
489*5c2921b0SApple OSS Distributions 	ipc_port_t           entry, cloneEntry;
490*5c2921b0SApple OSS Distributions 	vm_prot_t            prot;
491*5c2921b0SApple OSS Distributions 	memory_object_size_t actualSize;
492*5c2921b0SApple OSS Distributions 	uint32_t             rangeIdx;
493*5c2921b0SApple OSS Distributions 	uint32_t             count;
494*5c2921b0SApple OSS Distributions 	mach_vm_address_t    entryAddr, endAddr, entrySize;
495*5c2921b0SApple OSS Distributions 	mach_vm_size_t       srcAddr, srcLen;
496*5c2921b0SApple OSS Distributions 	mach_vm_size_t       nextAddr, nextLen;
497*5c2921b0SApple OSS Distributions 	mach_vm_size_t       offset, remain;
498*5c2921b0SApple OSS Distributions 	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
499*5c2921b0SApple OSS Distributions 	int                  misaligned_start = 0, misaligned_end = 0;
500*5c2921b0SApple OSS Distributions 	IOByteCount          physLen;
501*5c2921b0SApple OSS Distributions 	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
502*5c2921b0SApple OSS Distributions 	IOOptionBits         cacheMode;
503*5c2921b0SApple OSS Distributions 	unsigned int         pagerFlags;
504*5c2921b0SApple OSS Distributions 	vm_tag_t             tag;
505*5c2921b0SApple OSS Distributions 	vm_named_entry_kernel_flags_t vmne_kflags;
506*5c2921b0SApple OSS Distributions 
507*5c2921b0SApple OSS Distributions 	ref = memoryReferenceAlloc(kCapacity, NULL);
508*5c2921b0SApple OSS Distributions 	if (!ref) {
509*5c2921b0SApple OSS Distributions 		return kIOReturnNoMemory;
510*5c2921b0SApple OSS Distributions 	}
511*5c2921b0SApple OSS Distributions 
512*5c2921b0SApple OSS Distributions 	tag = (vm_tag_t) getVMTag(kernel_map);
513*5c2921b0SApple OSS Distributions 	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
514*5c2921b0SApple OSS Distributions 	entries = &ref->entries[0];
515*5c2921b0SApple OSS Distributions 	count = 0;
516*5c2921b0SApple OSS Distributions 	err = KERN_SUCCESS;
517*5c2921b0SApple OSS Distributions 
518*5c2921b0SApple OSS Distributions 	offset = 0;
519*5c2921b0SApple OSS Distributions 	rangeIdx = 0;
520*5c2921b0SApple OSS Distributions 	remain = _length;
521*5c2921b0SApple OSS Distributions 	if (_task) {
522*5c2921b0SApple OSS Distributions 		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
523*5c2921b0SApple OSS Distributions 
524*5c2921b0SApple OSS Distributions 		// account for IOBMD setLength(), use its capacity as length
525*5c2921b0SApple OSS Distributions 		IOBufferMemoryDescriptor * bmd;
526*5c2921b0SApple OSS Distributions 		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
527*5c2921b0SApple OSS Distributions 			nextLen = bmd->getCapacity();
528*5c2921b0SApple OSS Distributions 			remain  = nextLen;
529*5c2921b0SApple OSS Distributions 		}
530*5c2921b0SApple OSS Distributions 	} else {
531*5c2921b0SApple OSS Distributions 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
532*5c2921b0SApple OSS Distributions 		nextLen = physLen;
533*5c2921b0SApple OSS Distributions 
534*5c2921b0SApple OSS Distributions 		// default cache mode for physical
535*5c2921b0SApple OSS Distributions 		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
536*5c2921b0SApple OSS Distributions 			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
537*5c2921b0SApple OSS Distributions 			_flags |= (mode << kIOMemoryBufferCacheShift);
538*5c2921b0SApple OSS Distributions 		}
539*5c2921b0SApple OSS Distributions 	}
540*5c2921b0SApple OSS Distributions 
541*5c2921b0SApple OSS Distributions 	// cache mode & vm_prot
542*5c2921b0SApple OSS Distributions 	prot = VM_PROT_READ;
543*5c2921b0SApple OSS Distributions 	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
544*5c2921b0SApple OSS Distributions 	prot |= vmProtForCacheMode(cacheMode);
545*5c2921b0SApple OSS Distributions 	// VM system requires write access to change cache mode
546*5c2921b0SApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
547*5c2921b0SApple OSS Distributions 		prot |= VM_PROT_WRITE;
548*5c2921b0SApple OSS Distributions 	}
549*5c2921b0SApple OSS Distributions 	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
550*5c2921b0SApple OSS Distributions 		prot |= VM_PROT_WRITE;
551*5c2921b0SApple OSS Distributions 	}
552*5c2921b0SApple OSS Distributions 	if (kIOMemoryReferenceWrite & options) {
553*5c2921b0SApple OSS Distributions 		prot |= VM_PROT_WRITE;
554*5c2921b0SApple OSS Distributions 	}
555*5c2921b0SApple OSS Distributions 	if (kIOMemoryReferenceCOW   & options) {
556*5c2921b0SApple OSS Distributions 		prot |= MAP_MEM_VM_COPY;
557*5c2921b0SApple OSS Distributions 	}
558*5c2921b0SApple OSS Distributions 
559*5c2921b0SApple OSS Distributions 	if (kIOMemoryUseReserve & _flags) {
560*5c2921b0SApple OSS Distributions 		prot |= MAP_MEM_GRAB_SECLUDED;
561*5c2921b0SApple OSS Distributions 	}
562*5c2921b0SApple OSS Distributions 
563*5c2921b0SApple OSS Distributions 	if ((kIOMemoryReferenceReuse & options) && _memRef) {
564*5c2921b0SApple OSS Distributions 		cloneEntries = &_memRef->entries[0];
565*5c2921b0SApple OSS Distributions 		prot |= MAP_MEM_NAMED_REUSE;
566*5c2921b0SApple OSS Distributions 	}
567*5c2921b0SApple OSS Distributions 
568*5c2921b0SApple OSS Distributions 	if (_task) {
569*5c2921b0SApple OSS Distributions 		// virtual ranges
570*5c2921b0SApple OSS Distributions 
571*5c2921b0SApple OSS Distributions 		if (kIOMemoryBufferPageable & _flags) {
572*5c2921b0SApple OSS Distributions 			int ledger_tag, ledger_no_footprint;
573*5c2921b0SApple OSS Distributions 
574*5c2921b0SApple OSS Distributions 			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
575*5c2921b0SApple OSS Distributions 			prot |= MAP_MEM_NAMED_CREATE;
576*5c2921b0SApple OSS Distributions 
577*5c2921b0SApple OSS Distributions 			// default accounting settings:
578*5c2921b0SApple OSS Distributions 			//   + "none" ledger tag
579*5c2921b0SApple OSS Distributions 			//   + include in footprint
580*5c2921b0SApple OSS Distributions 			// can be changed later with ::setOwnership()
581*5c2921b0SApple OSS Distributions 			ledger_tag = VM_LEDGER_TAG_NONE;
582*5c2921b0SApple OSS Distributions 			ledger_no_footprint = 0;
583*5c2921b0SApple OSS Distributions 
584*5c2921b0SApple OSS Distributions 			if (kIOMemoryBufferPurgeable & _flags) {
585*5c2921b0SApple OSS Distributions 				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
586*5c2921b0SApple OSS Distributions 				if (VM_KERN_MEMORY_SKYWALK == tag) {
587*5c2921b0SApple OSS Distributions 					// Skywalk purgeable memory accounting:
588*5c2921b0SApple OSS Distributions 					//    + "network" ledger tag
589*5c2921b0SApple OSS Distributions 					//    + not included in footprint
590*5c2921b0SApple OSS Distributions 					ledger_tag = VM_LEDGER_TAG_NETWORK;
591*5c2921b0SApple OSS Distributions 					ledger_no_footprint = 1;
592*5c2921b0SApple OSS Distributions 				} else {
593*5c2921b0SApple OSS Distributions 					// regular purgeable memory accounting:
594*5c2921b0SApple OSS Distributions 					//    + no ledger tag
595*5c2921b0SApple OSS Distributions 					//    + included in footprint
596*5c2921b0SApple OSS Distributions 					ledger_tag = VM_LEDGER_TAG_NONE;
597*5c2921b0SApple OSS Distributions 					ledger_no_footprint = 0;
598*5c2921b0SApple OSS Distributions 				}
599*5c2921b0SApple OSS Distributions 			}
600*5c2921b0SApple OSS Distributions 			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
601*5c2921b0SApple OSS Distributions 			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
602*5c2921b0SApple OSS Distributions 			if (kIOMemoryUseReserve & _flags) {
603*5c2921b0SApple OSS Distributions 				prot |= MAP_MEM_GRAB_SECLUDED;
604*5c2921b0SApple OSS Distributions 			}
605*5c2921b0SApple OSS Distributions 
606*5c2921b0SApple OSS Distributions 			prot |= VM_PROT_WRITE;
607*5c2921b0SApple OSS Distributions 			map = NULL;
608*5c2921b0SApple OSS Distributions 		} else {
609*5c2921b0SApple OSS Distributions 			prot |= MAP_MEM_USE_DATA_ADDR;
610*5c2921b0SApple OSS Distributions 			map = get_task_map(_task);
611*5c2921b0SApple OSS Distributions 		}
612*5c2921b0SApple OSS Distributions 		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
613*5c2921b0SApple OSS Distributions 
614*5c2921b0SApple OSS Distributions 		while (remain) {
615*5c2921b0SApple OSS Distributions 			srcAddr  = nextAddr;
616*5c2921b0SApple OSS Distributions 			srcLen   = nextLen;
617*5c2921b0SApple OSS Distributions 			nextAddr = 0;
618*5c2921b0SApple OSS Distributions 			nextLen  = 0;
619*5c2921b0SApple OSS Distributions 			// coalesce addr range
620*5c2921b0SApple OSS Distributions 			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
621*5c2921b0SApple OSS Distributions 				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
622*5c2921b0SApple OSS Distributions 				if ((srcAddr + srcLen) != nextAddr) {
623*5c2921b0SApple OSS Distributions 					break;
624*5c2921b0SApple OSS Distributions 				}
625*5c2921b0SApple OSS Distributions 				srcLen += nextLen;
626*5c2921b0SApple OSS Distributions 			}
627*5c2921b0SApple OSS Distributions 
628*5c2921b0SApple OSS Distributions 			if (MAP_MEM_USE_DATA_ADDR & prot) {
629*5c2921b0SApple OSS Distributions 				entryAddr = srcAddr;
630*5c2921b0SApple OSS Distributions 				endAddr   = srcAddr + srcLen;
631*5c2921b0SApple OSS Distributions 			} else {
632*5c2921b0SApple OSS Distributions 				entryAddr = trunc_page_64(srcAddr);
633*5c2921b0SApple OSS Distributions 				endAddr   = round_page_64(srcAddr + srcLen);
634*5c2921b0SApple OSS Distributions 			}
635*5c2921b0SApple OSS Distributions 			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
636*5c2921b0SApple OSS Distributions 				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
637*5c2921b0SApple OSS Distributions 			}
638*5c2921b0SApple OSS Distributions 
639*5c2921b0SApple OSS Distributions 			do{
640*5c2921b0SApple OSS Distributions 				entrySize = (endAddr - entryAddr);
641*5c2921b0SApple OSS Distributions 				if (!entrySize) {
642*5c2921b0SApple OSS Distributions 					break;
643*5c2921b0SApple OSS Distributions 				}
644*5c2921b0SApple OSS Distributions 				actualSize = entrySize;
645*5c2921b0SApple OSS Distributions 
646*5c2921b0SApple OSS Distributions 				cloneEntry = MACH_PORT_NULL;
647*5c2921b0SApple OSS Distributions 				if (MAP_MEM_NAMED_REUSE & prot) {
648*5c2921b0SApple OSS Distributions 					if (cloneEntries < &_memRef->entries[_memRef->count]) {
649*5c2921b0SApple OSS Distributions 						cloneEntry = cloneEntries->entry;
650*5c2921b0SApple OSS Distributions 					} else {
651*5c2921b0SApple OSS Distributions 						prot &= ~MAP_MEM_NAMED_REUSE;
652*5c2921b0SApple OSS Distributions 					}
653*5c2921b0SApple OSS Distributions 				}
654*5c2921b0SApple OSS Distributions 
655*5c2921b0SApple OSS Distributions 				err = mach_make_memory_entry_internal(map,
656*5c2921b0SApple OSS Distributions 				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
657*5c2921b0SApple OSS Distributions 
658*5c2921b0SApple OSS Distributions 				if (KERN_SUCCESS != err) {
659*5c2921b0SApple OSS Distributions 					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
660*5c2921b0SApple OSS Distributions 					break;
661*5c2921b0SApple OSS Distributions 				}
662*5c2921b0SApple OSS Distributions 				if (MAP_MEM_USE_DATA_ADDR & prot) {
663*5c2921b0SApple OSS Distributions 					if (actualSize > entrySize) {
664*5c2921b0SApple OSS Distributions 						actualSize = entrySize;
665*5c2921b0SApple OSS Distributions 					}
666*5c2921b0SApple OSS Distributions 				} else if (actualSize > entrySize) {
667*5c2921b0SApple OSS Distributions 					panic("mach_make_memory_entry_64 actualSize");
668*5c2921b0SApple OSS Distributions 				}
669*5c2921b0SApple OSS Distributions 
670*5c2921b0SApple OSS Distributions 				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
671*5c2921b0SApple OSS Distributions 
672*5c2921b0SApple OSS Distributions 				if (count && overmap_start) {
673*5c2921b0SApple OSS Distributions 					/*
674*5c2921b0SApple OSS Distributions 					 * Track misaligned start for all
675*5c2921b0SApple OSS Distributions 					 * except the first entry.
676*5c2921b0SApple OSS Distributions 					 */
677*5c2921b0SApple OSS Distributions 					misaligned_start++;
678*5c2921b0SApple OSS Distributions 				}
679*5c2921b0SApple OSS Distributions 
680*5c2921b0SApple OSS Distributions 				if (overmap_end) {
681*5c2921b0SApple OSS Distributions 					/*
682*5c2921b0SApple OSS Distributions 					 * Ignore misaligned end for the
683*5c2921b0SApple OSS Distributions 					 * last entry.
684*5c2921b0SApple OSS Distributions 					 */
685*5c2921b0SApple OSS Distributions 					if ((entryAddr + actualSize) != endAddr) {
686*5c2921b0SApple OSS Distributions 						misaligned_end++;
687*5c2921b0SApple OSS Distributions 					}
688*5c2921b0SApple OSS Distributions 				}
689*5c2921b0SApple OSS Distributions 
690*5c2921b0SApple OSS Distributions 				if (count) {
691*5c2921b0SApple OSS Distributions 					/* Middle entries */
692*5c2921b0SApple OSS Distributions 					if (misaligned_start || misaligned_end) {
693*5c2921b0SApple OSS Distributions 						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
694*5c2921b0SApple OSS Distributions 						ipc_port_release_send(entry);
695*5c2921b0SApple OSS Distributions 						err = KERN_NOT_SUPPORTED;
696*5c2921b0SApple OSS Distributions 						break;
697*5c2921b0SApple OSS Distributions 					}
698*5c2921b0SApple OSS Distributions 				}
699*5c2921b0SApple OSS Distributions 
700*5c2921b0SApple OSS Distributions 				if (count >= ref->capacity) {
701*5c2921b0SApple OSS Distributions 					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
702*5c2921b0SApple OSS Distributions 					entries = &ref->entries[count];
703*5c2921b0SApple OSS Distributions 				}
704*5c2921b0SApple OSS Distributions 				entries->entry  = entry;
705*5c2921b0SApple OSS Distributions 				entries->size   = actualSize;
706*5c2921b0SApple OSS Distributions 				entries->offset = offset + (entryAddr - srcAddr);
707*5c2921b0SApple OSS Distributions 				entries->start = entryAddr;
708*5c2921b0SApple OSS Distributions 				entryAddr += actualSize;
709*5c2921b0SApple OSS Distributions 				if (MAP_MEM_NAMED_REUSE & prot) {
710*5c2921b0SApple OSS Distributions 					if ((cloneEntries->entry == entries->entry)
711*5c2921b0SApple OSS Distributions 					    && (cloneEntries->size == entries->size)
712*5c2921b0SApple OSS Distributions 					    && (cloneEntries->offset == entries->offset)) {
713*5c2921b0SApple OSS Distributions 						cloneEntries++;
714*5c2921b0SApple OSS Distributions 					} else {
715*5c2921b0SApple OSS Distributions 						prot &= ~MAP_MEM_NAMED_REUSE;
716*5c2921b0SApple OSS Distributions 					}
717*5c2921b0SApple OSS Distributions 				}
718*5c2921b0SApple OSS Distributions 				entries++;
719*5c2921b0SApple OSS Distributions 				count++;
720*5c2921b0SApple OSS Distributions 			}while (true);
721*5c2921b0SApple OSS Distributions 			offset += srcLen;
722*5c2921b0SApple OSS Distributions 			remain -= srcLen;
723*5c2921b0SApple OSS Distributions 		}
724*5c2921b0SApple OSS Distributions 	} else {
725*5c2921b0SApple OSS Distributions 		// _task == 0, physical or kIOMemoryTypeUPL
726*5c2921b0SApple OSS Distributions 		memory_object_t pager;
727*5c2921b0SApple OSS Distributions 		vm_size_t       size = ptoa_64(_pages);
728*5c2921b0SApple OSS Distributions 
729*5c2921b0SApple OSS Distributions 		if (!getKernelReserved()) {
730*5c2921b0SApple OSS Distributions 			panic("getKernelReserved");
731*5c2921b0SApple OSS Distributions 		}
732*5c2921b0SApple OSS Distributions 
733*5c2921b0SApple OSS Distributions 		reserved->dp.pagerContig = (1 == _rangesCount);
734*5c2921b0SApple OSS Distributions 		reserved->dp.memory      = this;
735*5c2921b0SApple OSS Distributions 
736*5c2921b0SApple OSS Distributions 		pagerFlags = pagerFlagsForCacheMode(cacheMode);
737*5c2921b0SApple OSS Distributions 		if (-1U == pagerFlags) {
738*5c2921b0SApple OSS Distributions 			panic("phys is kIODefaultCache");
739*5c2921b0SApple OSS Distributions 		}
740*5c2921b0SApple OSS Distributions 		if (reserved->dp.pagerContig) {
741*5c2921b0SApple OSS Distributions 			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
742*5c2921b0SApple OSS Distributions 		}
743*5c2921b0SApple OSS Distributions 
744*5c2921b0SApple OSS Distributions 		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
745*5c2921b0SApple OSS Distributions 		    size, pagerFlags);
746*5c2921b0SApple OSS Distributions 		assert(pager);
747*5c2921b0SApple OSS Distributions 		if (!pager) {
748*5c2921b0SApple OSS Distributions 			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
749*5c2921b0SApple OSS Distributions 			err = kIOReturnVMError;
750*5c2921b0SApple OSS Distributions 		} else {
751*5c2921b0SApple OSS Distributions 			srcAddr  = nextAddr;
752*5c2921b0SApple OSS Distributions 			entryAddr = trunc_page_64(srcAddr);
753*5c2921b0SApple OSS Distributions 			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
754*5c2921b0SApple OSS Distributions 			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
755*5c2921b0SApple OSS Distributions 			assert(KERN_SUCCESS == err);
756*5c2921b0SApple OSS Distributions 			if (KERN_SUCCESS != err) {
757*5c2921b0SApple OSS Distributions 				device_pager_deallocate(pager);
758*5c2921b0SApple OSS Distributions 			} else {
759*5c2921b0SApple OSS Distributions 				reserved->dp.devicePager = pager;
760*5c2921b0SApple OSS Distributions 				entries->entry  = entry;
761*5c2921b0SApple OSS Distributions 				entries->size   = size;
762*5c2921b0SApple OSS Distributions 				entries->offset = offset + (entryAddr - srcAddr);
763*5c2921b0SApple OSS Distributions 				entries++;
764*5c2921b0SApple OSS Distributions 				count++;
765*5c2921b0SApple OSS Distributions 			}
766*5c2921b0SApple OSS Distributions 		}
767*5c2921b0SApple OSS Distributions 	}
768*5c2921b0SApple OSS Distributions 
769*5c2921b0SApple OSS Distributions 	ref->count = count;
770*5c2921b0SApple OSS Distributions 	ref->prot  = prot;
771*5c2921b0SApple OSS Distributions 
772*5c2921b0SApple OSS Distributions 	if (_task && (KERN_SUCCESS == err)
773*5c2921b0SApple OSS Distributions 	    && (kIOMemoryMapCopyOnWrite & _flags)
774*5c2921b0SApple OSS Distributions 	    && !(kIOMemoryReferenceCOW & options)) {
775*5c2921b0SApple OSS Distributions 		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
776*5c2921b0SApple OSS Distributions 		if (KERN_SUCCESS != err) {
777*5c2921b0SApple OSS Distributions 			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
778*5c2921b0SApple OSS Distributions 		}
779*5c2921b0SApple OSS Distributions 	}
780*5c2921b0SApple OSS Distributions 
781*5c2921b0SApple OSS Distributions 	if (KERN_SUCCESS == err) {
782*5c2921b0SApple OSS Distributions 		if (MAP_MEM_NAMED_REUSE & prot) {
783*5c2921b0SApple OSS Distributions 			memoryReferenceFree(ref);
784*5c2921b0SApple OSS Distributions 			OSIncrementAtomic(&_memRef->refCount);
785*5c2921b0SApple OSS Distributions 			ref = _memRef;
786*5c2921b0SApple OSS Distributions 		}
787*5c2921b0SApple OSS Distributions 	} else {
788*5c2921b0SApple OSS Distributions 		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
789*5c2921b0SApple OSS Distributions 		memoryReferenceFree(ref);
790*5c2921b0SApple OSS Distributions 		ref = NULL;
791*5c2921b0SApple OSS Distributions 	}
792*5c2921b0SApple OSS Distributions 
793*5c2921b0SApple OSS Distributions 	*reference = ref;
794*5c2921b0SApple OSS Distributions 
795*5c2921b0SApple OSS Distributions 	return err;
796*5c2921b0SApple OSS Distributions }
797*5c2921b0SApple OSS Distributions 
798*5c2921b0SApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)799*5c2921b0SApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
800*5c2921b0SApple OSS Distributions {
801*5c2921b0SApple OSS Distributions 	switch (kIOMapGuardedMask & options) {
802*5c2921b0SApple OSS Distributions 	default:
803*5c2921b0SApple OSS Distributions 	case kIOMapGuardedSmall:
804*5c2921b0SApple OSS Distributions 		return vm_map_page_size(map);
805*5c2921b0SApple OSS Distributions 	case kIOMapGuardedLarge:
806*5c2921b0SApple OSS Distributions 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
807*5c2921b0SApple OSS Distributions 		return kIOMapGuardSizeLarge;
808*5c2921b0SApple OSS Distributions 	}
809*5c2921b0SApple OSS Distributions 	;
810*5c2921b0SApple OSS Distributions }
811*5c2921b0SApple OSS Distributions 
812*5c2921b0SApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)813*5c2921b0SApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
814*5c2921b0SApple OSS Distributions     vm_map_offset_t addr, mach_vm_size_t size)
815*5c2921b0SApple OSS Distributions {
816*5c2921b0SApple OSS Distributions 	kern_return_t   kr;
817*5c2921b0SApple OSS Distributions 	vm_map_offset_t actualAddr;
818*5c2921b0SApple OSS Distributions 	mach_vm_size_t  actualSize;
819*5c2921b0SApple OSS Distributions 
820*5c2921b0SApple OSS Distributions 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
821*5c2921b0SApple OSS Distributions 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
822*5c2921b0SApple OSS Distributions 
823*5c2921b0SApple OSS Distributions 	if (kIOMapGuardedMask & options) {
824*5c2921b0SApple OSS Distributions 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
825*5c2921b0SApple OSS Distributions 		actualAddr -= guardSize;
826*5c2921b0SApple OSS Distributions 		actualSize += 2 * guardSize;
827*5c2921b0SApple OSS Distributions 	}
828*5c2921b0SApple OSS Distributions 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
829*5c2921b0SApple OSS Distributions 
830*5c2921b0SApple OSS Distributions 	return kr;
831*5c2921b0SApple OSS Distributions }
832*5c2921b0SApple OSS Distributions 
833*5c2921b0SApple OSS Distributions kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map,void * _ref)834*5c2921b0SApple OSS Distributions IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
835*5c2921b0SApple OSS Distributions {
836*5c2921b0SApple OSS Distributions 	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
837*5c2921b0SApple OSS Distributions 	IOReturn                        err;
838*5c2921b0SApple OSS Distributions 	vm_map_offset_t                 addr;
839*5c2921b0SApple OSS Distributions 	mach_vm_size_t                  size;
840*5c2921b0SApple OSS Distributions 	mach_vm_size_t                  guardSize;
841*5c2921b0SApple OSS Distributions 	vm_map_kernel_flags_t           vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
842*5c2921b0SApple OSS Distributions 
843*5c2921b0SApple OSS Distributions 	addr = ref->mapped;
844*5c2921b0SApple OSS Distributions 	size = ref->size;
845*5c2921b0SApple OSS Distributions 	guardSize = 0;
846*5c2921b0SApple OSS Distributions 
847*5c2921b0SApple OSS Distributions 	if (kIOMapGuardedMask & ref->options) {
848*5c2921b0SApple OSS Distributions 		if (!(kIOMapAnywhere & ref->options)) {
849*5c2921b0SApple OSS Distributions 			return kIOReturnBadArgument;
850*5c2921b0SApple OSS Distributions 		}
851*5c2921b0SApple OSS Distributions 		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
852*5c2921b0SApple OSS Distributions 		size += 2 * guardSize;
853*5c2921b0SApple OSS Distributions 	}
854*5c2921b0SApple OSS Distributions 
855*5c2921b0SApple OSS Distributions 	/*
856*5c2921b0SApple OSS Distributions 	 * Mapping memory into the kernel_map using IOMDs use the data range.
857*5c2921b0SApple OSS Distributions 	 * Memory being mapped should not contain kernel pointers.
858*5c2921b0SApple OSS Distributions 	 */
859*5c2921b0SApple OSS Distributions 	if (map == kernel_map) {
860*5c2921b0SApple OSS Distributions 		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
861*5c2921b0SApple OSS Distributions 	}
862*5c2921b0SApple OSS Distributions 
863*5c2921b0SApple OSS Distributions 	err = vm_map_enter_mem_object(map, &addr, size,
864*5c2921b0SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
865*5c2921b0SApple OSS Distributions 	    // TODO4K this should not be necessary...
866*5c2921b0SApple OSS Distributions 	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
867*5c2921b0SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
868*5c2921b0SApple OSS Distributions 	    (vm_map_offset_t) 0,
869*5c2921b0SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
870*5c2921b0SApple OSS Distributions 	    (((ref->options & kIOMapAnywhere)
871*5c2921b0SApple OSS Distributions 	    ? VM_FLAGS_ANYWHERE
872*5c2921b0SApple OSS Distributions 	    : VM_FLAGS_FIXED)),
873*5c2921b0SApple OSS Distributions 	    vmk_flags,
874*5c2921b0SApple OSS Distributions 	    ref->tag,
875*5c2921b0SApple OSS Distributions 	    IPC_PORT_NULL,
876*5c2921b0SApple OSS Distributions 	    (memory_object_offset_t) 0,
877*5c2921b0SApple OSS Distributions 	    false,                       /* copy */
878*5c2921b0SApple OSS Distributions 	    ref->prot,
879*5c2921b0SApple OSS Distributions 	    ref->prot,
880*5c2921b0SApple OSS Distributions 	    VM_INHERIT_NONE);
881*5c2921b0SApple OSS Distributions 	if (KERN_SUCCESS == err) {
882*5c2921b0SApple OSS Distributions 		ref->mapped = (mach_vm_address_t) addr;
883*5c2921b0SApple OSS Distributions 		ref->map = map;
884*5c2921b0SApple OSS Distributions 		if (kIOMapGuardedMask & ref->options) {
885*5c2921b0SApple OSS Distributions 			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
886*5c2921b0SApple OSS Distributions 
887*5c2921b0SApple OSS Distributions 			err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
888*5c2921b0SApple OSS Distributions 			assert(KERN_SUCCESS == err);
889*5c2921b0SApple OSS Distributions 			err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
890*5c2921b0SApple OSS Distributions 			assert(KERN_SUCCESS == err);
891*5c2921b0SApple OSS Distributions 			ref->mapped += guardSize;
892*5c2921b0SApple OSS Distributions 		}
893*5c2921b0SApple OSS Distributions 	}
894*5c2921b0SApple OSS Distributions 
895*5c2921b0SApple OSS Distributions 	return err;
896*5c2921b0SApple OSS Distributions }
897*5c2921b0SApple OSS Distributions 
898*5c2921b0SApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)899*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
900*5c2921b0SApple OSS Distributions 	IOMemoryReference * ref,
901*5c2921b0SApple OSS Distributions 	vm_map_t            map,
902*5c2921b0SApple OSS Distributions 	mach_vm_size_t      inoffset,
903*5c2921b0SApple OSS Distributions 	mach_vm_size_t      size,
904*5c2921b0SApple OSS Distributions 	IOOptionBits        options,
905*5c2921b0SApple OSS Distributions 	mach_vm_address_t * inaddr)
906*5c2921b0SApple OSS Distributions {
907*5c2921b0SApple OSS Distributions 	IOReturn        err;
908*5c2921b0SApple OSS Distributions 	int64_t         offset = inoffset;
909*5c2921b0SApple OSS Distributions 	uint32_t        rangeIdx, entryIdx;
910*5c2921b0SApple OSS Distributions 	vm_map_offset_t addr, mapAddr;
911*5c2921b0SApple OSS Distributions 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
912*5c2921b0SApple OSS Distributions 
913*5c2921b0SApple OSS Distributions 	mach_vm_address_t nextAddr;
914*5c2921b0SApple OSS Distributions 	mach_vm_size_t    nextLen;
915*5c2921b0SApple OSS Distributions 	IOByteCount       physLen;
916*5c2921b0SApple OSS Distributions 	IOMemoryEntry   * entry;
917*5c2921b0SApple OSS Distributions 	vm_prot_t         prot, memEntryCacheMode;
918*5c2921b0SApple OSS Distributions 	IOOptionBits      type;
919*5c2921b0SApple OSS Distributions 	IOOptionBits      cacheMode;
920*5c2921b0SApple OSS Distributions 	vm_tag_t          tag;
921*5c2921b0SApple OSS Distributions 	// for the kIOMapPrefault option.
922*5c2921b0SApple OSS Distributions 	upl_page_info_t * pageList = NULL;
923*5c2921b0SApple OSS Distributions 	UInt              currentPageIndex = 0;
924*5c2921b0SApple OSS Distributions 	bool              didAlloc;
925*5c2921b0SApple OSS Distributions 
926*5c2921b0SApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
927*5c2921b0SApple OSS Distributions 
928*5c2921b0SApple OSS Distributions 	if (ref->mapRef) {
929*5c2921b0SApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
930*5c2921b0SApple OSS Distributions 		return err;
931*5c2921b0SApple OSS Distributions 	}
932*5c2921b0SApple OSS Distributions 
933*5c2921b0SApple OSS Distributions 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
934*5c2921b0SApple OSS Distributions 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
935*5c2921b0SApple OSS Distributions 		return err;
936*5c2921b0SApple OSS Distributions 	}
937*5c2921b0SApple OSS Distributions 
938*5c2921b0SApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
939*5c2921b0SApple OSS Distributions 
940*5c2921b0SApple OSS Distributions 	prot = VM_PROT_READ;
941*5c2921b0SApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
942*5c2921b0SApple OSS Distributions 		prot |= VM_PROT_WRITE;
943*5c2921b0SApple OSS Distributions 	}
944*5c2921b0SApple OSS Distributions 	prot &= ref->prot;
945*5c2921b0SApple OSS Distributions 
946*5c2921b0SApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
947*5c2921b0SApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
948*5c2921b0SApple OSS Distributions 		// VM system requires write access to update named entry cache mode
949*5c2921b0SApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
950*5c2921b0SApple OSS Distributions 	}
951*5c2921b0SApple OSS Distributions 
952*5c2921b0SApple OSS Distributions 	tag = (typeof(tag))getVMTag(map);
953*5c2921b0SApple OSS Distributions 
954*5c2921b0SApple OSS Distributions 	if (_task) {
955*5c2921b0SApple OSS Distributions 		// Find first range for offset
956*5c2921b0SApple OSS Distributions 		if (!_rangesCount) {
957*5c2921b0SApple OSS Distributions 			return kIOReturnBadArgument;
958*5c2921b0SApple OSS Distributions 		}
959*5c2921b0SApple OSS Distributions 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
960*5c2921b0SApple OSS Distributions 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
961*5c2921b0SApple OSS Distributions 			if (remain < nextLen) {
962*5c2921b0SApple OSS Distributions 				break;
963*5c2921b0SApple OSS Distributions 			}
964*5c2921b0SApple OSS Distributions 			remain -= nextLen;
965*5c2921b0SApple OSS Distributions 		}
966*5c2921b0SApple OSS Distributions 	} else {
967*5c2921b0SApple OSS Distributions 		rangeIdx = 0;
968*5c2921b0SApple OSS Distributions 		remain   = 0;
969*5c2921b0SApple OSS Distributions 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
970*5c2921b0SApple OSS Distributions 		nextLen  = size;
971*5c2921b0SApple OSS Distributions 	}
972*5c2921b0SApple OSS Distributions 
973*5c2921b0SApple OSS Distributions 	assert(remain < nextLen);
974*5c2921b0SApple OSS Distributions 	if (remain >= nextLen) {
975*5c2921b0SApple OSS Distributions 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
976*5c2921b0SApple OSS Distributions 		return kIOReturnBadArgument;
977*5c2921b0SApple OSS Distributions 	}
978*5c2921b0SApple OSS Distributions 
979*5c2921b0SApple OSS Distributions 	nextAddr  += remain;
980*5c2921b0SApple OSS Distributions 	nextLen   -= remain;
981*5c2921b0SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
982*5c2921b0SApple OSS Distributions 	pageOffset = (vm_map_page_mask(map) & nextAddr);
983*5c2921b0SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
984*5c2921b0SApple OSS Distributions 	pageOffset = (page_mask & nextAddr);
985*5c2921b0SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
986*5c2921b0SApple OSS Distributions 	addr       = 0;
987*5c2921b0SApple OSS Distributions 	didAlloc   = false;
988*5c2921b0SApple OSS Distributions 
989*5c2921b0SApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
990*5c2921b0SApple OSS Distributions 		addr = *inaddr;
991*5c2921b0SApple OSS Distributions 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
992*5c2921b0SApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
993*5c2921b0SApple OSS Distributions 		}
994*5c2921b0SApple OSS Distributions 		addr -= pageOffset;
995*5c2921b0SApple OSS Distributions 	}
996*5c2921b0SApple OSS Distributions 
997*5c2921b0SApple OSS Distributions 	// find first entry for offset
998*5c2921b0SApple OSS Distributions 	for (entryIdx = 0;
999*5c2921b0SApple OSS Distributions 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1000*5c2921b0SApple OSS Distributions 	    entryIdx++) {
1001*5c2921b0SApple OSS Distributions 	}
1002*5c2921b0SApple OSS Distributions 	entryIdx--;
1003*5c2921b0SApple OSS Distributions 	entry = &ref->entries[entryIdx];
1004*5c2921b0SApple OSS Distributions 
1005*5c2921b0SApple OSS Distributions 	// allocate VM
1006*5c2921b0SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1007*5c2921b0SApple OSS Distributions 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1008*5c2921b0SApple OSS Distributions #else
1009*5c2921b0SApple OSS Distributions 	size = round_page_64(size + pageOffset);
1010*5c2921b0SApple OSS Distributions #endif
1011*5c2921b0SApple OSS Distributions 	if (kIOMapOverwrite & options) {
1012*5c2921b0SApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1013*5c2921b0SApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1014*5c2921b0SApple OSS Distributions 		}
1015*5c2921b0SApple OSS Distributions 		err = KERN_SUCCESS;
1016*5c2921b0SApple OSS Distributions 	} else {
1017*5c2921b0SApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1018*5c2921b0SApple OSS Distributions 		ref.map     = map;
1019*5c2921b0SApple OSS Distributions 		ref.tag     = tag;
1020*5c2921b0SApple OSS Distributions 		ref.options = options;
1021*5c2921b0SApple OSS Distributions 		ref.size    = size;
1022*5c2921b0SApple OSS Distributions 		ref.prot    = prot;
1023*5c2921b0SApple OSS Distributions 		if (options & kIOMapAnywhere) {
1024*5c2921b0SApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1025*5c2921b0SApple OSS Distributions 			ref.mapped = 0;
1026*5c2921b0SApple OSS Distributions 		} else {
1027*5c2921b0SApple OSS Distributions 			ref.mapped = addr;
1028*5c2921b0SApple OSS Distributions 		}
1029*5c2921b0SApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1030*5c2921b0SApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1031*5c2921b0SApple OSS Distributions 		} else {
1032*5c2921b0SApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1033*5c2921b0SApple OSS Distributions 		}
1034*5c2921b0SApple OSS Distributions 		if (KERN_SUCCESS == err) {
1035*5c2921b0SApple OSS Distributions 			addr     = ref.mapped;
1036*5c2921b0SApple OSS Distributions 			map      = ref.map;
1037*5c2921b0SApple OSS Distributions 			didAlloc = true;
1038*5c2921b0SApple OSS Distributions 		}
1039*5c2921b0SApple OSS Distributions 	}
1040*5c2921b0SApple OSS Distributions 
1041*5c2921b0SApple OSS Distributions 	/*
1042*5c2921b0SApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1043*5c2921b0SApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1044*5c2921b0SApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1045*5c2921b0SApple OSS Distributions 	 * operations.
1046*5c2921b0SApple OSS Distributions 	 */
1047*5c2921b0SApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1048*5c2921b0SApple OSS Distributions 		options &= ~kIOMapPrefault;
1049*5c2921b0SApple OSS Distributions 	}
1050*5c2921b0SApple OSS Distributions 
1051*5c2921b0SApple OSS Distributions 	/*
1052*5c2921b0SApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1053*5c2921b0SApple OSS Distributions 	 * memory type, and the underlying data.
1054*5c2921b0SApple OSS Distributions 	 */
1055*5c2921b0SApple OSS Distributions 	if (options & kIOMapPrefault) {
1056*5c2921b0SApple OSS Distributions 		/*
1057*5c2921b0SApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1058*5c2921b0SApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1059*5c2921b0SApple OSS Distributions 		 */
1060*5c2921b0SApple OSS Distributions 		assert(_wireCount != 0);
1061*5c2921b0SApple OSS Distributions 		assert(_memoryEntries != NULL);
1062*5c2921b0SApple OSS Distributions 		if ((_wireCount == 0) ||
1063*5c2921b0SApple OSS Distributions 		    (_memoryEntries == NULL)) {
1064*5c2921b0SApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1065*5c2921b0SApple OSS Distributions 			return kIOReturnBadArgument;
1066*5c2921b0SApple OSS Distributions 		}
1067*5c2921b0SApple OSS Distributions 
1068*5c2921b0SApple OSS Distributions 		// Get the page list.
1069*5c2921b0SApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1070*5c2921b0SApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1071*5c2921b0SApple OSS Distributions 		pageList = getPageList(dataP);
1072*5c2921b0SApple OSS Distributions 
1073*5c2921b0SApple OSS Distributions 		// Get the number of IOPLs.
1074*5c2921b0SApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1075*5c2921b0SApple OSS Distributions 
1076*5c2921b0SApple OSS Distributions 		/*
1077*5c2921b0SApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1078*5c2921b0SApple OSS Distributions 		 * the offset. The search will go past it, so we'll need to go back to the
1079*5c2921b0SApple OSS Distributions 		 * right range at the end.
1080*5c2921b0SApple OSS Distributions 		 */
1081*5c2921b0SApple OSS Distributions 		UInt ioplIndex = 0;
1082*5c2921b0SApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1083*5c2921b0SApple OSS Distributions 			ioplIndex++;
1084*5c2921b0SApple OSS Distributions 		}
1085*5c2921b0SApple OSS Distributions 		ioplIndex--;
1086*5c2921b0SApple OSS Distributions 
1087*5c2921b0SApple OSS Distributions 		// Retrieve the IOPL info block.
1088*5c2921b0SApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1089*5c2921b0SApple OSS Distributions 
1090*5c2921b0SApple OSS Distributions 		/*
1091*5c2921b0SApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1092*5c2921b0SApple OSS Distributions 		 * array.
1093*5c2921b0SApple OSS Distributions 		 */
1094*5c2921b0SApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1095*5c2921b0SApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1096*5c2921b0SApple OSS Distributions 		} else {
1097*5c2921b0SApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1098*5c2921b0SApple OSS Distributions 		}
1099*5c2921b0SApple OSS Distributions 
1100*5c2921b0SApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1101*5c2921b0SApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1102*5c2921b0SApple OSS Distributions 
1103*5c2921b0SApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1104*5c2921b0SApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1105*5c2921b0SApple OSS Distributions 	}
1106*5c2921b0SApple OSS Distributions 
1107*5c2921b0SApple OSS Distributions 	// enter mappings
1108*5c2921b0SApple OSS Distributions 	remain  = size;
1109*5c2921b0SApple OSS Distributions 	mapAddr = addr;
1110*5c2921b0SApple OSS Distributions 	addr    += pageOffset;
1111*5c2921b0SApple OSS Distributions 
1112*5c2921b0SApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1113*5c2921b0SApple OSS Distributions 		entryOffset = offset - entry->offset;
1114*5c2921b0SApple OSS Distributions 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1115*5c2921b0SApple OSS Distributions 			err = kIOReturnNotAligned;
1116*5c2921b0SApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1117*5c2921b0SApple OSS Distributions 			break;
1118*5c2921b0SApple OSS Distributions 		}
1119*5c2921b0SApple OSS Distributions 
1120*5c2921b0SApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1121*5c2921b0SApple OSS Distributions 			vm_size_t unused = 0;
1122*5c2921b0SApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1123*5c2921b0SApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1124*5c2921b0SApple OSS Distributions 			assert(KERN_SUCCESS == err);
1125*5c2921b0SApple OSS Distributions 		}
1126*5c2921b0SApple OSS Distributions 
1127*5c2921b0SApple OSS Distributions 		entryOffset -= pageOffset;
1128*5c2921b0SApple OSS Distributions 		if (entryOffset >= entry->size) {
1129*5c2921b0SApple OSS Distributions 			panic("entryOffset");
1130*5c2921b0SApple OSS Distributions 		}
1131*5c2921b0SApple OSS Distributions 		chunk = entry->size - entryOffset;
1132*5c2921b0SApple OSS Distributions 		if (chunk) {
1133*5c2921b0SApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags;
1134*5c2921b0SApple OSS Distributions 
1135*5c2921b0SApple OSS Distributions 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1136*5c2921b0SApple OSS Distributions 			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1137*5c2921b0SApple OSS Distributions 
1138*5c2921b0SApple OSS Distributions 			if (chunk > remain) {
1139*5c2921b0SApple OSS Distributions 				chunk = remain;
1140*5c2921b0SApple OSS Distributions 			}
1141*5c2921b0SApple OSS Distributions 			if (options & kIOMapPrefault) {
1142*5c2921b0SApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1143*5c2921b0SApple OSS Distributions 
1144*5c2921b0SApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1145*5c2921b0SApple OSS Distributions 				    &mapAddr,
1146*5c2921b0SApple OSS Distributions 				    chunk, 0 /* mask */,
1147*5c2921b0SApple OSS Distributions 				    (VM_FLAGS_FIXED
1148*5c2921b0SApple OSS Distributions 				    | VM_FLAGS_OVERWRITE),
1149*5c2921b0SApple OSS Distributions 				    vmk_flags,
1150*5c2921b0SApple OSS Distributions 				    tag,
1151*5c2921b0SApple OSS Distributions 				    entry->entry,
1152*5c2921b0SApple OSS Distributions 				    entryOffset,
1153*5c2921b0SApple OSS Distributions 				    prot,                        // cur
1154*5c2921b0SApple OSS Distributions 				    prot,                        // max
1155*5c2921b0SApple OSS Distributions 				    &pageList[currentPageIndex],
1156*5c2921b0SApple OSS Distributions 				    nb_pages);
1157*5c2921b0SApple OSS Distributions 
1158*5c2921b0SApple OSS Distributions 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1159*5c2921b0SApple OSS Distributions 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1160*5c2921b0SApple OSS Distributions 				}
1161*5c2921b0SApple OSS Distributions 				// Compute the next index in the page list.
1162*5c2921b0SApple OSS Distributions 				currentPageIndex += nb_pages;
1163*5c2921b0SApple OSS Distributions 				assert(currentPageIndex <= _pages);
1164*5c2921b0SApple OSS Distributions 			} else {
1165*5c2921b0SApple OSS Distributions 				err = vm_map_enter_mem_object(map,
1166*5c2921b0SApple OSS Distributions 				    &mapAddr,
1167*5c2921b0SApple OSS Distributions 				    chunk, 0 /* mask */,
1168*5c2921b0SApple OSS Distributions 				    (VM_FLAGS_FIXED
1169*5c2921b0SApple OSS Distributions 				    | VM_FLAGS_OVERWRITE),
1170*5c2921b0SApple OSS Distributions 				    vmk_flags,
1171*5c2921b0SApple OSS Distributions 				    tag,
1172*5c2921b0SApple OSS Distributions 				    entry->entry,
1173*5c2921b0SApple OSS Distributions 				    entryOffset,
1174*5c2921b0SApple OSS Distributions 				    false,               // copy
1175*5c2921b0SApple OSS Distributions 				    prot,               // cur
1176*5c2921b0SApple OSS Distributions 				    prot,               // max
1177*5c2921b0SApple OSS Distributions 				    VM_INHERIT_NONE);
1178*5c2921b0SApple OSS Distributions 			}
1179*5c2921b0SApple OSS Distributions 			if (KERN_SUCCESS != err) {
1180*5c2921b0SApple OSS Distributions 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1181*5c2921b0SApple OSS Distributions 				break;
1182*5c2921b0SApple OSS Distributions 			}
1183*5c2921b0SApple OSS Distributions 			remain -= chunk;
1184*5c2921b0SApple OSS Distributions 			if (!remain) {
1185*5c2921b0SApple OSS Distributions 				break;
1186*5c2921b0SApple OSS Distributions 			}
1187*5c2921b0SApple OSS Distributions 			mapAddr  += chunk;
1188*5c2921b0SApple OSS Distributions 			offset   += chunk - pageOffset;
1189*5c2921b0SApple OSS Distributions 		}
1190*5c2921b0SApple OSS Distributions 		pageOffset = 0;
1191*5c2921b0SApple OSS Distributions 		entry++;
1192*5c2921b0SApple OSS Distributions 		entryIdx++;
1193*5c2921b0SApple OSS Distributions 		if (entryIdx >= ref->count) {
1194*5c2921b0SApple OSS Distributions 			err = kIOReturnOverrun;
1195*5c2921b0SApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1196*5c2921b0SApple OSS Distributions 			break;
1197*5c2921b0SApple OSS Distributions 		}
1198*5c2921b0SApple OSS Distributions 	}
1199*5c2921b0SApple OSS Distributions 
1200*5c2921b0SApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1201*5c2921b0SApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1202*5c2921b0SApple OSS Distributions 		addr = 0;
1203*5c2921b0SApple OSS Distributions 	}
1204*5c2921b0SApple OSS Distributions 	*inaddr = addr;
1205*5c2921b0SApple OSS Distributions 
1206*5c2921b0SApple OSS Distributions 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1207*5c2921b0SApple OSS Distributions 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1208*5c2921b0SApple OSS Distributions 	}
1209*5c2921b0SApple OSS Distributions 	return err;
1210*5c2921b0SApple OSS Distributions }
1211*5c2921b0SApple OSS Distributions 
1212*5c2921b0SApple OSS Distributions #define LOGUNALIGN 0
1213*5c2921b0SApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1214*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1215*5c2921b0SApple OSS Distributions 	IOMemoryReference * ref,
1216*5c2921b0SApple OSS Distributions 	vm_map_t            map,
1217*5c2921b0SApple OSS Distributions 	mach_vm_size_t      inoffset,
1218*5c2921b0SApple OSS Distributions 	mach_vm_size_t      size,
1219*5c2921b0SApple OSS Distributions 	IOOptionBits        options,
1220*5c2921b0SApple OSS Distributions 	mach_vm_address_t * inaddr)
1221*5c2921b0SApple OSS Distributions {
1222*5c2921b0SApple OSS Distributions 	IOReturn            err;
1223*5c2921b0SApple OSS Distributions 	int64_t             offset = inoffset;
1224*5c2921b0SApple OSS Distributions 	uint32_t            entryIdx, firstEntryIdx;
1225*5c2921b0SApple OSS Distributions 	vm_map_offset_t     addr, mapAddr, mapAddrOut;
1226*5c2921b0SApple OSS Distributions 	vm_map_offset_t     entryOffset, remain, chunk;
1227*5c2921b0SApple OSS Distributions 
1228*5c2921b0SApple OSS Distributions 	IOMemoryEntry    * entry;
1229*5c2921b0SApple OSS Distributions 	vm_prot_t          prot, memEntryCacheMode;
1230*5c2921b0SApple OSS Distributions 	IOOptionBits       type;
1231*5c2921b0SApple OSS Distributions 	IOOptionBits       cacheMode;
1232*5c2921b0SApple OSS Distributions 	vm_tag_t           tag;
1233*5c2921b0SApple OSS Distributions 	// for the kIOMapPrefault option.
1234*5c2921b0SApple OSS Distributions 	upl_page_info_t  * pageList = NULL;
1235*5c2921b0SApple OSS Distributions 	UInt               currentPageIndex = 0;
1236*5c2921b0SApple OSS Distributions 	bool               didAlloc;
1237*5c2921b0SApple OSS Distributions 
1238*5c2921b0SApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1239*5c2921b0SApple OSS Distributions 
1240*5c2921b0SApple OSS Distributions 	if (ref->mapRef) {
1241*5c2921b0SApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1242*5c2921b0SApple OSS Distributions 		return err;
1243*5c2921b0SApple OSS Distributions 	}
1244*5c2921b0SApple OSS Distributions 
1245*5c2921b0SApple OSS Distributions #if LOGUNALIGN
1246*5c2921b0SApple OSS Distributions 	printf("MAP offset %qx, %qx\n", inoffset, size);
1247*5c2921b0SApple OSS Distributions #endif
1248*5c2921b0SApple OSS Distributions 
1249*5c2921b0SApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
1250*5c2921b0SApple OSS Distributions 
1251*5c2921b0SApple OSS Distributions 	prot = VM_PROT_READ;
1252*5c2921b0SApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
1253*5c2921b0SApple OSS Distributions 		prot |= VM_PROT_WRITE;
1254*5c2921b0SApple OSS Distributions 	}
1255*5c2921b0SApple OSS Distributions 	prot &= ref->prot;
1256*5c2921b0SApple OSS Distributions 
1257*5c2921b0SApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1258*5c2921b0SApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
1259*5c2921b0SApple OSS Distributions 		// VM system requires write access to update named entry cache mode
1260*5c2921b0SApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1261*5c2921b0SApple OSS Distributions 	}
1262*5c2921b0SApple OSS Distributions 
1263*5c2921b0SApple OSS Distributions 	tag = (vm_tag_t) getVMTag(map);
1264*5c2921b0SApple OSS Distributions 
1265*5c2921b0SApple OSS Distributions 	addr       = 0;
1266*5c2921b0SApple OSS Distributions 	didAlloc   = false;
1267*5c2921b0SApple OSS Distributions 
1268*5c2921b0SApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
1269*5c2921b0SApple OSS Distributions 		addr = *inaddr;
1270*5c2921b0SApple OSS Distributions 	}
1271*5c2921b0SApple OSS Distributions 
1272*5c2921b0SApple OSS Distributions 	// find first entry for offset
1273*5c2921b0SApple OSS Distributions 	for (firstEntryIdx = 0;
1274*5c2921b0SApple OSS Distributions 	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1275*5c2921b0SApple OSS Distributions 	    firstEntryIdx++) {
1276*5c2921b0SApple OSS Distributions 	}
1277*5c2921b0SApple OSS Distributions 	firstEntryIdx--;
1278*5c2921b0SApple OSS Distributions 
1279*5c2921b0SApple OSS Distributions 	// calculate required VM space
1280*5c2921b0SApple OSS Distributions 
1281*5c2921b0SApple OSS Distributions 	entryIdx = firstEntryIdx;
1282*5c2921b0SApple OSS Distributions 	entry = &ref->entries[entryIdx];
1283*5c2921b0SApple OSS Distributions 
1284*5c2921b0SApple OSS Distributions 	remain  = size;
1285*5c2921b0SApple OSS Distributions 	int64_t iteroffset = offset;
1286*5c2921b0SApple OSS Distributions 	uint64_t mapSize = 0;
1287*5c2921b0SApple OSS Distributions 	while (remain) {
1288*5c2921b0SApple OSS Distributions 		entryOffset = iteroffset - entry->offset;
1289*5c2921b0SApple OSS Distributions 		if (entryOffset >= entry->size) {
1290*5c2921b0SApple OSS Distributions 			panic("entryOffset");
1291*5c2921b0SApple OSS Distributions 		}
1292*5c2921b0SApple OSS Distributions 
1293*5c2921b0SApple OSS Distributions #if LOGUNALIGN
1294*5c2921b0SApple OSS Distributions 		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1295*5c2921b0SApple OSS Distributions 		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1296*5c2921b0SApple OSS Distributions #endif
1297*5c2921b0SApple OSS Distributions 
1298*5c2921b0SApple OSS Distributions 		chunk = entry->size - entryOffset;
1299*5c2921b0SApple OSS Distributions 		if (chunk) {
1300*5c2921b0SApple OSS Distributions 			if (chunk > remain) {
1301*5c2921b0SApple OSS Distributions 				chunk = remain;
1302*5c2921b0SApple OSS Distributions 			}
1303*5c2921b0SApple OSS Distributions 			mach_vm_size_t entrySize;
1304*5c2921b0SApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1305*5c2921b0SApple OSS Distributions 			assert(KERN_SUCCESS == err);
1306*5c2921b0SApple OSS Distributions 			mapSize += entrySize;
1307*5c2921b0SApple OSS Distributions 
1308*5c2921b0SApple OSS Distributions 			remain -= chunk;
1309*5c2921b0SApple OSS Distributions 			if (!remain) {
1310*5c2921b0SApple OSS Distributions 				break;
1311*5c2921b0SApple OSS Distributions 			}
1312*5c2921b0SApple OSS Distributions 			iteroffset   += chunk; // - pageOffset;
1313*5c2921b0SApple OSS Distributions 		}
1314*5c2921b0SApple OSS Distributions 		entry++;
1315*5c2921b0SApple OSS Distributions 		entryIdx++;
1316*5c2921b0SApple OSS Distributions 		if (entryIdx >= ref->count) {
1317*5c2921b0SApple OSS Distributions 			panic("overrun");
1318*5c2921b0SApple OSS Distributions 			err = kIOReturnOverrun;
1319*5c2921b0SApple OSS Distributions 			break;
1320*5c2921b0SApple OSS Distributions 		}
1321*5c2921b0SApple OSS Distributions 	}
1322*5c2921b0SApple OSS Distributions 
1323*5c2921b0SApple OSS Distributions 	if (kIOMapOverwrite & options) {
1324*5c2921b0SApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1325*5c2921b0SApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1326*5c2921b0SApple OSS Distributions 		}
1327*5c2921b0SApple OSS Distributions 		err = KERN_SUCCESS;
1328*5c2921b0SApple OSS Distributions 	} else {
1329*5c2921b0SApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1330*5c2921b0SApple OSS Distributions 		ref.map     = map;
1331*5c2921b0SApple OSS Distributions 		ref.tag     = tag;
1332*5c2921b0SApple OSS Distributions 		ref.options = options;
1333*5c2921b0SApple OSS Distributions 		ref.size    = mapSize;
1334*5c2921b0SApple OSS Distributions 		ref.prot    = prot;
1335*5c2921b0SApple OSS Distributions 		if (options & kIOMapAnywhere) {
1336*5c2921b0SApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1337*5c2921b0SApple OSS Distributions 			ref.mapped = 0;
1338*5c2921b0SApple OSS Distributions 		} else {
1339*5c2921b0SApple OSS Distributions 			ref.mapped = addr;
1340*5c2921b0SApple OSS Distributions 		}
1341*5c2921b0SApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1342*5c2921b0SApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1343*5c2921b0SApple OSS Distributions 		} else {
1344*5c2921b0SApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1345*5c2921b0SApple OSS Distributions 		}
1346*5c2921b0SApple OSS Distributions 
1347*5c2921b0SApple OSS Distributions 		if (KERN_SUCCESS == err) {
1348*5c2921b0SApple OSS Distributions 			addr     = ref.mapped;
1349*5c2921b0SApple OSS Distributions 			map      = ref.map;
1350*5c2921b0SApple OSS Distributions 			didAlloc = true;
1351*5c2921b0SApple OSS Distributions 		}
1352*5c2921b0SApple OSS Distributions #if LOGUNALIGN
1353*5c2921b0SApple OSS Distributions 		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1354*5c2921b0SApple OSS Distributions #endif
1355*5c2921b0SApple OSS Distributions 	}
1356*5c2921b0SApple OSS Distributions 
1357*5c2921b0SApple OSS Distributions 	/*
1358*5c2921b0SApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1359*5c2921b0SApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1360*5c2921b0SApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1361*5c2921b0SApple OSS Distributions 	 * operations.
1362*5c2921b0SApple OSS Distributions 	 */
1363*5c2921b0SApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1364*5c2921b0SApple OSS Distributions 		options &= ~kIOMapPrefault;
1365*5c2921b0SApple OSS Distributions 	}
1366*5c2921b0SApple OSS Distributions 
1367*5c2921b0SApple OSS Distributions 	/*
1368*5c2921b0SApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1369*5c2921b0SApple OSS Distributions 	 * memory type, and the underlying data.
1370*5c2921b0SApple OSS Distributions 	 */
1371*5c2921b0SApple OSS Distributions 	if (options & kIOMapPrefault) {
1372*5c2921b0SApple OSS Distributions 		/*
1373*5c2921b0SApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1374*5c2921b0SApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1375*5c2921b0SApple OSS Distributions 		 */
1376*5c2921b0SApple OSS Distributions 		assert(_wireCount != 0);
1377*5c2921b0SApple OSS Distributions 		assert(_memoryEntries != NULL);
1378*5c2921b0SApple OSS Distributions 		if ((_wireCount == 0) ||
1379*5c2921b0SApple OSS Distributions 		    (_memoryEntries == NULL)) {
1380*5c2921b0SApple OSS Distributions 			return kIOReturnBadArgument;
1381*5c2921b0SApple OSS Distributions 		}
1382*5c2921b0SApple OSS Distributions 
1383*5c2921b0SApple OSS Distributions 		// Get the page list.
1384*5c2921b0SApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1385*5c2921b0SApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1386*5c2921b0SApple OSS Distributions 		pageList = getPageList(dataP);
1387*5c2921b0SApple OSS Distributions 
1388*5c2921b0SApple OSS Distributions 		// Get the number of IOPLs.
1389*5c2921b0SApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1390*5c2921b0SApple OSS Distributions 
1391*5c2921b0SApple OSS Distributions 		/*
1392*5c2921b0SApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1393*5c2921b0SApple OSS Distributions 		 * the offset. The search will go past it, so we'll need to go back to the
1394*5c2921b0SApple OSS Distributions 		 * right range at the end.
1395*5c2921b0SApple OSS Distributions 		 */
1396*5c2921b0SApple OSS Distributions 		UInt ioplIndex = 0;
1397*5c2921b0SApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1398*5c2921b0SApple OSS Distributions 			ioplIndex++;
1399*5c2921b0SApple OSS Distributions 		}
1400*5c2921b0SApple OSS Distributions 		ioplIndex--;
1401*5c2921b0SApple OSS Distributions 
1402*5c2921b0SApple OSS Distributions 		// Retrieve the IOPL info block.
1403*5c2921b0SApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1404*5c2921b0SApple OSS Distributions 
1405*5c2921b0SApple OSS Distributions 		/*
1406*5c2921b0SApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1407*5c2921b0SApple OSS Distributions 		 * array.
1408*5c2921b0SApple OSS Distributions 		 */
1409*5c2921b0SApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1410*5c2921b0SApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1411*5c2921b0SApple OSS Distributions 		} else {
1412*5c2921b0SApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1413*5c2921b0SApple OSS Distributions 		}
1414*5c2921b0SApple OSS Distributions 
1415*5c2921b0SApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1416*5c2921b0SApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1417*5c2921b0SApple OSS Distributions 
1418*5c2921b0SApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1419*5c2921b0SApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1420*5c2921b0SApple OSS Distributions 	}
1421*5c2921b0SApple OSS Distributions 
1422*5c2921b0SApple OSS Distributions 	// enter mappings
1423*5c2921b0SApple OSS Distributions 	remain   = size;
1424*5c2921b0SApple OSS Distributions 	mapAddr  = addr;
1425*5c2921b0SApple OSS Distributions 	entryIdx = firstEntryIdx;
1426*5c2921b0SApple OSS Distributions 	entry = &ref->entries[entryIdx];
1427*5c2921b0SApple OSS Distributions 
1428*5c2921b0SApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1429*5c2921b0SApple OSS Distributions #if LOGUNALIGN
1430*5c2921b0SApple OSS Distributions 		printf("offset %qx, %qx\n", offset, entry->offset);
1431*5c2921b0SApple OSS Distributions #endif
1432*5c2921b0SApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1433*5c2921b0SApple OSS Distributions 			vm_size_t unused = 0;
1434*5c2921b0SApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1435*5c2921b0SApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1436*5c2921b0SApple OSS Distributions 			assert(KERN_SUCCESS == err);
1437*5c2921b0SApple OSS Distributions 		}
1438*5c2921b0SApple OSS Distributions 		entryOffset = offset - entry->offset;
1439*5c2921b0SApple OSS Distributions 		if (entryOffset >= entry->size) {
1440*5c2921b0SApple OSS Distributions 			panic("entryOffset");
1441*5c2921b0SApple OSS Distributions 		}
1442*5c2921b0SApple OSS Distributions 		chunk = entry->size - entryOffset;
1443*5c2921b0SApple OSS Distributions #if LOGUNALIGN
1444*5c2921b0SApple OSS Distributions 		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1445*5c2921b0SApple OSS Distributions #endif
1446*5c2921b0SApple OSS Distributions 		if (chunk) {
1447*5c2921b0SApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags;
1448*5c2921b0SApple OSS Distributions 
1449*5c2921b0SApple OSS Distributions 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1450*5c2921b0SApple OSS Distributions 			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1451*5c2921b0SApple OSS Distributions 
1452*5c2921b0SApple OSS Distributions 			if (chunk > remain) {
1453*5c2921b0SApple OSS Distributions 				chunk = remain;
1454*5c2921b0SApple OSS Distributions 			}
1455*5c2921b0SApple OSS Distributions 			mapAddrOut = mapAddr;
1456*5c2921b0SApple OSS Distributions 			if (options & kIOMapPrefault) {
1457*5c2921b0SApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1458*5c2921b0SApple OSS Distributions 
1459*5c2921b0SApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1460*5c2921b0SApple OSS Distributions 				    &mapAddrOut,
1461*5c2921b0SApple OSS Distributions 				    chunk, 0 /* mask */,
1462*5c2921b0SApple OSS Distributions 				    (VM_FLAGS_FIXED
1463*5c2921b0SApple OSS Distributions 				    | VM_FLAGS_OVERWRITE
1464*5c2921b0SApple OSS Distributions 				    | VM_FLAGS_RETURN_DATA_ADDR),
1465*5c2921b0SApple OSS Distributions 				    vmk_flags,
1466*5c2921b0SApple OSS Distributions 				    tag,
1467*5c2921b0SApple OSS Distributions 				    entry->entry,
1468*5c2921b0SApple OSS Distributions 				    entryOffset,
1469*5c2921b0SApple OSS Distributions 				    prot,                        // cur
1470*5c2921b0SApple OSS Distributions 				    prot,                        // max
1471*5c2921b0SApple OSS Distributions 				    &pageList[currentPageIndex],
1472*5c2921b0SApple OSS Distributions 				    nb_pages);
1473*5c2921b0SApple OSS Distributions 
1474*5c2921b0SApple OSS Distributions 				// Compute the next index in the page list.
1475*5c2921b0SApple OSS Distributions 				currentPageIndex += nb_pages;
1476*5c2921b0SApple OSS Distributions 				assert(currentPageIndex <= _pages);
1477*5c2921b0SApple OSS Distributions 			} else {
1478*5c2921b0SApple OSS Distributions #if LOGUNALIGN
1479*5c2921b0SApple OSS Distributions 				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1480*5c2921b0SApple OSS Distributions #endif
1481*5c2921b0SApple OSS Distributions 				err = vm_map_enter_mem_object(map,
1482*5c2921b0SApple OSS Distributions 				    &mapAddrOut,
1483*5c2921b0SApple OSS Distributions 				    chunk, 0 /* mask */,
1484*5c2921b0SApple OSS Distributions 				    (VM_FLAGS_FIXED
1485*5c2921b0SApple OSS Distributions 				    | VM_FLAGS_OVERWRITE
1486*5c2921b0SApple OSS Distributions 				    | VM_FLAGS_RETURN_DATA_ADDR),
1487*5c2921b0SApple OSS Distributions 				    vmk_flags,
1488*5c2921b0SApple OSS Distributions 				    tag,
1489*5c2921b0SApple OSS Distributions 				    entry->entry,
1490*5c2921b0SApple OSS Distributions 				    entryOffset,
1491*5c2921b0SApple OSS Distributions 				    false,               // copy
1492*5c2921b0SApple OSS Distributions 				    prot,               // cur
1493*5c2921b0SApple OSS Distributions 				    prot,               // max
1494*5c2921b0SApple OSS Distributions 				    VM_INHERIT_NONE);
1495*5c2921b0SApple OSS Distributions 			}
1496*5c2921b0SApple OSS Distributions 			if (KERN_SUCCESS != err) {
1497*5c2921b0SApple OSS Distributions 				panic("map enter err %x", err);
1498*5c2921b0SApple OSS Distributions 				break;
1499*5c2921b0SApple OSS Distributions 			}
1500*5c2921b0SApple OSS Distributions #if LOGUNALIGN
1501*5c2921b0SApple OSS Distributions 			printf("mapAddr o %qx\n", mapAddrOut);
1502*5c2921b0SApple OSS Distributions #endif
1503*5c2921b0SApple OSS Distributions 			if (entryIdx == firstEntryIdx) {
1504*5c2921b0SApple OSS Distributions 				addr = mapAddrOut;
1505*5c2921b0SApple OSS Distributions 			}
1506*5c2921b0SApple OSS Distributions 			remain -= chunk;
1507*5c2921b0SApple OSS Distributions 			if (!remain) {
1508*5c2921b0SApple OSS Distributions 				break;
1509*5c2921b0SApple OSS Distributions 			}
1510*5c2921b0SApple OSS Distributions 			mach_vm_size_t entrySize;
1511*5c2921b0SApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1512*5c2921b0SApple OSS Distributions 			assert(KERN_SUCCESS == err);
1513*5c2921b0SApple OSS Distributions 			mapAddr += entrySize;
1514*5c2921b0SApple OSS Distributions 			offset  += chunk;
1515*5c2921b0SApple OSS Distributions 		}
1516*5c2921b0SApple OSS Distributions 
1517*5c2921b0SApple OSS Distributions 		entry++;
1518*5c2921b0SApple OSS Distributions 		entryIdx++;
1519*5c2921b0SApple OSS Distributions 		if (entryIdx >= ref->count) {
1520*5c2921b0SApple OSS Distributions 			err = kIOReturnOverrun;
1521*5c2921b0SApple OSS Distributions 			break;
1522*5c2921b0SApple OSS Distributions 		}
1523*5c2921b0SApple OSS Distributions 	}
1524*5c2921b0SApple OSS Distributions 
1525*5c2921b0SApple OSS Distributions 	if (KERN_SUCCESS != err) {
1526*5c2921b0SApple OSS Distributions 		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1527*5c2921b0SApple OSS Distributions 	}
1528*5c2921b0SApple OSS Distributions 
1529*5c2921b0SApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1530*5c2921b0SApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1531*5c2921b0SApple OSS Distributions 		addr = 0;
1532*5c2921b0SApple OSS Distributions 	}
1533*5c2921b0SApple OSS Distributions 	*inaddr = addr;
1534*5c2921b0SApple OSS Distributions 
1535*5c2921b0SApple OSS Distributions 	return err;
1536*5c2921b0SApple OSS Distributions }
1537*5c2921b0SApple OSS Distributions 
1538*5c2921b0SApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1539*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1540*5c2921b0SApple OSS Distributions 	IOMemoryReference * ref,
1541*5c2921b0SApple OSS Distributions 	uint64_t          * offset)
1542*5c2921b0SApple OSS Distributions {
1543*5c2921b0SApple OSS Distributions 	kern_return_t kr;
1544*5c2921b0SApple OSS Distributions 	vm_object_offset_t data_offset = 0;
1545*5c2921b0SApple OSS Distributions 	uint64_t total;
1546*5c2921b0SApple OSS Distributions 	uint32_t idx;
1547*5c2921b0SApple OSS Distributions 
1548*5c2921b0SApple OSS Distributions 	assert(ref->count);
1549*5c2921b0SApple OSS Distributions 	if (offset) {
1550*5c2921b0SApple OSS Distributions 		*offset = (uint64_t) data_offset;
1551*5c2921b0SApple OSS Distributions 	}
1552*5c2921b0SApple OSS Distributions 	total = 0;
1553*5c2921b0SApple OSS Distributions 	for (idx = 0; idx < ref->count; idx++) {
1554*5c2921b0SApple OSS Distributions 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1555*5c2921b0SApple OSS Distributions 		    &data_offset);
1556*5c2921b0SApple OSS Distributions 		if (KERN_SUCCESS != kr) {
1557*5c2921b0SApple OSS Distributions 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1558*5c2921b0SApple OSS Distributions 		} else if (0 != data_offset) {
1559*5c2921b0SApple OSS Distributions 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1560*5c2921b0SApple OSS Distributions 		}
1561*5c2921b0SApple OSS Distributions 		if (offset && !idx) {
1562*5c2921b0SApple OSS Distributions 			*offset = (uint64_t) data_offset;
1563*5c2921b0SApple OSS Distributions 		}
1564*5c2921b0SApple OSS Distributions 		total += round_page(data_offset + ref->entries[idx].size);
1565*5c2921b0SApple OSS Distributions 	}
1566*5c2921b0SApple OSS Distributions 
1567*5c2921b0SApple OSS Distributions 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1568*5c2921b0SApple OSS Distributions 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1569*5c2921b0SApple OSS Distributions 
1570*5c2921b0SApple OSS Distributions 	return total;
1571*5c2921b0SApple OSS Distributions }
1572*5c2921b0SApple OSS Distributions 
1573*5c2921b0SApple OSS Distributions 
1574*5c2921b0SApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1575*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1576*5c2921b0SApple OSS Distributions 	IOMemoryReference * ref,
1577*5c2921b0SApple OSS Distributions 	IOByteCount       * residentPageCount,
1578*5c2921b0SApple OSS Distributions 	IOByteCount       * dirtyPageCount)
1579*5c2921b0SApple OSS Distributions {
1580*5c2921b0SApple OSS Distributions 	IOReturn        err;
1581*5c2921b0SApple OSS Distributions 	IOMemoryEntry * entries;
1582*5c2921b0SApple OSS Distributions 	unsigned int resident, dirty;
1583*5c2921b0SApple OSS Distributions 	unsigned int totalResident, totalDirty;
1584*5c2921b0SApple OSS Distributions 
1585*5c2921b0SApple OSS Distributions 	totalResident = totalDirty = 0;
1586*5c2921b0SApple OSS Distributions 	err = kIOReturnSuccess;
1587*5c2921b0SApple OSS Distributions 	entries = ref->entries + ref->count;
1588*5c2921b0SApple OSS Distributions 	while (entries > &ref->entries[0]) {
1589*5c2921b0SApple OSS Distributions 		entries--;
1590*5c2921b0SApple OSS Distributions 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1591*5c2921b0SApple OSS Distributions 		if (KERN_SUCCESS != err) {
1592*5c2921b0SApple OSS Distributions 			break;
1593*5c2921b0SApple OSS Distributions 		}
1594*5c2921b0SApple OSS Distributions 		totalResident += resident;
1595*5c2921b0SApple OSS Distributions 		totalDirty    += dirty;
1596*5c2921b0SApple OSS Distributions 	}
1597*5c2921b0SApple OSS Distributions 
1598*5c2921b0SApple OSS Distributions 	if (residentPageCount) {
1599*5c2921b0SApple OSS Distributions 		*residentPageCount = totalResident;
1600*5c2921b0SApple OSS Distributions 	}
1601*5c2921b0SApple OSS Distributions 	if (dirtyPageCount) {
1602*5c2921b0SApple OSS Distributions 		*dirtyPageCount    = totalDirty;
1603*5c2921b0SApple OSS Distributions 	}
1604*5c2921b0SApple OSS Distributions 	return err;
1605*5c2921b0SApple OSS Distributions }
1606*5c2921b0SApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits        newState,
	IOOptionBits      * oldState)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	vm_purgable_t   control;
	int             totalState, state;

	// Apply 'newState' to every memory entry in 'ref' (walking last to
	// first) and fold the per-entry states returned by the VM into one
	// aggregate value reported via 'oldState' if non-NULL. Stops at the
	// first error, leaving earlier entries already transitioned.
	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;

		// Translate the IOKit purgeable request into VM control/state values.
		err = purgeableControlBits(newState, &control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// Convert the VM state for this entry back into IOKit terms.
		err = purgeableStateBits(&state);
		if (KERN_SUCCESS != err) {
			break;
		}

		// Aggregate with precedence Empty > Volatile > NonVolatile: once
		// the total is Empty or Volatile, a NonVolatile entry cannot
		// lower it (hence the 'continue's).
		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}

	if (oldState) {
		*oldState = totalState;
	}
	return err;
}
1655*5c2921b0SApple OSS Distributions 
1656*5c2921b0SApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1657*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1658*5c2921b0SApple OSS Distributions 	IOMemoryReference * ref,
1659*5c2921b0SApple OSS Distributions 	task_t              newOwner,
1660*5c2921b0SApple OSS Distributions 	int                 newLedgerTag,
1661*5c2921b0SApple OSS Distributions 	IOOptionBits        newLedgerOptions)
1662*5c2921b0SApple OSS Distributions {
1663*5c2921b0SApple OSS Distributions 	IOReturn        err, totalErr;
1664*5c2921b0SApple OSS Distributions 	IOMemoryEntry * entries;
1665*5c2921b0SApple OSS Distributions 
1666*5c2921b0SApple OSS Distributions 	totalErr = kIOReturnSuccess;
1667*5c2921b0SApple OSS Distributions 	entries = ref->entries + ref->count;
1668*5c2921b0SApple OSS Distributions 	while (entries > &ref->entries[0]) {
1669*5c2921b0SApple OSS Distributions 		entries--;
1670*5c2921b0SApple OSS Distributions 
1671*5c2921b0SApple OSS Distributions 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1672*5c2921b0SApple OSS Distributions 		if (KERN_SUCCESS != err) {
1673*5c2921b0SApple OSS Distributions 			totalErr = err;
1674*5c2921b0SApple OSS Distributions 		}
1675*5c2921b0SApple OSS Distributions 	}
1676*5c2921b0SApple OSS Distributions 
1677*5c2921b0SApple OSS Distributions 	return totalErr;
1678*5c2921b0SApple OSS Distributions }
1679*5c2921b0SApple OSS Distributions 
1680*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1681*5c2921b0SApple OSS Distributions 
1682*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1683*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withAddress(void *      address,
1684*5c2921b0SApple OSS Distributions     IOByteCount   length,
1685*5c2921b0SApple OSS Distributions     IODirection direction)
1686*5c2921b0SApple OSS Distributions {
1687*5c2921b0SApple OSS Distributions 	return IOMemoryDescriptor::
1688*5c2921b0SApple OSS Distributions 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1689*5c2921b0SApple OSS Distributions }
1690*5c2921b0SApple OSS Distributions 
1691*5c2921b0SApple OSS Distributions #ifndef __LP64__
1692*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1693*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1694*5c2921b0SApple OSS Distributions     IOByteCount  length,
1695*5c2921b0SApple OSS Distributions     IODirection  direction,
1696*5c2921b0SApple OSS Distributions     task_t       task)
1697*5c2921b0SApple OSS Distributions {
1698*5c2921b0SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1699*5c2921b0SApple OSS Distributions 	if (that) {
1700*5c2921b0SApple OSS Distributions 		if (that->initWithAddress(address, length, direction, task)) {
1701*5c2921b0SApple OSS Distributions 			return os::move(that);
1702*5c2921b0SApple OSS Distributions 		}
1703*5c2921b0SApple OSS Distributions 	}
1704*5c2921b0SApple OSS Distributions 	return nullptr;
1705*5c2921b0SApple OSS Distributions }
1706*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
1707*5c2921b0SApple OSS Distributions 
1708*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1709*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1710*5c2921b0SApple OSS Distributions 	IOPhysicalAddress       address,
1711*5c2921b0SApple OSS Distributions 	IOByteCount             length,
1712*5c2921b0SApple OSS Distributions 	IODirection             direction )
1713*5c2921b0SApple OSS Distributions {
1714*5c2921b0SApple OSS Distributions 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1715*5c2921b0SApple OSS Distributions }
1716*5c2921b0SApple OSS Distributions 
1717*5c2921b0SApple OSS Distributions #ifndef __LP64__
1718*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1719*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1720*5c2921b0SApple OSS Distributions     UInt32           withCount,
1721*5c2921b0SApple OSS Distributions     IODirection      direction,
1722*5c2921b0SApple OSS Distributions     task_t           task,
1723*5c2921b0SApple OSS Distributions     bool             asReference)
1724*5c2921b0SApple OSS Distributions {
1725*5c2921b0SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1726*5c2921b0SApple OSS Distributions 	if (that) {
1727*5c2921b0SApple OSS Distributions 		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1728*5c2921b0SApple OSS Distributions 			return os::move(that);
1729*5c2921b0SApple OSS Distributions 		}
1730*5c2921b0SApple OSS Distributions 	}
1731*5c2921b0SApple OSS Distributions 	return nullptr;
1732*5c2921b0SApple OSS Distributions }
1733*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
1734*5c2921b0SApple OSS Distributions 
1735*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1736*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1737*5c2921b0SApple OSS Distributions     mach_vm_size_t length,
1738*5c2921b0SApple OSS Distributions     IOOptionBits   options,
1739*5c2921b0SApple OSS Distributions     task_t         task)
1740*5c2921b0SApple OSS Distributions {
1741*5c2921b0SApple OSS Distributions 	IOAddressRange range = { address, length };
1742*5c2921b0SApple OSS Distributions 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1743*5c2921b0SApple OSS Distributions }
1744*5c2921b0SApple OSS Distributions 
1745*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1746*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1747*5c2921b0SApple OSS Distributions     UInt32           rangeCount,
1748*5c2921b0SApple OSS Distributions     IOOptionBits     options,
1749*5c2921b0SApple OSS Distributions     task_t           task)
1750*5c2921b0SApple OSS Distributions {
1751*5c2921b0SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1752*5c2921b0SApple OSS Distributions 	if (that) {
1753*5c2921b0SApple OSS Distributions 		if (task) {
1754*5c2921b0SApple OSS Distributions 			options |= kIOMemoryTypeVirtual64;
1755*5c2921b0SApple OSS Distributions 		} else {
1756*5c2921b0SApple OSS Distributions 			options |= kIOMemoryTypePhysical64;
1757*5c2921b0SApple OSS Distributions 		}
1758*5c2921b0SApple OSS Distributions 
1759*5c2921b0SApple OSS Distributions 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1760*5c2921b0SApple OSS Distributions 			return os::move(that);
1761*5c2921b0SApple OSS Distributions 		}
1762*5c2921b0SApple OSS Distributions 	}
1763*5c2921b0SApple OSS Distributions 
1764*5c2921b0SApple OSS Distributions 	return nullptr;
1765*5c2921b0SApple OSS Distributions }
1766*5c2921b0SApple OSS Distributions 
1767*5c2921b0SApple OSS Distributions 
1768*5c2921b0SApple OSS Distributions /*
1769*5c2921b0SApple OSS Distributions  * withOptions:
1770*5c2921b0SApple OSS Distributions  *
1771*5c2921b0SApple OSS Distributions  * Create a new IOMemoryDescriptor. The buffer is made up of several
1772*5c2921b0SApple OSS Distributions  * virtual address ranges, from a given task.
1773*5c2921b0SApple OSS Distributions  *
1774*5c2921b0SApple OSS Distributions  * Passing the ranges as a reference will avoid an extra allocation.
1775*5c2921b0SApple OSS Distributions  */
1776*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1777*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withOptions(void *          buffers,
1778*5c2921b0SApple OSS Distributions     UInt32          count,
1779*5c2921b0SApple OSS Distributions     UInt32          offset,
1780*5c2921b0SApple OSS Distributions     task_t          task,
1781*5c2921b0SApple OSS Distributions     IOOptionBits    opts,
1782*5c2921b0SApple OSS Distributions     IOMapper *      mapper)
1783*5c2921b0SApple OSS Distributions {
1784*5c2921b0SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1785*5c2921b0SApple OSS Distributions 
1786*5c2921b0SApple OSS Distributions 	if (self
1787*5c2921b0SApple OSS Distributions 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1788*5c2921b0SApple OSS Distributions 		return nullptr;
1789*5c2921b0SApple OSS Distributions 	}
1790*5c2921b0SApple OSS Distributions 
1791*5c2921b0SApple OSS Distributions 	return os::move(self);
1792*5c2921b0SApple OSS Distributions }
1793*5c2921b0SApple OSS Distributions 
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	// Base-class stub: IOMemoryDescriptor itself cannot be initialized
	// from raw options; subclasses (e.g. IOGeneralMemoryDescriptor)
	// override this with the real implementation. Always fails here.
	return false;
}
1804*5c2921b0SApple OSS Distributions 
1805*5c2921b0SApple OSS Distributions #ifndef __LP64__
1806*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1807*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1808*5c2921b0SApple OSS Distributions     UInt32          withCount,
1809*5c2921b0SApple OSS Distributions     IODirection     direction,
1810*5c2921b0SApple OSS Distributions     bool            asReference)
1811*5c2921b0SApple OSS Distributions {
1812*5c2921b0SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1813*5c2921b0SApple OSS Distributions 	if (that) {
1814*5c2921b0SApple OSS Distributions 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1815*5c2921b0SApple OSS Distributions 			return os::move(that);
1816*5c2921b0SApple OSS Distributions 		}
1817*5c2921b0SApple OSS Distributions 	}
1818*5c2921b0SApple OSS Distributions 	return nullptr;
1819*5c2921b0SApple OSS Distributions }
1820*5c2921b0SApple OSS Distributions 
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
    IOByteCount             offset,
    IOByteCount             length,
    IODirection             direction)
{
	// Legacy (pre-LP64) entry point: sub-range construction is owned by
	// IOSubMemoryDescriptor; this simply delegates.
	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
}
1829*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
1830*5c2921b0SApple OSS Distributions 
1831*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1832*5c2921b0SApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1833*5c2921b0SApple OSS Distributions {
1834*5c2921b0SApple OSS Distributions 	IOGeneralMemoryDescriptor *origGenMD =
1835*5c2921b0SApple OSS Distributions 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1836*5c2921b0SApple OSS Distributions 
1837*5c2921b0SApple OSS Distributions 	if (origGenMD) {
1838*5c2921b0SApple OSS Distributions 		return IOGeneralMemoryDescriptor::
1839*5c2921b0SApple OSS Distributions 		       withPersistentMemoryDescriptor(origGenMD);
1840*5c2921b0SApple OSS Distributions 	} else {
1841*5c2921b0SApple OSS Distributions 		return nullptr;
1842*5c2921b0SApple OSS Distributions 	}
1843*5c2921b0SApple OSS Distributions }
1844*5c2921b0SApple OSS Distributions 
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	// Obtain a reusable memory reference from the original descriptor.
	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// If the reference returned is the one the original already holds,
	// the original itself serves as the persistent descriptor: retain and
	// return it, dropping the extra reference just taken.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	// Otherwise build a fresh descriptor around the new reference.
	// NOTE(review): memRef is presumably consumed by the persistent-MD
	// init path; on the failure branch below it appears to leak — confirm
	// against initWithOptions(kIOMemoryTypePersistentMD) before changing.
	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1870*5c2921b0SApple OSS Distributions 
1871*5c2921b0SApple OSS Distributions #ifndef __LP64__
1872*5c2921b0SApple OSS Distributions bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1873*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1874*5c2921b0SApple OSS Distributions     IOByteCount   withLength,
1875*5c2921b0SApple OSS Distributions     IODirection withDirection)
1876*5c2921b0SApple OSS Distributions {
1877*5c2921b0SApple OSS Distributions 	_singleRange.v.address = (vm_offset_t) address;
1878*5c2921b0SApple OSS Distributions 	_singleRange.v.length  = withLength;
1879*5c2921b0SApple OSS Distributions 
1880*5c2921b0SApple OSS Distributions 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1881*5c2921b0SApple OSS Distributions }
1882*5c2921b0SApple OSS Distributions 
1883*5c2921b0SApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1884*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1885*5c2921b0SApple OSS Distributions     IOByteCount    withLength,
1886*5c2921b0SApple OSS Distributions     IODirection  withDirection,
1887*5c2921b0SApple OSS Distributions     task_t       withTask)
1888*5c2921b0SApple OSS Distributions {
1889*5c2921b0SApple OSS Distributions 	_singleRange.v.address = address;
1890*5c2921b0SApple OSS Distributions 	_singleRange.v.length  = withLength;
1891*5c2921b0SApple OSS Distributions 
1892*5c2921b0SApple OSS Distributions 	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1893*5c2921b0SApple OSS Distributions }
1894*5c2921b0SApple OSS Distributions 
1895*5c2921b0SApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1896*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1897*5c2921b0SApple OSS Distributions 	IOPhysicalAddress      address,
1898*5c2921b0SApple OSS Distributions 	IOByteCount            withLength,
1899*5c2921b0SApple OSS Distributions 	IODirection            withDirection )
1900*5c2921b0SApple OSS Distributions {
1901*5c2921b0SApple OSS Distributions 	_singleRange.p.address = address;
1902*5c2921b0SApple OSS Distributions 	_singleRange.p.length  = withLength;
1903*5c2921b0SApple OSS Distributions 
1904*5c2921b0SApple OSS Distributions 	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1905*5c2921b0SApple OSS Distributions }
1906*5c2921b0SApple OSS Distributions 
1907*5c2921b0SApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1908*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1909*5c2921b0SApple OSS Distributions 	IOPhysicalRange * ranges,
1910*5c2921b0SApple OSS Distributions 	UInt32            count,
1911*5c2921b0SApple OSS Distributions 	IODirection       direction,
1912*5c2921b0SApple OSS Distributions 	bool              reference)
1913*5c2921b0SApple OSS Distributions {
1914*5c2921b0SApple OSS Distributions 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1915*5c2921b0SApple OSS Distributions 
1916*5c2921b0SApple OSS Distributions 	if (reference) {
1917*5c2921b0SApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1918*5c2921b0SApple OSS Distributions 	}
1919*5c2921b0SApple OSS Distributions 
1920*5c2921b0SApple OSS Distributions 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1921*5c2921b0SApple OSS Distributions }
1922*5c2921b0SApple OSS Distributions 
1923*5c2921b0SApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1924*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1925*5c2921b0SApple OSS Distributions 	IOVirtualRange * ranges,
1926*5c2921b0SApple OSS Distributions 	UInt32           count,
1927*5c2921b0SApple OSS Distributions 	IODirection      direction,
1928*5c2921b0SApple OSS Distributions 	task_t           task,
1929*5c2921b0SApple OSS Distributions 	bool             reference)
1930*5c2921b0SApple OSS Distributions {
1931*5c2921b0SApple OSS Distributions 	IOOptionBits mdOpts = direction;
1932*5c2921b0SApple OSS Distributions 
1933*5c2921b0SApple OSS Distributions 	if (reference) {
1934*5c2921b0SApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1935*5c2921b0SApple OSS Distributions 	}
1936*5c2921b0SApple OSS Distributions 
1937*5c2921b0SApple OSS Distributions 	if (task) {
1938*5c2921b0SApple OSS Distributions 		mdOpts |= kIOMemoryTypeVirtual;
1939*5c2921b0SApple OSS Distributions 
1940*5c2921b0SApple OSS Distributions 		// Auto-prepare if this is a kernel memory descriptor as very few
1941*5c2921b0SApple OSS Distributions 		// clients bother to prepare() kernel memory.
1942*5c2921b0SApple OSS Distributions 		// But it was not enforced so what are you going to do?
1943*5c2921b0SApple OSS Distributions 		if (task == kernel_task) {
1944*5c2921b0SApple OSS Distributions 			mdOpts |= kIOMemoryAutoPrepare;
1945*5c2921b0SApple OSS Distributions 		}
1946*5c2921b0SApple OSS Distributions 	} else {
1947*5c2921b0SApple OSS Distributions 		mdOpts |= kIOMemoryTypePhysical;
1948*5c2921b0SApple OSS Distributions 	}
1949*5c2921b0SApple OSS Distributions 
1950*5c2921b0SApple OSS Distributions 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1951*5c2921b0SApple OSS Distributions }
1952*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
1953*5c2921b0SApple OSS Distributions 
/*
 * initWithOptions:
 *
 * Master initialiser for all variants of IOMemoryDescriptor. The buffer
 * is made up of several virtual address ranges from a given task, several
 * physical ranges, an UPL from the ubc system, or a uio (may be 64bit)
 * from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */
1967*5c2921b0SApple OSS Distributions 
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// Reject a 32-bit virtual range for a 64-bit task: addresses would be
	// truncated. Such callers must use ::withAddressRange() instead.
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Validate the type/task pairing: virtual flavours require a task,
	// physical and UPL flavours must not have one.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized  instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		// NOTE: this declaration deliberately shadows the outer 'type';
		// cleanup must be driven by the descriptor's PREVIOUS type in
		// _flags, not the type being initialised now.
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			// Balance any outstanding prepare() calls before retargeting.
			while (_wireCount) {
				complete();
			}
		}
		// Free the old range vector unless it referenced caller-owned storage.
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// Carry a previously-set redirected state into the new flags; only
		// drop the memory reference and cached mappings when not redirected.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options               &= ~(kIOMemoryPreparedReadOnly);
	_flags                 = options;
	_task                  = task;

#ifndef __LP64__
	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-instance bookkeeping for the (re)initialised descriptor.
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Allocate or release the prepare lock to match the thread-safety option
	// (an existing lock is reused on re-initialisation).
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// Wrap an externally supplied UPL. Here 'buffers' is the upl_t,
		// 'count' is the length in bytes and 'offset' the starting byte
		// offset within the UPL; a single ioPLBlock records it.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length    = count;
		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		// The UPL must cover the whole described extent.
		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice conveniently equal to 1
		iopl.fFlags  = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// A single 64-bit range that fits below 4GB can be stored
				// as a 32-bit range in the embedded _singleRange, avoiding
				// an allocation and downgrading the type accordingly.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (kIOMemoryTypeVirtual64 == type) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				// A single range lives in the embedded storage; multiple
				// ranges require an allocated copy of the vector.
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
#if CONFIG_PROB_GZALLOC
		// Strip any probabilistic-gzalloc encoding from kernel addresses so
		// the stored ranges are directly usable.
		if (task == kernel_task) {
			for (UInt32 i = 0; i < count; i++) {
				_ranges.v[i].address = pgz_decode(_ranges.v[i].address, _ranges.v[i].length);
			}
		}
#endif /* CONFIG_PROB_GZALLOC */
		_rangesCount = count;

		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		// Walk every range, accumulating total byte length and page count
		// with explicit overflow checks; any failure breaks out early and
		// is caught by the (ind < count) test below.
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind);
			if (_task) {
				// Task-backed ranges: ask the VM for the physical footprint,
				// which also validates the address range.
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				// Physical ranges: count pages by rounding the end address
				// up to a page boundary.
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				// Track the highest physical page touched (ppnum_t is 32-bit).
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		// Fail if the loop bailed early or the total doesn't fit IOByteCount.
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false;                                   /* overflow */
		}
		_length      = totalLength;
		_pages       = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed


		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// Sanity-limit: cannot describe more pages than physical memory.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Tag the allocation for kernel accounting when no tag was set.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			// Persistent descriptors need a named memory entry up front
			// (unless one was already adopted from a persistent-MD clone).
			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2347*5c2921b0SApple OSS Distributions 
2348*5c2921b0SApple OSS Distributions /*
2349*5c2921b0SApple OSS Distributions  * free
2350*5c2921b0SApple OSS Distributions  *
2351*5c2921b0SApple OSS Distributions  * Free resources.
2352*5c2921b0SApple OSS Distributions  */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach the device pager's back-reference to this descriptor under
	// the lock so concurrent pager operations observe a consistent NULL.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical descriptors are wired by construction and never
		// complete()d; just tear down any outstanding DMA mapping.
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance every outstanding prepare() before tearing down.
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free the range vector unless it references caller-owned storage;
	// the deleter must match the type the vector was allocated as.
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2416*5c2921b0SApple OSS Distributions 
2417*5c2921b0SApple OSS Distributions #ifndef __LP64__
// Deprecated pre-LP64 interface. No longer implemented; any caller
// reaching this is using a removed API, so fail loudly.
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}
2423*5c2921b0SApple OSS Distributions 
// Deprecated pre-LP64 interface. No longer implemented; any caller
// reaching this is using a removed API, so fail loudly.
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
2429*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
2430*5c2921b0SApple OSS Distributions 
2431*5c2921b0SApple OSS Distributions /*
2432*5c2921b0SApple OSS Distributions  * getDirection:
2433*5c2921b0SApple OSS Distributions  *
2434*5c2921b0SApple OSS Distributions  * Get the direction of the transfer.
2435*5c2921b0SApple OSS Distributions  */
2436*5c2921b0SApple OSS Distributions IODirection
getDirection() const2437*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getDirection() const
2438*5c2921b0SApple OSS Distributions {
2439*5c2921b0SApple OSS Distributions #ifndef __LP64__
2440*5c2921b0SApple OSS Distributions 	if (_direction) {
2441*5c2921b0SApple OSS Distributions 		return _direction;
2442*5c2921b0SApple OSS Distributions 	}
2443*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
2444*5c2921b0SApple OSS Distributions 	return (IODirection) (_flags & kIOMemoryDirectionMask);
2445*5c2921b0SApple OSS Distributions }
2446*5c2921b0SApple OSS Distributions 
2447*5c2921b0SApple OSS Distributions /*
2448*5c2921b0SApple OSS Distributions  * getLength:
2449*5c2921b0SApple OSS Distributions  *
2450*5c2921b0SApple OSS Distributions  * Get the length of the transfer (over all ranges).
2451*5c2921b0SApple OSS Distributions  */
/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total byte count across all ranges, fixed at initialization.
	return _length;
}
2457*5c2921b0SApple OSS Distributions 
// Store a client-supplied tag on the descriptor. The tag is opaque to
// IOMemoryDescriptor itself; it is only stored and returned by getTag().
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	_tag = tag;
}
2463*5c2921b0SApple OSS Distributions 
// Return the tag previously stored with setTag().
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	return _tag;
}
2469*5c2921b0SApple OSS Distributions 
// Return the descriptor's option/state flags (kIOMemory* bits).
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	return _flags;
}
2475*5c2921b0SApple OSS Distributions 
2476*5c2921b0SApple OSS Distributions OSObject *
copyContext(void) const2477*5c2921b0SApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2478*5c2921b0SApple OSS Distributions {
2479*5c2921b0SApple OSS Distributions 	if (reserved) {
2480*5c2921b0SApple OSS Distributions 		OSObject * context = reserved->contextObject;
2481*5c2921b0SApple OSS Distributions 		if (context) {
2482*5c2921b0SApple OSS Distributions 			context->retain();
2483*5c2921b0SApple OSS Distributions 		}
2484*5c2921b0SApple OSS Distributions 		return context;
2485*5c2921b0SApple OSS Distributions 	} else {
2486*5c2921b0SApple OSS Distributions 		return NULL;
2487*5c2921b0SApple OSS Distributions 	}
2488*5c2921b0SApple OSS Distributions }
2489*5c2921b0SApple OSS Distributions 
// Attach an arbitrary retained context object to the descriptor,
// replacing (and releasing) any previous one. Passing NULL clears the
// context. The object is released in cleanKernelReserved() when the
// descriptor is freed, or by a later setContext() call.
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Lazily allocate the reserved (expansion) storage that holds the
	// context pointer; may return NULL on allocation failure.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		// Atomically detach the old object before releasing it so a
		// concurrent clearer cannot release the same reference twice.
		OSObject * oldObject = reserved->contextObject;
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			// Retain on behalf of this descriptor, then publish.
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2510*5c2921b0SApple OSS Distributions 
2511*5c2921b0SApple OSS Distributions #ifndef __LP64__
2512*5c2921b0SApple OSS Distributions #pragma clang diagnostic push
2513*5c2921b0SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2514*5c2921b0SApple OSS Distributions 
2515*5c2921b0SApple OSS Distributions // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
2516*5c2921b0SApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2517*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
2518*5c2921b0SApple OSS Distributions {
2519*5c2921b0SApple OSS Distributions 	addr64_t physAddr = 0;
2520*5c2921b0SApple OSS Distributions 
2521*5c2921b0SApple OSS Distributions 	if (prepare() == kIOReturnSuccess) {
2522*5c2921b0SApple OSS Distributions 		physAddr = getPhysicalSegment64( offset, length );
2523*5c2921b0SApple OSS Distributions 		complete();
2524*5c2921b0SApple OSS Distributions 	}
2525*5c2921b0SApple OSS Distributions 
2526*5c2921b0SApple OSS Distributions 	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2527*5c2921b0SApple OSS Distributions }
2528*5c2921b0SApple OSS Distributions 
2529*5c2921b0SApple OSS Distributions #pragma clang diagnostic pop
2530*5c2921b0SApple OSS Distributions 
2531*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
2532*5c2921b0SApple OSS Distributions 
// Copy up to 'length' bytes starting at 'offset' within the descriptor
// into the kernel buffer 'bytes', walking physical segments with copypv.
// Returns the number of bytes actually copied (0 on a range violation or
// for remote memory).
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;


	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	// offset == _length is a legal no-op read.
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-local) memory cannot be copied through copypv.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t        srcAddr64;
		IOByteCount     srcLen;

		// Physical address of the segment containing 'offset';
		// kIOMemoryMapperNone yields the raw (unmapped) address.
		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv takes an unsigned int length; clamp so the cast below
		// cannot truncate.
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// Physical source -> kernel-virtual sink copy.
		copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// A short copy means getPhysicalSegment failed mid-walk.
	assert(!remaining);

	return length - remaining;
}
2596*5c2921b0SApple OSS Distributions 
// Copy up to 'length' bytes from the kernel buffer 'bytes' into the
// descriptor starting at 'inoffset', walking physical segments with
// copypv. A NULL 'bytes' zero-fills the range instead. Returns the number
// of bytes written (0 on a range violation, read-only preparation, or
// remote memory).
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	// Refuse to write memory that was wired read-only.
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	// offset == _length is a legal no-op write.
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-local) memory cannot be copied through copypv.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		// Physical address of the segment containing 'offset';
		// kIOMemoryMapperNone yields the raw (unmapped) address.
		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv takes an unsigned int length; clamp so the cast below
		// cannot truncate.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (!srcAddr) {
			// NULL source: zero-fill this physical segment.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			// Kernel-virtual source -> physical sink copy.
			copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			srcAddr   += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// A short copy means getPhysicalSegment failed mid-walk.
	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	// srcAddr is only NULL here when the caller passed a NULL buffer
	// (bzero_phys path, which does not flush) — flush the zeroed range.
	if (!srcAddr) {
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2676*5c2921b0SApple OSS Distributions 
2677*5c2921b0SApple OSS Distributions #ifndef __LP64__
// Deprecated pre-LP64 interface. No longer implemented; any caller
// reaching this is using a removed API, so fail loudly.
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	panic("IOGMD::setPosition deprecated");
}
2683*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
2684*5c2921b0SApple OSS Distributions 
// Monotonic generators for preparation and descriptor IDs, bumped with
// OSIncrementAtomic64. Preparation IDs start at 1ULL << 32 so they never
// collide with the small reserved kIOPreparationID* sentinel values;
// descriptor IDs start just past kIODescriptorIDInvalid. 8-byte alignment
// is required for the 64-bit atomic ops.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2687*5c2921b0SApple OSS Distributions 
// Return the preparation ID for a prepared (wired) descriptor, lazily
// assigning a fresh one on first use. Returns kIOPreparationIDUnprepared
// when the descriptor has not been prepare()d.
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	ioGMDData *dataP;

	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	// Physical descriptors keep their ID in the base class reserved
	// data rather than in _memoryEntries.
	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		// Allocate a new ID, then publish it with CAS so concurrent
		// callers all observe the same winner.
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
	}
	return dataP->fPreparationID;
}
2713*5c2921b0SApple OSS Distributions 
2714*5c2921b0SApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2715*5c2921b0SApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2716*5c2921b0SApple OSS Distributions {
2717*5c2921b0SApple OSS Distributions 	if (reserved->creator) {
2718*5c2921b0SApple OSS Distributions 		task_deallocate(reserved->creator);
2719*5c2921b0SApple OSS Distributions 		reserved->creator = NULL;
2720*5c2921b0SApple OSS Distributions 	}
2721*5c2921b0SApple OSS Distributions 
2722*5c2921b0SApple OSS Distributions 	if (reserved->contextObject) {
2723*5c2921b0SApple OSS Distributions 		reserved->contextObject->release();
2724*5c2921b0SApple OSS Distributions 		reserved->contextObject = NULL;
2725*5c2921b0SApple OSS Distributions 	}
2726*5c2921b0SApple OSS Distributions }
2727*5c2921b0SApple OSS Distributions 
2728*5c2921b0SApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2729*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2730*5c2921b0SApple OSS Distributions {
2731*5c2921b0SApple OSS Distributions 	if (!reserved) {
2732*5c2921b0SApple OSS Distributions 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2733*5c2921b0SApple OSS Distributions 	}
2734*5c2921b0SApple OSS Distributions 	return reserved;
2735*5c2921b0SApple OSS Distributions }
2736*5c2921b0SApple OSS Distributions 
// Lazily assign a unique preparation ID into the reserved data. The CAS
// ensures that when several threads race here, exactly one new ID wins
// and the rest are discarded.
void
IOMemoryDescriptor::setPreparationID( void )
{
	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
	}
}
2745*5c2921b0SApple OSS Distributions 
2746*5c2921b0SApple OSS Distributions uint64_t
getPreparationID(void)2747*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2748*5c2921b0SApple OSS Distributions {
2749*5c2921b0SApple OSS Distributions 	if (reserved) {
2750*5c2921b0SApple OSS Distributions 		return reserved->preparationID;
2751*5c2921b0SApple OSS Distributions 	} else {
2752*5c2921b0SApple OSS Distributions 		return kIOPreparationIDUnsupported;
2753*5c2921b0SApple OSS Distributions 	}
2754*5c2921b0SApple OSS Distributions }
2755*5c2921b0SApple OSS Distributions 
// Lazily assign a unique descriptor ID into the reserved data. The CAS
// ensures that when several threads race here, exactly one new ID wins
// and the rest are discarded.
void
IOMemoryDescriptor::setDescriptorID( void )
{
	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
	}
}
2764*5c2921b0SApple OSS Distributions 
2765*5c2921b0SApple OSS Distributions uint64_t
getDescriptorID(void)2766*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2767*5c2921b0SApple OSS Distributions {
2768*5c2921b0SApple OSS Distributions 	setDescriptorID();
2769*5c2921b0SApple OSS Distributions 
2770*5c2921b0SApple OSS Distributions 	if (reserved) {
2771*5c2921b0SApple OSS Distributions 		return reserved->descriptorID;
2772*5c2921b0SApple OSS Distributions 	} else {
2773*5c2921b0SApple OSS Distributions 		return kIODescriptorIDInvalid;
2774*5c2921b0SApple OSS Distributions 	}
2775*5c2921b0SApple OSS Distributions }
2776*5c2921b0SApple OSS Distributions 
2777*5c2921b0SApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2778*5c2921b0SApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2779*5c2921b0SApple OSS Distributions {
2780*5c2921b0SApple OSS Distributions 	if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2781*5c2921b0SApple OSS Distributions 		return kIOReturnSuccess;
2782*5c2921b0SApple OSS Distributions 	}
2783*5c2921b0SApple OSS Distributions 
2784*5c2921b0SApple OSS Distributions 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2785*5c2921b0SApple OSS Distributions 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2786*5c2921b0SApple OSS Distributions 		return kIOReturnBadArgument;
2787*5c2921b0SApple OSS Distributions 	}
2788*5c2921b0SApple OSS Distributions 
2789*5c2921b0SApple OSS Distributions 	uint64_t descriptorID = getDescriptorID();
2790*5c2921b0SApple OSS Distributions 	assert(descriptorID != kIODescriptorIDInvalid);
2791*5c2921b0SApple OSS Distributions 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2792*5c2921b0SApple OSS Distributions 		return kIOReturnBadArgument;
2793*5c2921b0SApple OSS Distributions 	}
2794*5c2921b0SApple OSS Distributions 
2795*5c2921b0SApple OSS Distributions 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2796*5c2921b0SApple OSS Distributions 
2797*5c2921b0SApple OSS Distributions #if __LP64__
2798*5c2921b0SApple OSS Distributions 	static const uint8_t num_segments_page = 8;
2799*5c2921b0SApple OSS Distributions #else
2800*5c2921b0SApple OSS Distributions 	static const uint8_t num_segments_page = 4;
2801*5c2921b0SApple OSS Distributions #endif
2802*5c2921b0SApple OSS Distributions 	static const uint8_t num_segments_long = 2;
2803*5c2921b0SApple OSS Distributions 
2804*5c2921b0SApple OSS Distributions 	IOPhysicalAddress segments_page[num_segments_page];
2805*5c2921b0SApple OSS Distributions 	IOPhysicalRange   segments_long[num_segments_long];
2806*5c2921b0SApple OSS Distributions 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2807*5c2921b0SApple OSS Distributions 	memset(segments_long, 0, sizeof(segments_long));
2808*5c2921b0SApple OSS Distributions 
2809*5c2921b0SApple OSS Distributions 	uint8_t segment_page_idx = 0;
2810*5c2921b0SApple OSS Distributions 	uint8_t segment_long_idx = 0;
2811*5c2921b0SApple OSS Distributions 
2812*5c2921b0SApple OSS Distributions 	IOPhysicalRange physical_segment;
2813*5c2921b0SApple OSS Distributions 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2814*5c2921b0SApple OSS Distributions 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2815*5c2921b0SApple OSS Distributions 
2816*5c2921b0SApple OSS Distributions 		if (physical_segment.length == 0) {
2817*5c2921b0SApple OSS Distributions 			break;
2818*5c2921b0SApple OSS Distributions 		}
2819*5c2921b0SApple OSS Distributions 
2820*5c2921b0SApple OSS Distributions 		/**
2821*5c2921b0SApple OSS Distributions 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2822*5c2921b0SApple OSS Distributions 		 * buffer memory, pack segment events according to the following.
2823*5c2921b0SApple OSS Distributions 		 *
2824*5c2921b0SApple OSS Distributions 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2825*5c2921b0SApple OSS Distributions 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2826*5c2921b0SApple OSS Distributions 		 *
2827*5c2921b0SApple OSS Distributions 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2828*5c2921b0SApple OSS Distributions 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2829*5c2921b0SApple OSS Distributions 		 * - unmapped pages will have a ppn of MAX_INT_32
2830*5c2921b0SApple OSS Distributions 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2831*5c2921b0SApple OSS Distributions 		 * - address_0, length_0, address_0, length_1
2832*5c2921b0SApple OSS Distributions 		 * - unmapped pages will have an address of 0
2833*5c2921b0SApple OSS Distributions 		 *
2834*5c2921b0SApple OSS Distributions 		 * During each iteration do the following depending on the length of the mapping:
2835*5c2921b0SApple OSS Distributions 		 * 1. add the current segment to the appropriate queue of pending segments
2836*5c2921b0SApple OSS Distributions 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2837*5c2921b0SApple OSS Distributions 		 * 1a. if FALSE emit and reset all events in the previous queue
2838*5c2921b0SApple OSS Distributions 		 * 2. check if we have filled up the current queue of pending events
2839*5c2921b0SApple OSS Distributions 		 * 2a. if TRUE emit and reset all events in the pending queue
2840*5c2921b0SApple OSS Distributions 		 * 3. after completing all iterations emit events in the current queue
2841*5c2921b0SApple OSS Distributions 		 */
2842*5c2921b0SApple OSS Distributions 
2843*5c2921b0SApple OSS Distributions 		bool emit_page = false;
2844*5c2921b0SApple OSS Distributions 		bool emit_long = false;
2845*5c2921b0SApple OSS Distributions 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2846*5c2921b0SApple OSS Distributions 			segments_page[segment_page_idx] = physical_segment.address;
2847*5c2921b0SApple OSS Distributions 			segment_page_idx++;
2848*5c2921b0SApple OSS Distributions 
2849*5c2921b0SApple OSS Distributions 			emit_long = segment_long_idx != 0;
2850*5c2921b0SApple OSS Distributions 			emit_page = segment_page_idx == num_segments_page;
2851*5c2921b0SApple OSS Distributions 
2852*5c2921b0SApple OSS Distributions 			if (os_unlikely(emit_long)) {
2853*5c2921b0SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2854*5c2921b0SApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2855*5c2921b0SApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2856*5c2921b0SApple OSS Distributions 			}
2857*5c2921b0SApple OSS Distributions 
2858*5c2921b0SApple OSS Distributions 			if (os_unlikely(emit_page)) {
2859*5c2921b0SApple OSS Distributions #if __LP64__
2860*5c2921b0SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2861*5c2921b0SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2862*5c2921b0SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2863*5c2921b0SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2864*5c2921b0SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2865*5c2921b0SApple OSS Distributions #else
2866*5c2921b0SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2867*5c2921b0SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2868*5c2921b0SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2869*5c2921b0SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2870*5c2921b0SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2871*5c2921b0SApple OSS Distributions #endif
2872*5c2921b0SApple OSS Distributions 			}
2873*5c2921b0SApple OSS Distributions 		} else {
2874*5c2921b0SApple OSS Distributions 			segments_long[segment_long_idx] = physical_segment;
2875*5c2921b0SApple OSS Distributions 			segment_long_idx++;
2876*5c2921b0SApple OSS Distributions 
2877*5c2921b0SApple OSS Distributions 			emit_page = segment_page_idx != 0;
2878*5c2921b0SApple OSS Distributions 			emit_long = segment_long_idx == num_segments_long;
2879*5c2921b0SApple OSS Distributions 
2880*5c2921b0SApple OSS Distributions 			if (os_unlikely(emit_page)) {
2881*5c2921b0SApple OSS Distributions #if __LP64__
2882*5c2921b0SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2883*5c2921b0SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2884*5c2921b0SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2885*5c2921b0SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2886*5c2921b0SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2887*5c2921b0SApple OSS Distributions #else
2888*5c2921b0SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2889*5c2921b0SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2890*5c2921b0SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2891*5c2921b0SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2892*5c2921b0SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2893*5c2921b0SApple OSS Distributions #endif
2894*5c2921b0SApple OSS Distributions 			}
2895*5c2921b0SApple OSS Distributions 
2896*5c2921b0SApple OSS Distributions 			if (emit_long) {
2897*5c2921b0SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2898*5c2921b0SApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2899*5c2921b0SApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2900*5c2921b0SApple OSS Distributions 			}
2901*5c2921b0SApple OSS Distributions 		}
2902*5c2921b0SApple OSS Distributions 
2903*5c2921b0SApple OSS Distributions 		if (os_unlikely(emit_page)) {
2904*5c2921b0SApple OSS Distributions 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2905*5c2921b0SApple OSS Distributions 			segment_page_idx = 0;
2906*5c2921b0SApple OSS Distributions 		}
2907*5c2921b0SApple OSS Distributions 
2908*5c2921b0SApple OSS Distributions 		if (os_unlikely(emit_long)) {
2909*5c2921b0SApple OSS Distributions 			memset(segments_long, 0, sizeof(segments_long));
2910*5c2921b0SApple OSS Distributions 			segment_long_idx = 0;
2911*5c2921b0SApple OSS Distributions 		}
2912*5c2921b0SApple OSS Distributions 	}
2913*5c2921b0SApple OSS Distributions 
2914*5c2921b0SApple OSS Distributions 	if (segment_page_idx != 0) {
2915*5c2921b0SApple OSS Distributions 		assert(segment_long_idx == 0);
2916*5c2921b0SApple OSS Distributions #if __LP64__
2917*5c2921b0SApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2918*5c2921b0SApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2919*5c2921b0SApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2920*5c2921b0SApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2921*5c2921b0SApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2922*5c2921b0SApple OSS Distributions #else
2923*5c2921b0SApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2924*5c2921b0SApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[1]),
2925*5c2921b0SApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[2]),
2926*5c2921b0SApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[3]),
2927*5c2921b0SApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[4]));
2928*5c2921b0SApple OSS Distributions #endif
2929*5c2921b0SApple OSS Distributions 	} else if (segment_long_idx != 0) {
2930*5c2921b0SApple OSS Distributions 		assert(segment_page_idx == 0);
2931*5c2921b0SApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2932*5c2921b0SApple OSS Distributions 		    segments_long[0].address, segments_long[0].length,
2933*5c2921b0SApple OSS Distributions 		    segments_long[1].address, segments_long[1].length);
2934*5c2921b0SApple OSS Distributions 	}
2935*5c2921b0SApple OSS Distributions 
2936*5c2921b0SApple OSS Distributions 	return kIOReturnSuccess;
2937*5c2921b0SApple OSS Distributions }
2938*5c2921b0SApple OSS Distributions 
2939*5c2921b0SApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2940*5c2921b0SApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2941*5c2921b0SApple OSS Distributions {
2942*5c2921b0SApple OSS Distributions 	_kernelTag = (vm_tag_t) kernelTag;
2943*5c2921b0SApple OSS Distributions 	_userTag   = (vm_tag_t) userTag;
2944*5c2921b0SApple OSS Distributions }
2945*5c2921b0SApple OSS Distributions 
2946*5c2921b0SApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2947*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2948*5c2921b0SApple OSS Distributions {
2949*5c2921b0SApple OSS Distributions 	if (vm_kernel_map_is_kernel(map)) {
2950*5c2921b0SApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2951*5c2921b0SApple OSS Distributions 			return (uint32_t) _kernelTag;
2952*5c2921b0SApple OSS Distributions 		}
2953*5c2921b0SApple OSS Distributions 	} else {
2954*5c2921b0SApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _userTag) {
2955*5c2921b0SApple OSS Distributions 			return (uint32_t) _userTag;
2956*5c2921b0SApple OSS Distributions 		}
2957*5c2921b0SApple OSS Distributions 	}
2958*5c2921b0SApple OSS Distributions 	return IOMemoryTag(map);
2959*5c2921b0SApple OSS Distributions }
2960*5c2921b0SApple OSS Distributions 
/*
 * Back end for IODMACommand operations on this descriptor.
 *
 * op carries an opcode in kIOMDDMACommandOperationMask plus optional flag
 * bits; vData/dataSize is the opcode-specific argument structure.  Handled
 * opcodes: kIOMDDMAMap, kIOMDDMAUnmap, kIOMDAddDMAMapSpec,
 * kIOMDGetCharacteristics, kIOMDDMAActive, and kIOMDWalkSegments (the
 * segment walker is the fall-through code at the bottom; note that the
 * kIOMDDMAActive branch does not return and also reaches it).
 *
 * Returns kIOReturnUnderrun when dataSize is too small for the opcode's
 * argument structure, kIOReturnBadArgument for unrecognized opcodes.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOReturn err = kIOReturnSuccess;
	DMACommandOps params;
	// The operation logically mutates the descriptor; shed const for the
	// mutating helpers below.
	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
	ioGMDData *dataP;

	// Split op into its flag bits (params) and the opcode proper (op).
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// Lazily create the memory-entries data area if not present.
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries && data->fMapper) {
			bool remap, keepMap;
			dataP = getDataP(_memoryEntries);

			// Tighten the descriptor's accumulated DMA constraints to
			// the stricter of the current and requested specs.
			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
			}
			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
			}

			// A mapping may be cached on the descriptor (keepMap) only for
			// the system mapper and only when it covers the whole length.
			keepMap = (data->fMapper == gIOSystemMapper);
			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

			// Serialize cached-map access against prepare/complete.
			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockLock(_prepareLock);
			}

			// Remap when the cached mapping can't be reused: partial /
			// non-system request, cached base violates the address-bit
			// limit, or a stronger-than-page alignment is required.
			remap = (!keepMap);
			remap |= (dataP->fDMAMapNumAddressBits < 64)
			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
			remap |= (dataP->fDMAMapAlignment > page_size);

			if (remap || !dataP->fMappedBaseValid) {
				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
				// Cache a successful whole-length system mapping on the
				// descriptor; fAllocLength = 0 transfers ownership.
				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
					dataP->fMappedBase      = data->fAlloc;
					dataP->fMappedBaseValid = true;
					dataP->fMappedLength    = data->fAllocLength;
					data->fAllocLength      = 0;    // IOMD owns the alloc now
				}
			} else {
				// Reuse the cached mapping; record the use for accounting.
				data->fAlloc = dataP->fMappedBase;
				data->fAllocLength = 0;         // give out IOMD map
				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
			}

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockUnlock(_prepareLock);
			}
		}
		return err;
	}
	if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): err from dmaUnmap is discarded; the caller always
		// sees success here — confirm this is intentional.
		return kIOReturnSuccess;
	}

	if (kIOMDAddDMAMapSpec == op) {
		if (dataSize < sizeof(IODMAMapSpecification)) {
			return kIOReturnUnderrun;
		}

		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries) {
			dataP = getDataP(_memoryEntries);
			// Merge the supplied spec into the accumulated constraints
			// (keep the stricter value of each).
			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->numAddressBits;
			}
			if (data->alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->alignment;
			}
		}
		return kIOReturnSuccess;
	}

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		// Report length, scatter/gather count, page count and direction;
		// prepared-state details only when the descriptor is wired.
		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = _length;
		data->fSGCount = _rangesCount;
		data->fPages = _pages;
		data->fDirection = getDirection();
		if (!_wireCount) {
			data->fIsPrepared = false;
		} else {
			data->fIsPrepared = true;
			data->fHighestPage = _highestPage;
			if (_memoryEntries) {
				dataP = getDataP(_memoryEntries);
				ioPLBlock *ioplList = getIOPLList(dataP);
				UInt count = getNumIOPL(_memoryEntries, dataP);
				// With a single IOPL the alignment can be derived from
				// its first-page offset.
				if (count == 1) {
					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
				}
			}
		}

		return kIOReturnSuccess;
	} else if (kIOMDDMAActive == op) {
		// params non-zero: take a DMA reference; zero: drop one.
		if (params) {
			int16_t prior;
			prior = OSAddAtomic16(1, &md->_dmaReferences);
			// First active reference invalidates the cached map name.
			if (!prior) {
				md->_mapName = NULL;
			}
		} else {
			if (md->_dmaReferences) {
				OSAddAtomic16(-1, &md->_dmaReferences);
			} else {
				panic("_dmaReferences underflow");
			}
		}
	} else if (kIOMDWalkSegments != op) {
		return kIOReturnBadArgument;
	}

	// Get the next segment
	// Walk-state layout overlaid on the caller-supplied buffer; the
	// fields after fIO persist between successive walk calls.
	struct InternalState {
		IOMDDMAWalkSegmentArgs fIO;
		mach_vm_size_t fOffset2Index;
		mach_vm_size_t fNextOffset;
		UInt fIndex;
	} *isP;

	// Find the next segment
	if (dataSize < sizeof(*isP)) {
		return kIOReturnUnderrun;
	}

	isP = (InternalState *) vData;
	uint64_t offset = isP->fIO.fOffset;
	uint8_t mapped = isP->fIO.fMapped;
	uint64_t mappedBase;

	// Mapped (IOVM) addresses are not available for remote memory.
	if (mapped && (kIOMemoryRemote & _flags)) {
		return kIOReturnNotAttached;
	}

	// If a mapped walk is requested but no valid system mapping exists
	// yet, create one now covering the whole descriptor.
	if (IOMapper::gSystem && mapped
	    && (!(kIOMemoryHostOnly & _flags))
	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		dataP = getDataP(_memoryEntries);
		if (dataP->fMapper) {
			IODMAMapSpecification mapSpec;
			bzero(&mapSpec, sizeof(mapSpec));
			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
			mapSpec.alignment = dataP->fDMAMapAlignment;
			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
			if (kIOReturnSuccess != err) {
				return err;
			}
			dataP->fMappedBaseValid = true;
		}
	}

	// Resolve the mapped base to use, or fall back to an unmapped
	// (physical) walk when no valid mapping is available.
	if (mapped) {
		if (IOMapper::gSystem
		    && (!(kIOMemoryHostOnly & _flags))
		    && _memoryEntries
		    && (dataP = getDataP(_memoryEntries))
		    && dataP->fMappedBaseValid) {
			mappedBase = dataP->fMappedBase;
		} else {
			mapped = 0;
		}
	}

	// Walking exactly to the end is an overrun; past it is internal error.
	if (offset >= _length) {
		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
	}

	// Validate the previous offset
	// Resume from the saved index only for a plain walk whose offset is
	// consistent with the saved state; otherwise restart the scan.
	UInt ind;
	mach_vm_size_t off2Ind = isP->fOffset2Index;
	if (!params
	    && offset
	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
		ind = isP->fIndex;
	} else {
		ind = off2Ind = 0; // Start from beginning
	}
	mach_vm_size_t length;
	UInt64 address;

	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
		// Physical address based memory descriptor
		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}

		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#ifndef __LP64__
	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
		// Physical address based memory descriptor
		// (same walk as above, over 64-bit IOAddressRange entries)
		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}
		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#endif /* !__LP64__ */
	else {
		// Virtual/UPL-backed descriptor: locate the segment via the IOPL
		// list built at prepare() time.  do/while(false) lets the early
		// mapped/on-device cases bail out with `continue`.
		do {
			if (!_wireCount) {
				panic("IOGMD: not wired for the IODMACommand");
			}

			assert(_memoryEntries);

			dataP = getDataP(_memoryEntries);
			const ioPLBlock *ioplList = getIOPLList(dataP);
			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
			upl_page_info_t *pageList = getPageList(dataP);

			assert(numIOPLs > 0);

			// Scan through iopl info blocks looking for block containing offset
			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
				ind++;
			}

			// Go back to actual range as search goes past it
			ioPLBlock ioplInfo = ioplList[ind - 1];
			off2Ind = ioplInfo.fIOMDOffset;

			if (ind < numIOPLs) {
				length = ioplList[ind].fIOMDOffset;
			} else {
				length = _length;
			}
			length -= offset;       // Remainder within iopl

			// Subtract offset till this iopl in total list
			offset -= off2Ind;

			// If a mapped address is requested and this is a pre-mapped IOPL
			// then just need to compute an offset relative to the mapped base.
			if (mapped) {
				offset += (ioplInfo.fPageOffset & PAGE_MASK);
				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
				continue; // Done leave do/while(false) now
			}

			// The offset is rebased into the current iopl.
			// Now add the iopl 1st page offset.
			offset += ioplInfo.fPageOffset;

			// For external UPLs the fPageInfo field points directly to
			// the upl's upl_page_info_t array.
			if (ioplInfo.fFlags & kIOPLExternUPL) {
				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
			} else {
				pageList = &pageList[ioplInfo.fPageInfo];
			}

			// Check for direct device non-paged memory
			if (ioplInfo.fFlags & kIOPLOnDevice) {
				address = ptoa_64(pageList->phys_addr) + offset;
				continue; // Done leave do/while(false) now
			}

			// Now we need compute the index into the pageList
			UInt pageInd = atop_32(offset);
			offset &= PAGE_MASK;

			// Compute the starting address of this segment
			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
			if (!pageAddr) {
				panic("!pageList phys_addr");
			}

			address = ptoa_64(pageAddr) + offset;

			// length is currently set to the length of the remainider of the iopl.
			// We need to check that the remainder of the iopl is contiguous.
			// This is indicated by pageList[ind].phys_addr being sequential.
			IOByteCount contigLength = PAGE_SIZE - offset;
			while (contigLength < length
			    && ++pageAddr == pageList[++pageInd].phys_addr) {
				contigLength += PAGE_SIZE;
			}

			if (contigLength < length) {
				length = contigLength;
			}


			assert(address);
			assert(length);
		} while (false);
	}

	// Update return values and state
	isP->fIO.fIOVMAddr = address;
	isP->fIO.fLength   = length;
	isP->fIndex        = ind;
	isP->fOffset2Index = off2Ind;
	isP->fNextOffset   = isP->fIO.fOffset + length;

	return kIOReturnSuccess;
}
3343*5c2921b0SApple OSS Distributions 
/*
 * Find the contiguous segment of this descriptor that contains 'offset'.
 *
 * offset          - byte offset into the descriptor; offsets >= _length
 *                   yield 0.
 * lengthOfSegment - if non-NULL, receives the length in bytes of the
 *                   contiguous run starting at 'offset' (set to 0 up front,
 *                   and left 0 when no address is produced).
 * options         - _kIOMemorySourceSegment: walk the raw source ranges
 *                   (_ranges) directly; kIOMemoryMapperNone: request the
 *                   untranslated physical address rather than the
 *                   system-mapper (DART/IOMMU) address.
 *
 * Returns the segment's address, or 0 on failure / out-of-range offset.
 */
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		// Source-segment path: walk the descriptor's own range vector,
		// without touching pages or the DMA-walk machinery.
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		// (assumes offset < _length, so the loop terminates in bounds)
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		// Coalesce subsequent ranges while they are exactly adjacent.
		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// DMA-walk path: let dmaCommandOperation() produce the first
		// segment at 'offset' via the kIOMDFirstSegment operation.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		// Ask for mapped (system-mapper) addresses only when the caller did
		// not pass kIOMemoryMapperNone and the memory is not host/remote.
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				// Caller wants the raw physical address but the walk
				// returned a mapper address: translate, then probe page
				// by page to find how far physical contiguity extends.
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// No address means no segment: report zero length too.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3450*5c2921b0SApple OSS Distributions 
3451*5c2921b0SApple OSS Distributions #ifndef __LP64__
3452*5c2921b0SApple OSS Distributions #pragma clang diagnostic push
3453*5c2921b0SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3454*5c2921b0SApple OSS Distributions 
3455*5c2921b0SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3456*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3457*5c2921b0SApple OSS Distributions {
3458*5c2921b0SApple OSS Distributions 	addr64_t address = 0;
3459*5c2921b0SApple OSS Distributions 
3460*5c2921b0SApple OSS Distributions 	if (options & _kIOMemorySourceSegment) {
3461*5c2921b0SApple OSS Distributions 		address = getSourceSegment(offset, lengthOfSegment);
3462*5c2921b0SApple OSS Distributions 	} else if (options & kIOMemoryMapperNone) {
3463*5c2921b0SApple OSS Distributions 		address = getPhysicalSegment64(offset, lengthOfSegment);
3464*5c2921b0SApple OSS Distributions 	} else {
3465*5c2921b0SApple OSS Distributions 		address = getPhysicalSegment(offset, lengthOfSegment);
3466*5c2921b0SApple OSS Distributions 	}
3467*5c2921b0SApple OSS Distributions 
3468*5c2921b0SApple OSS Distributions 	return address;
3469*5c2921b0SApple OSS Distributions }
3470*5c2921b0SApple OSS Distributions #pragma clang diagnostic pop
3471*5c2921b0SApple OSS Distributions 
3472*5c2921b0SApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3473*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3474*5c2921b0SApple OSS Distributions {
3475*5c2921b0SApple OSS Distributions 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3476*5c2921b0SApple OSS Distributions }
3477*5c2921b0SApple OSS Distributions 
3478*5c2921b0SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3479*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3480*5c2921b0SApple OSS Distributions {
3481*5c2921b0SApple OSS Distributions 	addr64_t    address = 0;
3482*5c2921b0SApple OSS Distributions 	IOByteCount length  = 0;
3483*5c2921b0SApple OSS Distributions 
3484*5c2921b0SApple OSS Distributions 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3485*5c2921b0SApple OSS Distributions 
3486*5c2921b0SApple OSS Distributions 	if (lengthOfSegment) {
3487*5c2921b0SApple OSS Distributions 		length = *lengthOfSegment;
3488*5c2921b0SApple OSS Distributions 	}
3489*5c2921b0SApple OSS Distributions 
3490*5c2921b0SApple OSS Distributions 	if ((address + length) > 0x100000000ULL) {
3491*5c2921b0SApple OSS Distributions 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3492*5c2921b0SApple OSS Distributions 		    address, (long) length, (getMetaClass())->getClassName());
3493*5c2921b0SApple OSS Distributions 	}
3494*5c2921b0SApple OSS Distributions 
3495*5c2921b0SApple OSS Distributions 	return (IOPhysicalAddress) address;
3496*5c2921b0SApple OSS Distributions }
3497*5c2921b0SApple OSS Distributions 
3498*5c2921b0SApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3499*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3500*5c2921b0SApple OSS Distributions {
3501*5c2921b0SApple OSS Distributions 	IOPhysicalAddress phys32;
3502*5c2921b0SApple OSS Distributions 	IOByteCount       length;
3503*5c2921b0SApple OSS Distributions 	addr64_t          phys64;
3504*5c2921b0SApple OSS Distributions 	IOMapper *        mapper = NULL;
3505*5c2921b0SApple OSS Distributions 
3506*5c2921b0SApple OSS Distributions 	phys32 = getPhysicalSegment(offset, lengthOfSegment);
3507*5c2921b0SApple OSS Distributions 	if (!phys32) {
3508*5c2921b0SApple OSS Distributions 		return 0;
3509*5c2921b0SApple OSS Distributions 	}
3510*5c2921b0SApple OSS Distributions 
3511*5c2921b0SApple OSS Distributions 	if (gIOSystemMapper) {
3512*5c2921b0SApple OSS Distributions 		mapper = gIOSystemMapper;
3513*5c2921b0SApple OSS Distributions 	}
3514*5c2921b0SApple OSS Distributions 
3515*5c2921b0SApple OSS Distributions 	if (mapper) {
3516*5c2921b0SApple OSS Distributions 		IOByteCount origLen;
3517*5c2921b0SApple OSS Distributions 
3518*5c2921b0SApple OSS Distributions 		phys64 = mapper->mapToPhysicalAddress(phys32);
3519*5c2921b0SApple OSS Distributions 		origLen = *lengthOfSegment;
3520*5c2921b0SApple OSS Distributions 		length = page_size - (phys64 & (page_size - 1));
3521*5c2921b0SApple OSS Distributions 		while ((length < origLen)
3522*5c2921b0SApple OSS Distributions 		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3523*5c2921b0SApple OSS Distributions 			length += page_size;
3524*5c2921b0SApple OSS Distributions 		}
3525*5c2921b0SApple OSS Distributions 		if (length > origLen) {
3526*5c2921b0SApple OSS Distributions 			length = origLen;
3527*5c2921b0SApple OSS Distributions 		}
3528*5c2921b0SApple OSS Distributions 
3529*5c2921b0SApple OSS Distributions 		*lengthOfSegment = length;
3530*5c2921b0SApple OSS Distributions 	} else {
3531*5c2921b0SApple OSS Distributions 		phys64 = (addr64_t) phys32;
3532*5c2921b0SApple OSS Distributions 	}
3533*5c2921b0SApple OSS Distributions 
3534*5c2921b0SApple OSS Distributions 	return phys64;
3535*5c2921b0SApple OSS Distributions }
3536*5c2921b0SApple OSS Distributions 
3537*5c2921b0SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3538*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3539*5c2921b0SApple OSS Distributions {
3540*5c2921b0SApple OSS Distributions 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3541*5c2921b0SApple OSS Distributions }
3542*5c2921b0SApple OSS Distributions 
3543*5c2921b0SApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3544*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3545*5c2921b0SApple OSS Distributions {
3546*5c2921b0SApple OSS Distributions 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3547*5c2921b0SApple OSS Distributions }
3548*5c2921b0SApple OSS Distributions 
3549*5c2921b0SApple OSS Distributions #pragma clang diagnostic push
3550*5c2921b0SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3551*5c2921b0SApple OSS Distributions 
3552*5c2921b0SApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3553*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3554*5c2921b0SApple OSS Distributions     IOByteCount * lengthOfSegment)
3555*5c2921b0SApple OSS Distributions {
3556*5c2921b0SApple OSS Distributions 	if (_task == kernel_task) {
3557*5c2921b0SApple OSS Distributions 		return (void *) getSourceSegment(offset, lengthOfSegment);
3558*5c2921b0SApple OSS Distributions 	} else {
3559*5c2921b0SApple OSS Distributions 		panic("IOGMD::getVirtualSegment deprecated");
3560*5c2921b0SApple OSS Distributions 	}
3561*5c2921b0SApple OSS Distributions 
3562*5c2921b0SApple OSS Distributions 	return NULL;
3563*5c2921b0SApple OSS Distributions }
3564*5c2921b0SApple OSS Distributions #pragma clang diagnostic pop
3565*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
3566*5c2921b0SApple OSS Distributions 
/*
 * Generic (base-class) implementation of the DMA command operation
 * dispatcher.  'op' carries both an operation selector (low bits, per
 * kIOMDDMACommandOperationMask) and option bits; 'vData' points at an
 * op-specific argument structure of at least 'dataSize' bytes.
 *
 * Returns kIOReturnUnderrun when dataSize is too small for the op's
 * argument structure, kIOReturnBadArgument for unknown ops, otherwise the
 * op's result.
 */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// NOTE(review): (op & ~mask & op) == (op & ~mask); 'params' holds the
	// option bits and is currently unused in this base implementation.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		// Walk from fOffset, coalescing physically adjacent segments into
		// one (fIOVMAddr, fLength) pair.
		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// The base class can only produce unmapped (physical) segments.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// NOTE(review): dmaUnmap's result is assigned to 'err' but
		// discarded; success is returned unconditionally — confirm this
		// is intentional before relying on the return value.
		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3638*5c2921b0SApple OSS Distributions 
/*
 * Change (or query) the purgeability state of the memory backing this
 * descriptor.
 *
 * newState - requested kIOMemoryPurgeable* state, translated to a
 *            vm_purgable_t control/state pair via purgeableControlBits().
 * oldState - if non-NULL, receives the previous state on success.
 *
 * Prepared descriptors (_memRef set) delegate to the superclass; otherwise
 * the request is applied directly to the first source range in the task's
 * vm_map.  Not supported for remote or pageable-buffer descriptors.
 */
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		// Prepared memory: operate on the memory reference instead.
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0);

			// Map the IOKit purgeable constants to VM control/state values.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			// vm_map_purgable_control() also returns the prior state in
			// 'state' on success.
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3702*5c2921b0SApple OSS Distributions 
3703*5c2921b0SApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3704*5c2921b0SApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3705*5c2921b0SApple OSS Distributions     IOOptionBits * oldState )
3706*5c2921b0SApple OSS Distributions {
3707*5c2921b0SApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3708*5c2921b0SApple OSS Distributions 
3709*5c2921b0SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3710*5c2921b0SApple OSS Distributions 		LOCK;
3711*5c2921b0SApple OSS Distributions 	}
3712*5c2921b0SApple OSS Distributions 	if (_memRef) {
3713*5c2921b0SApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3714*5c2921b0SApple OSS Distributions 	}
3715*5c2921b0SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3716*5c2921b0SApple OSS Distributions 		UNLOCK;
3717*5c2921b0SApple OSS Distributions 	}
3718*5c2921b0SApple OSS Distributions 
3719*5c2921b0SApple OSS Distributions 	return err;
3720*5c2921b0SApple OSS Distributions }
3721*5c2921b0SApple OSS Distributions 
3722*5c2921b0SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3723*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3724*5c2921b0SApple OSS Distributions     int newLedgerTag,
3725*5c2921b0SApple OSS Distributions     IOOptionBits newLedgerOptions )
3726*5c2921b0SApple OSS Distributions {
3727*5c2921b0SApple OSS Distributions 	IOReturn      err = kIOReturnSuccess;
3728*5c2921b0SApple OSS Distributions 
3729*5c2921b0SApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3730*5c2921b0SApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3731*5c2921b0SApple OSS Distributions 		return kIOReturnNotAttached;
3732*5c2921b0SApple OSS Distributions 	}
3733*5c2921b0SApple OSS Distributions 
3734*5c2921b0SApple OSS Distributions 	if (iokit_iomd_setownership_enabled == FALSE) {
3735*5c2921b0SApple OSS Distributions 		return kIOReturnUnsupported;
3736*5c2921b0SApple OSS Distributions 	}
3737*5c2921b0SApple OSS Distributions 
3738*5c2921b0SApple OSS Distributions 	if (_memRef) {
3739*5c2921b0SApple OSS Distributions 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3740*5c2921b0SApple OSS Distributions 	} else {
3741*5c2921b0SApple OSS Distributions 		err = kIOReturnUnsupported;
3742*5c2921b0SApple OSS Distributions 	}
3743*5c2921b0SApple OSS Distributions 
3744*5c2921b0SApple OSS Distributions 	return err;
3745*5c2921b0SApple OSS Distributions }
3746*5c2921b0SApple OSS Distributions 
3747*5c2921b0SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3748*5c2921b0SApple OSS Distributions IOMemoryDescriptor::setOwnership( task_t newOwner,
3749*5c2921b0SApple OSS Distributions     int newLedgerTag,
3750*5c2921b0SApple OSS Distributions     IOOptionBits newLedgerOptions )
3751*5c2921b0SApple OSS Distributions {
3752*5c2921b0SApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3753*5c2921b0SApple OSS Distributions 
3754*5c2921b0SApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3755*5c2921b0SApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3756*5c2921b0SApple OSS Distributions 		return kIOReturnNotAttached;
3757*5c2921b0SApple OSS Distributions 	}
3758*5c2921b0SApple OSS Distributions 
3759*5c2921b0SApple OSS Distributions 	if (iokit_iomd_setownership_enabled == FALSE) {
3760*5c2921b0SApple OSS Distributions 		return kIOReturnUnsupported;
3761*5c2921b0SApple OSS Distributions 	}
3762*5c2921b0SApple OSS Distributions 
3763*5c2921b0SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3764*5c2921b0SApple OSS Distributions 		LOCK;
3765*5c2921b0SApple OSS Distributions 	}
3766*5c2921b0SApple OSS Distributions 	if (_memRef) {
3767*5c2921b0SApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3768*5c2921b0SApple OSS Distributions 	} else {
3769*5c2921b0SApple OSS Distributions 		IOMultiMemoryDescriptor * mmd;
3770*5c2921b0SApple OSS Distributions 		IOSubMemoryDescriptor   * smd;
3771*5c2921b0SApple OSS Distributions 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3772*5c2921b0SApple OSS Distributions 			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3773*5c2921b0SApple OSS Distributions 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3774*5c2921b0SApple OSS Distributions 			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3775*5c2921b0SApple OSS Distributions 		}
3776*5c2921b0SApple OSS Distributions 	}
3777*5c2921b0SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3778*5c2921b0SApple OSS Distributions 		UNLOCK;
3779*5c2921b0SApple OSS Distributions 	}
3780*5c2921b0SApple OSS Distributions 
3781*5c2921b0SApple OSS Distributions 	return err;
3782*5c2921b0SApple OSS Distributions }
3783*5c2921b0SApple OSS Distributions 
3784*5c2921b0SApple OSS Distributions 
3785*5c2921b0SApple OSS Distributions uint64_t
getDMAMapLength(uint64_t * offset)3786*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3787*5c2921b0SApple OSS Distributions {
3788*5c2921b0SApple OSS Distributions 	uint64_t length;
3789*5c2921b0SApple OSS Distributions 
3790*5c2921b0SApple OSS Distributions 	if (_memRef) {
3791*5c2921b0SApple OSS Distributions 		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3792*5c2921b0SApple OSS Distributions 	} else {
3793*5c2921b0SApple OSS Distributions 		IOByteCount       iterate, segLen;
3794*5c2921b0SApple OSS Distributions 		IOPhysicalAddress sourceAddr, sourceAlign;
3795*5c2921b0SApple OSS Distributions 
3796*5c2921b0SApple OSS Distributions 		if (kIOMemoryThreadSafe & _flags) {
3797*5c2921b0SApple OSS Distributions 			LOCK;
3798*5c2921b0SApple OSS Distributions 		}
3799*5c2921b0SApple OSS Distributions 		length = 0;
3800*5c2921b0SApple OSS Distributions 		iterate = 0;
3801*5c2921b0SApple OSS Distributions 		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3802*5c2921b0SApple OSS Distributions 			sourceAlign = (sourceAddr & page_mask);
3803*5c2921b0SApple OSS Distributions 			if (offset && !iterate) {
3804*5c2921b0SApple OSS Distributions 				*offset = sourceAlign;
3805*5c2921b0SApple OSS Distributions 			}
3806*5c2921b0SApple OSS Distributions 			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3807*5c2921b0SApple OSS Distributions 			iterate += segLen;
3808*5c2921b0SApple OSS Distributions 		}
3809*5c2921b0SApple OSS Distributions 		if (!iterate) {
3810*5c2921b0SApple OSS Distributions 			length = getLength();
3811*5c2921b0SApple OSS Distributions 			if (offset) {
3812*5c2921b0SApple OSS Distributions 				*offset = 0;
3813*5c2921b0SApple OSS Distributions 			}
3814*5c2921b0SApple OSS Distributions 		}
3815*5c2921b0SApple OSS Distributions 		if (kIOMemoryThreadSafe & _flags) {
3816*5c2921b0SApple OSS Distributions 			UNLOCK;
3817*5c2921b0SApple OSS Distributions 		}
3818*5c2921b0SApple OSS Distributions 	}
3819*5c2921b0SApple OSS Distributions 
3820*5c2921b0SApple OSS Distributions 	return length;
3821*5c2921b0SApple OSS Distributions }
3822*5c2921b0SApple OSS Distributions 
3823*5c2921b0SApple OSS Distributions 
3824*5c2921b0SApple OSS Distributions IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3825*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3826*5c2921b0SApple OSS Distributions     IOByteCount * dirtyPageCount )
3827*5c2921b0SApple OSS Distributions {
3828*5c2921b0SApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3829*5c2921b0SApple OSS Distributions 
3830*5c2921b0SApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3831*5c2921b0SApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3832*5c2921b0SApple OSS Distributions 		return kIOReturnNotAttached;
3833*5c2921b0SApple OSS Distributions 	}
3834*5c2921b0SApple OSS Distributions 
3835*5c2921b0SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3836*5c2921b0SApple OSS Distributions 		LOCK;
3837*5c2921b0SApple OSS Distributions 	}
3838*5c2921b0SApple OSS Distributions 	if (_memRef) {
3839*5c2921b0SApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3840*5c2921b0SApple OSS Distributions 	} else {
3841*5c2921b0SApple OSS Distributions 		IOMultiMemoryDescriptor * mmd;
3842*5c2921b0SApple OSS Distributions 		IOSubMemoryDescriptor   * smd;
3843*5c2921b0SApple OSS Distributions 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3844*5c2921b0SApple OSS Distributions 			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3845*5c2921b0SApple OSS Distributions 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3846*5c2921b0SApple OSS Distributions 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3847*5c2921b0SApple OSS Distributions 		}
3848*5c2921b0SApple OSS Distributions 	}
3849*5c2921b0SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3850*5c2921b0SApple OSS Distributions 		UNLOCK;
3851*5c2921b0SApple OSS Distributions 	}
3852*5c2921b0SApple OSS Distributions 
3853*5c2921b0SApple OSS Distributions 	return err;
3854*5c2921b0SApple OSS Distributions }
3855*5c2921b0SApple OSS Distributions 
3856*5c2921b0SApple OSS Distributions 
3857*5c2921b0SApple OSS Distributions #if defined(__arm64__)
3858*5c2921b0SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3859*5c2921b0SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3860*5c2921b0SApple OSS Distributions #else /* defined(__arm64__) */
3861*5c2921b0SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3862*5c2921b0SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3863*5c2921b0SApple OSS Distributions #endif /* defined(__arm64__) */
3864*5c2921b0SApple OSS Distributions 
3865*5c2921b0SApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3866*5c2921b0SApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3867*5c2921b0SApple OSS Distributions {
3868*5c2921b0SApple OSS Distributions 	ppnum_t page, end;
3869*5c2921b0SApple OSS Distributions 
3870*5c2921b0SApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3871*5c2921b0SApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3872*5c2921b0SApple OSS Distributions 	for (; page < end; page++) {
3873*5c2921b0SApple OSS Distributions 		pmap_clear_noencrypt(page);
3874*5c2921b0SApple OSS Distributions 	}
3875*5c2921b0SApple OSS Distributions }
3876*5c2921b0SApple OSS Distributions 
3877*5c2921b0SApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3878*5c2921b0SApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3879*5c2921b0SApple OSS Distributions {
3880*5c2921b0SApple OSS Distributions 	ppnum_t page, end;
3881*5c2921b0SApple OSS Distributions 
3882*5c2921b0SApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3883*5c2921b0SApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3884*5c2921b0SApple OSS Distributions 	for (; page < end; page++) {
3885*5c2921b0SApple OSS Distributions 		pmap_set_noencrypt(page);
3886*5c2921b0SApple OSS Distributions 	}
3887*5c2921b0SApple OSS Distributions }
3888*5c2921b0SApple OSS Distributions 
// Apply a cache-maintenance or encryption-policy operation to the physical
// pages backing [offset, offset + length) of this descriptor.
// Supported options: kIOMemoryIncoherentIOFlush / kIOMemoryIncoherentIOStore
// (incoherent-I/O data-cache maintenance) and kIOMemorySetEncrypted /
// kIOMemoryClearEncrypted (per-page pmap no-encrypt policy).
// Returns kIOReturnUnsupported for any other option, kIOReturnNotAttached for
// remote memory, and kIOReturnUnderrun if a physical segment could not be
// resolved before the whole clipped range was covered.
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	// Legacy per-segment operation: (physical address, byte count).
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	// Extended arm64 form: also receives the bytes remaining after this
	// segment and reports status through *result (non-zero aborts the walk).
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	// Memory owned by a remote (other-VM) descriptor cannot be operated on here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// I/O is cache-coherent in this configuration: a single zero-length
		// call suffices (no per-segment walk needed).
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// Coherent I/O: single zero-length call, skip the segment walk.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	// No handler selected above means the option is not supported.
#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the request to the descriptor's extent, then walk it one physical
	// segment at a time.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// The handlers take 32-bit byte counts: cap the segment length to the
		// largest page-multiple that fits in unsigned int so the cast below
		// cannot truncate.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// NOTE(review): this clamps the loop counter itself, not merely the
		// value passed to func_ext; for lengths > UINT_MAX the tail beyond
		// 4 GiB appears to be skipped yet success is still reported —
		// presumably no caller passes such lengths; confirm.
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// A non-zero result aborts the walk; remaining is cleared so the
			// function still returns kIOReturnSuccess below.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4003*5c2921b0SApple OSS Distributions 
4004*5c2921b0SApple OSS Distributions /*
4005*5c2921b0SApple OSS Distributions  *
4006*5c2921b0SApple OSS Distributions  */
4007*5c2921b0SApple OSS Distributions 
4008*5c2921b0SApple OSS Distributions #if defined(__i386__) || defined(__x86_64__)
4009*5c2921b0SApple OSS Distributions 
4010*5c2921b0SApple OSS Distributions extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4011*5c2921b0SApple OSS Distributions 
4012*5c2921b0SApple OSS Distributions /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4013*5c2921b0SApple OSS Distributions  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4014*5c2921b0SApple OSS Distributions  * kernel non-text data -- should we just add another range instead?
4015*5c2921b0SApple OSS Distributions  */
4016*5c2921b0SApple OSS Distributions #define io_kernel_static_start  vm_kernel_stext
4017*5c2921b0SApple OSS Distributions #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4018*5c2921b0SApple OSS Distributions 
4019*5c2921b0SApple OSS Distributions #elif defined(__arm64__)
4020*5c2921b0SApple OSS Distributions 
4021*5c2921b0SApple OSS Distributions extern vm_offset_t              static_memory_end;
4022*5c2921b0SApple OSS Distributions 
4023*5c2921b0SApple OSS Distributions #if defined(__arm64__)
4024*5c2921b0SApple OSS Distributions #define io_kernel_static_start vm_kext_base
4025*5c2921b0SApple OSS Distributions #else /* defined(__arm64__) */
4026*5c2921b0SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4027*5c2921b0SApple OSS Distributions #endif /* defined(__arm64__) */
4028*5c2921b0SApple OSS Distributions 
4029*5c2921b0SApple OSS Distributions #define io_kernel_static_end    static_memory_end
4030*5c2921b0SApple OSS Distributions 
4031*5c2921b0SApple OSS Distributions #else
4032*5c2921b0SApple OSS Distributions #error io_kernel_static_end is undefined for this architecture
4033*5c2921b0SApple OSS Distributions #endif
4034*5c2921b0SApple OSS Distributions 
4035*5c2921b0SApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4036*5c2921b0SApple OSS Distributions io_get_kernel_static_upl(
4037*5c2921b0SApple OSS Distributions 	vm_map_t                /* map */,
4038*5c2921b0SApple OSS Distributions 	uintptr_t               offset,
4039*5c2921b0SApple OSS Distributions 	upl_size_t              *upl_size,
4040*5c2921b0SApple OSS Distributions 	unsigned int            *page_offset,
4041*5c2921b0SApple OSS Distributions 	upl_t                   *upl,
4042*5c2921b0SApple OSS Distributions 	upl_page_info_array_t   page_list,
4043*5c2921b0SApple OSS Distributions 	unsigned int            *count,
4044*5c2921b0SApple OSS Distributions 	ppnum_t                 *highest_page)
4045*5c2921b0SApple OSS Distributions {
4046*5c2921b0SApple OSS Distributions 	unsigned int pageCount, page;
4047*5c2921b0SApple OSS Distributions 	ppnum_t phys;
4048*5c2921b0SApple OSS Distributions 	ppnum_t highestPage = 0;
4049*5c2921b0SApple OSS Distributions 
4050*5c2921b0SApple OSS Distributions 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4051*5c2921b0SApple OSS Distributions 	if (pageCount > *count) {
4052*5c2921b0SApple OSS Distributions 		pageCount = *count;
4053*5c2921b0SApple OSS Distributions 	}
4054*5c2921b0SApple OSS Distributions 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4055*5c2921b0SApple OSS Distributions 
4056*5c2921b0SApple OSS Distributions 	*upl = NULL;
4057*5c2921b0SApple OSS Distributions 	*page_offset = ((unsigned int) page_mask & offset);
4058*5c2921b0SApple OSS Distributions 
4059*5c2921b0SApple OSS Distributions 	for (page = 0; page < pageCount; page++) {
4060*5c2921b0SApple OSS Distributions 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4061*5c2921b0SApple OSS Distributions 		if (!phys) {
4062*5c2921b0SApple OSS Distributions 			break;
4063*5c2921b0SApple OSS Distributions 		}
4064*5c2921b0SApple OSS Distributions 		page_list[page].phys_addr = phys;
4065*5c2921b0SApple OSS Distributions 		page_list[page].free_when_done = 0;
4066*5c2921b0SApple OSS Distributions 		page_list[page].absent    = 0;
4067*5c2921b0SApple OSS Distributions 		page_list[page].dirty     = 0;
4068*5c2921b0SApple OSS Distributions 		page_list[page].precious  = 0;
4069*5c2921b0SApple OSS Distributions 		page_list[page].device    = 0;
4070*5c2921b0SApple OSS Distributions 		if (phys > highestPage) {
4071*5c2921b0SApple OSS Distributions 			highestPage = phys;
4072*5c2921b0SApple OSS Distributions 		}
4073*5c2921b0SApple OSS Distributions 	}
4074*5c2921b0SApple OSS Distributions 
4075*5c2921b0SApple OSS Distributions 	*highest_page = highestPage;
4076*5c2921b0SApple OSS Distributions 
4077*5c2921b0SApple OSS Distributions 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4078*5c2921b0SApple OSS Distributions }
4079*5c2921b0SApple OSS Distributions 
4080*5c2921b0SApple OSS Distributions IOReturn
wireVirtual(IODirection forDirection)4081*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4082*5c2921b0SApple OSS Distributions {
4083*5c2921b0SApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4084*5c2921b0SApple OSS Distributions 	IOReturn error = kIOReturnSuccess;
4085*5c2921b0SApple OSS Distributions 	ioGMDData *dataP;
4086*5c2921b0SApple OSS Distributions 	upl_page_info_array_t pageInfo;
4087*5c2921b0SApple OSS Distributions 	ppnum_t mapBase;
4088*5c2921b0SApple OSS Distributions 	vm_tag_t tag = VM_KERN_MEMORY_NONE;
4089*5c2921b0SApple OSS Distributions 	mach_vm_size_t numBytesWired = 0;
4090*5c2921b0SApple OSS Distributions 
4091*5c2921b0SApple OSS Distributions 	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4092*5c2921b0SApple OSS Distributions 
4093*5c2921b0SApple OSS Distributions 	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4094*5c2921b0SApple OSS Distributions 		forDirection = (IODirection) (forDirection | getDirection());
4095*5c2921b0SApple OSS Distributions 	}
4096*5c2921b0SApple OSS Distributions 
4097*5c2921b0SApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4098*5c2921b0SApple OSS Distributions 	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4099*5c2921b0SApple OSS Distributions 	switch (kIODirectionOutIn & forDirection) {
4100*5c2921b0SApple OSS Distributions 	case kIODirectionOut:
4101*5c2921b0SApple OSS Distributions 		// Pages do not need to be marked as dirty on commit
4102*5c2921b0SApple OSS Distributions 		uplFlags = UPL_COPYOUT_FROM;
4103*5c2921b0SApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess;
4104*5c2921b0SApple OSS Distributions 		break;
4105*5c2921b0SApple OSS Distributions 
4106*5c2921b0SApple OSS Distributions 	case kIODirectionIn:
4107*5c2921b0SApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapWriteAccess;
4108*5c2921b0SApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4109*5c2921b0SApple OSS Distributions 		break;
4110*5c2921b0SApple OSS Distributions 
4111*5c2921b0SApple OSS Distributions 	default:
4112*5c2921b0SApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4113*5c2921b0SApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4114*5c2921b0SApple OSS Distributions 		break;
4115*5c2921b0SApple OSS Distributions 	}
4116*5c2921b0SApple OSS Distributions 
4117*5c2921b0SApple OSS Distributions 	if (_wireCount) {
4118*5c2921b0SApple OSS Distributions 		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4119*5c2921b0SApple OSS Distributions 			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4120*5c2921b0SApple OSS Distributions 			    (size_t)VM_KERNEL_ADDRPERM(this));
4121*5c2921b0SApple OSS Distributions 			error = kIOReturnNotWritable;
4122*5c2921b0SApple OSS Distributions 		}
4123*5c2921b0SApple OSS Distributions 	} else {
4124*5c2921b0SApple OSS Distributions 		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4125*5c2921b0SApple OSS Distributions 		IOMapper *mapper;
4126*5c2921b0SApple OSS Distributions 
4127*5c2921b0SApple OSS Distributions 		mapper = dataP->fMapper;
4128*5c2921b0SApple OSS Distributions 		dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4129*5c2921b0SApple OSS Distributions 
4130*5c2921b0SApple OSS Distributions 		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4131*5c2921b0SApple OSS Distributions 		tag = _kernelTag;
4132*5c2921b0SApple OSS Distributions 		if (VM_KERN_MEMORY_NONE == tag) {
4133*5c2921b0SApple OSS Distributions 			tag = IOMemoryTag(kernel_map);
4134*5c2921b0SApple OSS Distributions 		}
4135*5c2921b0SApple OSS Distributions 
4136*5c2921b0SApple OSS Distributions 		if (kIODirectionPrepareToPhys32 & forDirection) {
4137*5c2921b0SApple OSS Distributions 			if (!mapper) {
4138*5c2921b0SApple OSS Distributions 				uplFlags |= UPL_NEED_32BIT_ADDR;
4139*5c2921b0SApple OSS Distributions 			}
4140*5c2921b0SApple OSS Distributions 			if (dataP->fDMAMapNumAddressBits > 32) {
4141*5c2921b0SApple OSS Distributions 				dataP->fDMAMapNumAddressBits = 32;
4142*5c2921b0SApple OSS Distributions 			}
4143*5c2921b0SApple OSS Distributions 		}
4144*5c2921b0SApple OSS Distributions 		if (kIODirectionPrepareNoFault    & forDirection) {
4145*5c2921b0SApple OSS Distributions 			uplFlags |= UPL_REQUEST_NO_FAULT;
4146*5c2921b0SApple OSS Distributions 		}
4147*5c2921b0SApple OSS Distributions 		if (kIODirectionPrepareNoZeroFill & forDirection) {
4148*5c2921b0SApple OSS Distributions 			uplFlags |= UPL_NOZEROFILLIO;
4149*5c2921b0SApple OSS Distributions 		}
4150*5c2921b0SApple OSS Distributions 		if (kIODirectionPrepareNonCoherent & forDirection) {
4151*5c2921b0SApple OSS Distributions 			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4152*5c2921b0SApple OSS Distributions 		}
4153*5c2921b0SApple OSS Distributions 
4154*5c2921b0SApple OSS Distributions 		mapBase = 0;
4155*5c2921b0SApple OSS Distributions 
4156*5c2921b0SApple OSS Distributions 		// Note that appendBytes(NULL) zeros the data up to the desired length
4157*5c2921b0SApple OSS Distributions 		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4158*5c2921b0SApple OSS Distributions 		if (uplPageSize > ((unsigned int)uplPageSize)) {
4159*5c2921b0SApple OSS Distributions 			error = kIOReturnNoMemory;
4160*5c2921b0SApple OSS Distributions 			traceInterval.setEndArg2(error);
4161*5c2921b0SApple OSS Distributions 			return error;
4162*5c2921b0SApple OSS Distributions 		}
4163*5c2921b0SApple OSS Distributions 		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4164*5c2921b0SApple OSS Distributions 			error = kIOReturnNoMemory;
4165*5c2921b0SApple OSS Distributions 			traceInterval.setEndArg2(error);
4166*5c2921b0SApple OSS Distributions 			return error;
4167*5c2921b0SApple OSS Distributions 		}
4168*5c2921b0SApple OSS Distributions 		dataP = NULL;
4169*5c2921b0SApple OSS Distributions 
4170*5c2921b0SApple OSS Distributions 		// Find the appropriate vm_map for the given task
4171*5c2921b0SApple OSS Distributions 		vm_map_t curMap;
4172*5c2921b0SApple OSS Distributions 		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4173*5c2921b0SApple OSS Distributions 			curMap = NULL;
4174*5c2921b0SApple OSS Distributions 		} else {
4175*5c2921b0SApple OSS Distributions 			curMap = get_task_map(_task);
4176*5c2921b0SApple OSS Distributions 		}
4177*5c2921b0SApple OSS Distributions 
4178*5c2921b0SApple OSS Distributions 		// Iterate over the vector of virtual ranges
4179*5c2921b0SApple OSS Distributions 		Ranges vec = _ranges;
4180*5c2921b0SApple OSS Distributions 		unsigned int pageIndex  = 0;
4181*5c2921b0SApple OSS Distributions 		IOByteCount mdOffset    = 0;
4182*5c2921b0SApple OSS Distributions 		ppnum_t highestPage     = 0;
4183*5c2921b0SApple OSS Distributions 		bool         byteAlignUPL;
4184*5c2921b0SApple OSS Distributions 
4185*5c2921b0SApple OSS Distributions 		IOMemoryEntry * memRefEntry = NULL;
4186*5c2921b0SApple OSS Distributions 		if (_memRef) {
4187*5c2921b0SApple OSS Distributions 			memRefEntry = &_memRef->entries[0];
4188*5c2921b0SApple OSS Distributions 			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4189*5c2921b0SApple OSS Distributions 		} else {
4190*5c2921b0SApple OSS Distributions 			byteAlignUPL = true;
4191*5c2921b0SApple OSS Distributions 		}
4192*5c2921b0SApple OSS Distributions 
4193*5c2921b0SApple OSS Distributions 		for (UInt range = 0; mdOffset < _length; range++) {
4194*5c2921b0SApple OSS Distributions 			ioPLBlock iopl;
4195*5c2921b0SApple OSS Distributions 			mach_vm_address_t startPage, startPageOffset;
4196*5c2921b0SApple OSS Distributions 			mach_vm_size_t    numBytes;
4197*5c2921b0SApple OSS Distributions 			ppnum_t highPage = 0;
4198*5c2921b0SApple OSS Distributions 
4199*5c2921b0SApple OSS Distributions 			if (_memRef) {
4200*5c2921b0SApple OSS Distributions 				if (range >= _memRef->count) {
4201*5c2921b0SApple OSS Distributions 					panic("memRefEntry");
4202*5c2921b0SApple OSS Distributions 				}
4203*5c2921b0SApple OSS Distributions 				memRefEntry = &_memRef->entries[range];
4204*5c2921b0SApple OSS Distributions 				numBytes    = memRefEntry->size;
4205*5c2921b0SApple OSS Distributions 				startPage   = -1ULL;
4206*5c2921b0SApple OSS Distributions 				if (byteAlignUPL) {
4207*5c2921b0SApple OSS Distributions 					startPageOffset = 0;
4208*5c2921b0SApple OSS Distributions 				} else {
4209*5c2921b0SApple OSS Distributions 					startPageOffset = (memRefEntry->start & PAGE_MASK);
4210*5c2921b0SApple OSS Distributions 				}
4211*5c2921b0SApple OSS Distributions 			} else {
4212*5c2921b0SApple OSS Distributions 				// Get the startPage address and length of vec[range]
4213*5c2921b0SApple OSS Distributions 				getAddrLenForInd(startPage, numBytes, type, vec, range);
4214*5c2921b0SApple OSS Distributions 				if (byteAlignUPL) {
4215*5c2921b0SApple OSS Distributions 					startPageOffset = 0;
4216*5c2921b0SApple OSS Distributions 				} else {
4217*5c2921b0SApple OSS Distributions 					startPageOffset = startPage & PAGE_MASK;
4218*5c2921b0SApple OSS Distributions 					startPage = trunc_page_64(startPage);
4219*5c2921b0SApple OSS Distributions 				}
4220*5c2921b0SApple OSS Distributions 			}
4221*5c2921b0SApple OSS Distributions 			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4222*5c2921b0SApple OSS Distributions 			numBytes += startPageOffset;
4223*5c2921b0SApple OSS Distributions 
4224*5c2921b0SApple OSS Distributions 			if (mapper) {
4225*5c2921b0SApple OSS Distributions 				iopl.fMappedPage = mapBase + pageIndex;
4226*5c2921b0SApple OSS Distributions 			} else {
4227*5c2921b0SApple OSS Distributions 				iopl.fMappedPage = 0;
4228*5c2921b0SApple OSS Distributions 			}
4229*5c2921b0SApple OSS Distributions 
4230*5c2921b0SApple OSS Distributions 			// Iterate over the current range, creating UPLs
4231*5c2921b0SApple OSS Distributions 			while (numBytes) {
4232*5c2921b0SApple OSS Distributions 				vm_address_t kernelStart = (vm_address_t) startPage;
4233*5c2921b0SApple OSS Distributions 				vm_map_t theMap;
4234*5c2921b0SApple OSS Distributions 				if (curMap) {
4235*5c2921b0SApple OSS Distributions 					theMap = curMap;
4236*5c2921b0SApple OSS Distributions 				} else if (_memRef) {
4237*5c2921b0SApple OSS Distributions 					theMap = NULL;
4238*5c2921b0SApple OSS Distributions 				} else {
4239*5c2921b0SApple OSS Distributions 					assert(_task == kernel_task);
4240*5c2921b0SApple OSS Distributions 					theMap = IOPageableMapForAddress(kernelStart);
4241*5c2921b0SApple OSS Distributions 				}
4242*5c2921b0SApple OSS Distributions 
4243*5c2921b0SApple OSS Distributions 				// ioplFlags is an in/out parameter
4244*5c2921b0SApple OSS Distributions 				upl_control_flags_t ioplFlags = uplFlags;
4245*5c2921b0SApple OSS Distributions 				dataP = getDataP(_memoryEntries);
4246*5c2921b0SApple OSS Distributions 				pageInfo = getPageList(dataP);
4247*5c2921b0SApple OSS Distributions 				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4248*5c2921b0SApple OSS Distributions 
4249*5c2921b0SApple OSS Distributions 				mach_vm_size_t ioplPhysSize;
4250*5c2921b0SApple OSS Distributions 				upl_size_t     ioplSize;
4251*5c2921b0SApple OSS Distributions 				unsigned int   numPageInfo;
4252*5c2921b0SApple OSS Distributions 
4253*5c2921b0SApple OSS Distributions 				if (_memRef) {
4254*5c2921b0SApple OSS Distributions 					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4255*5c2921b0SApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4256*5c2921b0SApple OSS Distributions 				} else {
4257*5c2921b0SApple OSS Distributions 					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4258*5c2921b0SApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4259*5c2921b0SApple OSS Distributions 				}
4260*5c2921b0SApple OSS Distributions 				if (error != KERN_SUCCESS) {
4261*5c2921b0SApple OSS Distributions 					if (_memRef) {
4262*5c2921b0SApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4263*5c2921b0SApple OSS Distributions 					} else {
4264*5c2921b0SApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4265*5c2921b0SApple OSS Distributions 					}
4266*5c2921b0SApple OSS Distributions 					printf("entry size error %d\n", error);
4267*5c2921b0SApple OSS Distributions 					goto abortExit;
4268*5c2921b0SApple OSS Distributions 				}
4269*5c2921b0SApple OSS Distributions 				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4270*5c2921b0SApple OSS Distributions 				numPageInfo = atop_32(ioplPhysSize);
4271*5c2921b0SApple OSS Distributions 				if (byteAlignUPL) {
4272*5c2921b0SApple OSS Distributions 					if (numBytes > ioplPhysSize) {
4273*5c2921b0SApple OSS Distributions 						ioplSize = ((typeof(ioplSize))ioplPhysSize);
4274*5c2921b0SApple OSS Distributions 					} else {
4275*5c2921b0SApple OSS Distributions 						ioplSize = ((typeof(ioplSize))numBytes);
4276*5c2921b0SApple OSS Distributions 					}
4277*5c2921b0SApple OSS Distributions 				} else {
4278*5c2921b0SApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ioplPhysSize);
4279*5c2921b0SApple OSS Distributions 				}
4280*5c2921b0SApple OSS Distributions 
4281*5c2921b0SApple OSS Distributions 				if (_memRef) {
4282*5c2921b0SApple OSS Distributions 					memory_object_offset_t entryOffset;
4283*5c2921b0SApple OSS Distributions 
4284*5c2921b0SApple OSS Distributions 					entryOffset = mdOffset;
4285*5c2921b0SApple OSS Distributions 					if (byteAlignUPL) {
4286*5c2921b0SApple OSS Distributions 						entryOffset = (entryOffset - memRefEntry->offset);
4287*5c2921b0SApple OSS Distributions 					} else {
4288*5c2921b0SApple OSS Distributions 						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4289*5c2921b0SApple OSS Distributions 					}
4290*5c2921b0SApple OSS Distributions 					if (ioplSize > (memRefEntry->size - entryOffset)) {
4291*5c2921b0SApple OSS Distributions 						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4292*5c2921b0SApple OSS Distributions 					}
4293*5c2921b0SApple OSS Distributions 					error = memory_object_iopl_request(memRefEntry->entry,
4294*5c2921b0SApple OSS Distributions 					    entryOffset,
4295*5c2921b0SApple OSS Distributions 					    &ioplSize,
4296*5c2921b0SApple OSS Distributions 					    &iopl.fIOPL,
4297*5c2921b0SApple OSS Distributions 					    baseInfo,
4298*5c2921b0SApple OSS Distributions 					    &numPageInfo,
4299*5c2921b0SApple OSS Distributions 					    &ioplFlags,
4300*5c2921b0SApple OSS Distributions 					    tag);
4301*5c2921b0SApple OSS Distributions 				} else if ((theMap == kernel_map)
4302*5c2921b0SApple OSS Distributions 				    && (kernelStart >= io_kernel_static_start)
4303*5c2921b0SApple OSS Distributions 				    && (kernelStart < io_kernel_static_end)) {
4304*5c2921b0SApple OSS Distributions 					error = io_get_kernel_static_upl(theMap,
4305*5c2921b0SApple OSS Distributions 					    kernelStart,
4306*5c2921b0SApple OSS Distributions 					    &ioplSize,
4307*5c2921b0SApple OSS Distributions 					    &iopl.fPageOffset,
4308*5c2921b0SApple OSS Distributions 					    &iopl.fIOPL,
4309*5c2921b0SApple OSS Distributions 					    baseInfo,
4310*5c2921b0SApple OSS Distributions 					    &numPageInfo,
4311*5c2921b0SApple OSS Distributions 					    &highPage);
4312*5c2921b0SApple OSS Distributions 				} else {
4313*5c2921b0SApple OSS Distributions 					assert(theMap);
4314*5c2921b0SApple OSS Distributions 					error = vm_map_create_upl(theMap,
4315*5c2921b0SApple OSS Distributions 					    startPage,
4316*5c2921b0SApple OSS Distributions 					    (upl_size_t*)&ioplSize,
4317*5c2921b0SApple OSS Distributions 					    &iopl.fIOPL,
4318*5c2921b0SApple OSS Distributions 					    baseInfo,
4319*5c2921b0SApple OSS Distributions 					    &numPageInfo,
4320*5c2921b0SApple OSS Distributions 					    &ioplFlags,
4321*5c2921b0SApple OSS Distributions 					    tag);
4322*5c2921b0SApple OSS Distributions 				}
4323*5c2921b0SApple OSS Distributions 
4324*5c2921b0SApple OSS Distributions 				if (error != KERN_SUCCESS) {
4325*5c2921b0SApple OSS Distributions 					traceInterval.setEndArg2(error);
4326*5c2921b0SApple OSS Distributions 					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4327*5c2921b0SApple OSS Distributions 					goto abortExit;
4328*5c2921b0SApple OSS Distributions 				}
4329*5c2921b0SApple OSS Distributions 
4330*5c2921b0SApple OSS Distributions 				assert(ioplSize);
4331*5c2921b0SApple OSS Distributions 
4332*5c2921b0SApple OSS Distributions 				if (iopl.fIOPL) {
4333*5c2921b0SApple OSS Distributions 					highPage = upl_get_highest_page(iopl.fIOPL);
4334*5c2921b0SApple OSS Distributions 				}
4335*5c2921b0SApple OSS Distributions 				if (highPage > highestPage) {
4336*5c2921b0SApple OSS Distributions 					highestPage = highPage;
4337*5c2921b0SApple OSS Distributions 				}
4338*5c2921b0SApple OSS Distributions 
4339*5c2921b0SApple OSS Distributions 				if (baseInfo->device) {
4340*5c2921b0SApple OSS Distributions 					numPageInfo = 1;
4341*5c2921b0SApple OSS Distributions 					iopl.fFlags = kIOPLOnDevice;
4342*5c2921b0SApple OSS Distributions 				} else {
4343*5c2921b0SApple OSS Distributions 					iopl.fFlags = 0;
4344*5c2921b0SApple OSS Distributions 				}
4345*5c2921b0SApple OSS Distributions 
4346*5c2921b0SApple OSS Distributions 				if (byteAlignUPL) {
4347*5c2921b0SApple OSS Distributions 					if (iopl.fIOPL) {
4348*5c2921b0SApple OSS Distributions 						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4349*5c2921b0SApple OSS Distributions 						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4350*5c2921b0SApple OSS Distributions 					}
4351*5c2921b0SApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4352*5c2921b0SApple OSS Distributions 						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4353*5c2921b0SApple OSS Distributions 						startPage -= iopl.fPageOffset;
4354*5c2921b0SApple OSS Distributions 					}
4355*5c2921b0SApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4356*5c2921b0SApple OSS Distributions 					numBytes += iopl.fPageOffset;
4357*5c2921b0SApple OSS Distributions 				}
4358*5c2921b0SApple OSS Distributions 
4359*5c2921b0SApple OSS Distributions 				iopl.fIOMDOffset = mdOffset;
4360*5c2921b0SApple OSS Distributions 				iopl.fPageInfo = pageIndex;
4361*5c2921b0SApple OSS Distributions 
4362*5c2921b0SApple OSS Distributions 				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4363*5c2921b0SApple OSS Distributions 					// Clean up partial created and unsaved iopl
4364*5c2921b0SApple OSS Distributions 					if (iopl.fIOPL) {
4365*5c2921b0SApple OSS Distributions 						upl_abort(iopl.fIOPL, 0);
4366*5c2921b0SApple OSS Distributions 						upl_deallocate(iopl.fIOPL);
4367*5c2921b0SApple OSS Distributions 					}
4368*5c2921b0SApple OSS Distributions 					error = kIOReturnNoMemory;
4369*5c2921b0SApple OSS Distributions 					traceInterval.setEndArg2(error);
4370*5c2921b0SApple OSS Distributions 					goto abortExit;
4371*5c2921b0SApple OSS Distributions 				}
4372*5c2921b0SApple OSS Distributions 				dataP = NULL;
4373*5c2921b0SApple OSS Distributions 
4374*5c2921b0SApple OSS Distributions 				// Check for a multiple iopl's in one virtual range
4375*5c2921b0SApple OSS Distributions 				pageIndex += numPageInfo;
4376*5c2921b0SApple OSS Distributions 				mdOffset -= iopl.fPageOffset;
4377*5c2921b0SApple OSS Distributions 				numBytesWired += ioplSize;
4378*5c2921b0SApple OSS Distributions 				if (ioplSize < numBytes) {
4379*5c2921b0SApple OSS Distributions 					numBytes -= ioplSize;
4380*5c2921b0SApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4381*5c2921b0SApple OSS Distributions 						startPage += ioplSize;
4382*5c2921b0SApple OSS Distributions 					}
4383*5c2921b0SApple OSS Distributions 					mdOffset += ioplSize;
4384*5c2921b0SApple OSS Distributions 					iopl.fPageOffset = 0;
4385*5c2921b0SApple OSS Distributions 					if (mapper) {
4386*5c2921b0SApple OSS Distributions 						iopl.fMappedPage = mapBase + pageIndex;
4387*5c2921b0SApple OSS Distributions 					}
4388*5c2921b0SApple OSS Distributions 				} else {
4389*5c2921b0SApple OSS Distributions 					mdOffset += numBytes;
4390*5c2921b0SApple OSS Distributions 					break;
4391*5c2921b0SApple OSS Distributions 				}
4392*5c2921b0SApple OSS Distributions 			}
4393*5c2921b0SApple OSS Distributions 		}
4394*5c2921b0SApple OSS Distributions 
4395*5c2921b0SApple OSS Distributions 		_highestPage = highestPage;
4396*5c2921b0SApple OSS Distributions 		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4397*5c2921b0SApple OSS Distributions 
4398*5c2921b0SApple OSS Distributions 		if (UPL_COPYOUT_FROM & uplFlags) {
4399*5c2921b0SApple OSS Distributions 			_flags |= kIOMemoryPreparedReadOnly;
4400*5c2921b0SApple OSS Distributions 		}
4401*5c2921b0SApple OSS Distributions 		traceInterval.setEndCodes(numBytesWired, error);
4402*5c2921b0SApple OSS Distributions 	}
4403*5c2921b0SApple OSS Distributions 
4404*5c2921b0SApple OSS Distributions #if IOTRACKING
4405*5c2921b0SApple OSS Distributions 	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4406*5c2921b0SApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4407*5c2921b0SApple OSS Distributions 		if (!dataP->fWireTracking.link.next) {
4408*5c2921b0SApple OSS Distributions 			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4409*5c2921b0SApple OSS Distributions 		}
4410*5c2921b0SApple OSS Distributions 	}
4411*5c2921b0SApple OSS Distributions #endif /* IOTRACKING */
4412*5c2921b0SApple OSS Distributions 
4413*5c2921b0SApple OSS Distributions 	return error;
4414*5c2921b0SApple OSS Distributions 
4415*5c2921b0SApple OSS Distributions abortExit:
4416*5c2921b0SApple OSS Distributions 	{
4417*5c2921b0SApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4418*5c2921b0SApple OSS Distributions 		UInt done = getNumIOPL(_memoryEntries, dataP);
4419*5c2921b0SApple OSS Distributions 		ioPLBlock *ioplList = getIOPLList(dataP);
4420*5c2921b0SApple OSS Distributions 
4421*5c2921b0SApple OSS Distributions 		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4422*5c2921b0SApple OSS Distributions 			if (ioplList[ioplIdx].fIOPL) {
4423*5c2921b0SApple OSS Distributions 				upl_abort(ioplList[ioplIdx].fIOPL, 0);
4424*5c2921b0SApple OSS Distributions 				upl_deallocate(ioplList[ioplIdx].fIOPL);
4425*5c2921b0SApple OSS Distributions 			}
4426*5c2921b0SApple OSS Distributions 		}
4427*5c2921b0SApple OSS Distributions 		_memoryEntries->setLength(computeDataSize(0, 0));
4428*5c2921b0SApple OSS Distributions 	}
4429*5c2921b0SApple OSS Distributions 
4430*5c2921b0SApple OSS Distributions 	if (error == KERN_FAILURE) {
4431*5c2921b0SApple OSS Distributions 		error = kIOReturnCannotWire;
4432*5c2921b0SApple OSS Distributions 	} else if (error == KERN_MEMORY_ERROR) {
4433*5c2921b0SApple OSS Distributions 		error = kIOReturnNoResources;
4434*5c2921b0SApple OSS Distributions 	}
4435*5c2921b0SApple OSS Distributions 
4436*5c2921b0SApple OSS Distributions 	return error;
4437*5c2921b0SApple OSS Distributions }
4438*5c2921b0SApple OSS Distributions 
4439*5c2921b0SApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4440*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4441*5c2921b0SApple OSS Distributions {
4442*5c2921b0SApple OSS Distributions 	ioGMDData * dataP;
4443*5c2921b0SApple OSS Distributions 
4444*5c2921b0SApple OSS Distributions 	if (size > UINT_MAX) {
4445*5c2921b0SApple OSS Distributions 		return false;
4446*5c2921b0SApple OSS Distributions 	}
4447*5c2921b0SApple OSS Distributions 	if (!_memoryEntries) {
4448*5c2921b0SApple OSS Distributions 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4449*5c2921b0SApple OSS Distributions 		if (!_memoryEntries) {
4450*5c2921b0SApple OSS Distributions 			return false;
4451*5c2921b0SApple OSS Distributions 		}
4452*5c2921b0SApple OSS Distributions 	} else if (!_memoryEntries->initWithCapacity(size)) {
4453*5c2921b0SApple OSS Distributions 		return false;
4454*5c2921b0SApple OSS Distributions 	}
4455*5c2921b0SApple OSS Distributions 
4456*5c2921b0SApple OSS Distributions 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4457*5c2921b0SApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4458*5c2921b0SApple OSS Distributions 
4459*5c2921b0SApple OSS Distributions 	if (mapper == kIOMapperWaitSystem) {
4460*5c2921b0SApple OSS Distributions 		IOMapper::checkForSystemMapper();
4461*5c2921b0SApple OSS Distributions 		mapper = IOMapper::gSystem;
4462*5c2921b0SApple OSS Distributions 	}
4463*5c2921b0SApple OSS Distributions 	dataP->fMapper               = mapper;
4464*5c2921b0SApple OSS Distributions 	dataP->fPageCnt              = 0;
4465*5c2921b0SApple OSS Distributions 	dataP->fMappedBase           = 0;
4466*5c2921b0SApple OSS Distributions 	dataP->fDMAMapNumAddressBits = 64;
4467*5c2921b0SApple OSS Distributions 	dataP->fDMAMapAlignment      = 0;
4468*5c2921b0SApple OSS Distributions 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4469*5c2921b0SApple OSS Distributions 	dataP->fCompletionError      = false;
4470*5c2921b0SApple OSS Distributions 	dataP->fMappedBaseValid      = false;
4471*5c2921b0SApple OSS Distributions 
4472*5c2921b0SApple OSS Distributions 	return true;
4473*5c2921b0SApple OSS Distributions }
4474*5c2921b0SApple OSS Distributions 
4475*5c2921b0SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4476*5c2921b0SApple OSS Distributions IOMemoryDescriptor::dmaMap(
4477*5c2921b0SApple OSS Distributions 	IOMapper                    * mapper,
4478*5c2921b0SApple OSS Distributions 	IOMemoryDescriptor          * memory,
4479*5c2921b0SApple OSS Distributions 	IODMACommand                * command,
4480*5c2921b0SApple OSS Distributions 	const IODMAMapSpecification * mapSpec,
4481*5c2921b0SApple OSS Distributions 	uint64_t                      offset,
4482*5c2921b0SApple OSS Distributions 	uint64_t                      length,
4483*5c2921b0SApple OSS Distributions 	uint64_t                    * mapAddress,
4484*5c2921b0SApple OSS Distributions 	uint64_t                    * mapLength)
4485*5c2921b0SApple OSS Distributions {
4486*5c2921b0SApple OSS Distributions 	IOReturn err;
4487*5c2921b0SApple OSS Distributions 	uint32_t mapOptions;
4488*5c2921b0SApple OSS Distributions 
4489*5c2921b0SApple OSS Distributions 	mapOptions = 0;
4490*5c2921b0SApple OSS Distributions 	mapOptions |= kIODMAMapReadAccess;
4491*5c2921b0SApple OSS Distributions 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4492*5c2921b0SApple OSS Distributions 		mapOptions |= kIODMAMapWriteAccess;
4493*5c2921b0SApple OSS Distributions 	}
4494*5c2921b0SApple OSS Distributions 
4495*5c2921b0SApple OSS Distributions 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4496*5c2921b0SApple OSS Distributions 	    mapSpec, command, NULL, mapAddress, mapLength);
4497*5c2921b0SApple OSS Distributions 
4498*5c2921b0SApple OSS Distributions 	if (kIOReturnSuccess == err) {
4499*5c2921b0SApple OSS Distributions 		dmaMapRecord(mapper, command, *mapLength);
4500*5c2921b0SApple OSS Distributions 	}
4501*5c2921b0SApple OSS Distributions 
4502*5c2921b0SApple OSS Distributions 	return err;
4503*5c2921b0SApple OSS Distributions }
4504*5c2921b0SApple OSS Distributions 
4505*5c2921b0SApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4506*5c2921b0SApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4507*5c2921b0SApple OSS Distributions 	IOMapper                    * mapper,
4508*5c2921b0SApple OSS Distributions 	IODMACommand                * command,
4509*5c2921b0SApple OSS Distributions 	uint64_t                      mapLength)
4510*5c2921b0SApple OSS Distributions {
4511*5c2921b0SApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4512*5c2921b0SApple OSS Distributions 	kern_allocation_name_t alloc;
4513*5c2921b0SApple OSS Distributions 	int16_t                prior;
4514*5c2921b0SApple OSS Distributions 
4515*5c2921b0SApple OSS Distributions 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4516*5c2921b0SApple OSS Distributions 		kern_allocation_update_size(mapper->fAllocName, mapLength);
4517*5c2921b0SApple OSS Distributions 	}
4518*5c2921b0SApple OSS Distributions 
4519*5c2921b0SApple OSS Distributions 	if (!command) {
4520*5c2921b0SApple OSS Distributions 		return;
4521*5c2921b0SApple OSS Distributions 	}
4522*5c2921b0SApple OSS Distributions 	prior = OSAddAtomic16(1, &_dmaReferences);
4523*5c2921b0SApple OSS Distributions 	if (!prior) {
4524*5c2921b0SApple OSS Distributions 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4525*5c2921b0SApple OSS Distributions 			_mapName  = alloc;
4526*5c2921b0SApple OSS Distributions 			mapLength = _length;
4527*5c2921b0SApple OSS Distributions 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4528*5c2921b0SApple OSS Distributions 		} else {
4529*5c2921b0SApple OSS Distributions 			_mapName = NULL;
4530*5c2921b0SApple OSS Distributions 		}
4531*5c2921b0SApple OSS Distributions 	}
4532*5c2921b0SApple OSS Distributions }
4533*5c2921b0SApple OSS Distributions 
IOReturn
IOMemoryDescriptor::dmaUnmap(
	IOMapper                    * mapper,
	IODMACommand                * command,
	uint64_t                      offset,
	uint64_t                      mapAddress,
	uint64_t                      mapLength)
{
	// Tear down an IOMMU mapping previously created via dmaMap(), and
	// reverse the allocation accounting recorded by dmaMapRecord().
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
	IOReturn ret;
	kern_allocation_name_t alloc;
	kern_allocation_name_t mapName;
	int16_t prior;

	mapName = NULL;
	prior = 0;
	if (command) {
		mapName = _mapName;
		// Drop one outstanding DMA reference. An underflow means
		// unbalanced dmaMap()/dmaUnmap() calls — a caller bug.
		if (_dmaReferences) {
			prior = OSAddAtomic16(-1, &_dmaReferences);
		} else {
			panic("_dmaReferences underflow");
		}
	}

	// Nothing was mapped; succeed without touching the mapper.
	if (!mapLength) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

	if ((alloc = mapper->fAllocName)) {
		// Un-charge the mapped bytes from the mapper's allocation name.
		kern_allocation_update_size(alloc, -mapLength);
		// prior == 1 means this released the last reference: undo the
		// per-tag subtotal that dmaMapRecord() added on the first map
		// (which was recorded with _length, hence the reassignment).
		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
			mapLength = _length;
			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
		}
	}

	traceInterval.setEndArg1(ret);
	return ret;
}
4577*5c2921b0SApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn          err = kIOReturnSuccess;
	ioGMDData *       dataP;
	IOOptionBits      type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	// Host-only memory is never exposed to devices; report success with
	// no mapping created (*mapAddress stays 0).
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Physical descriptors, and partial-range requests (nonzero offset or
	// a length other than the whole descriptor), take the generic
	// superclass path. The fast path below maps the entire descriptor
	// directly from its wired page list.
	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t          mapOptions = 0;

		// NOTE: this local intentionally shadows the mapSpec parameter;
		// the fast path uses the descriptor's own DMA constraints.
		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// Whole pages starting page-aligned: the mapper may treat the
		// page list as fully occupied.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList      = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		// Record the new mapping for allocation accounting.
		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4651*5c2921b0SApple OSS Distributions 
4652*5c2921b0SApple OSS Distributions /*
4653*5c2921b0SApple OSS Distributions  * prepare
4654*5c2921b0SApple OSS Distributions  *
4655*5c2921b0SApple OSS Distributions  * Prepare the memory for an I/O transfer.  This involves paging in
4656*5c2921b0SApple OSS Distributions  * the memory, if necessary, and wiring it down for the duration of
4657*5c2921b0SApple OSS Distributions  * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
4660*5c2921b0SApple OSS Distributions  */
4661*5c2921b0SApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
	IOReturn     error    = kIOReturnSuccess;
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical memory needs no wiring; succeed immediately.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}

	// Only pageable (virtual / UIO) memory has to be wired down.
	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
		// Callers can opt out of preparing while the task is being
		// hard-throttled.
		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
			error = kIOReturnNotReady;
			goto finish;
		}
		error = wireVirtual(forDirection);
	}

	if (kIOReturnSuccess == error) {
		// First successful prepare(): perform one-time work under the
		// prepare lock.
		if (1 == ++_wireCount) {
			if (kIOMemoryClearEncrypt & _flags) {
				performOperation(kIOMemoryClearEncrypted, 0, _length);
			}

			ktraceEmitPhysicalSegments();
		}
	}

finish:

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}
	traceInterval.setEndArg1(error);

	return error;
}
4711*5c2921b0SApple OSS Distributions 
4712*5c2921b0SApple OSS Distributions /*
4713*5c2921b0SApple OSS Distributions  * complete
4714*5c2921b0SApple OSS Distributions  *
4715*5c2921b0SApple OSS Distributions  * Complete processing of the memory after an I/O transfer finishes.
4716*5c2921b0SApple OSS Distributions  * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
4719*5c2921b0SApple OSS Distributions  */
4720*5c2921b0SApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData  * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical memory was never wired by prepare(); nothing to undo.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// complete() without a matching prepare() is a caller bug.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		// Remember an error completion; it changes how the UPLs are
		// released below (abort instead of commit).
		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		// Dropping the last wire count: re-encrypt what prepare()
		// decrypted on the first wire.
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				// Still wired: just mark the IOPL data valid.
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Wire count hit zero: full teardown.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				// Release any outstanding IOMMU mapping first.
				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							// On error completion, abort (drop) rather
							// than commit the pages back.
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Truncate the IOPL/page bookkeeping back to the bare
				// header.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4825*5c2921b0SApple OSS Distributions 
/*
 * IOGeneralMemoryDescriptor::doMap
 *
 * Create (or reuse) a VM mapping of this descriptor.  Under the kIOMap64Bit
 * calling convention the IOMemoryMap object itself arrives via *__address
 * (see the cast below); its fOffset/fLength describe the window to map, and
 * on success its fAddress receives the mapped address.
 */
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	// Only the 64-bit (IOMemoryMap-carried-in-*__address) protocol is supported.
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t  err;

	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t    range0Len = 0;

	// The requested window must lie entirely within the descriptor.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	// Remote memory cannot be mapped through this path.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
	}

	// mapping source == dest? (could be much better)
	// Mapping a single-range descriptor back into its own task at any
	// address: just point the map at the existing range, no new VM needed.
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the backing memory-entry reference; writable unless
	// the mapping is read-only.
	if (!_memRef) {
		IOOptionBits createOptions = 0;
		if (!(kIOMapReadOnly & options)) {
			createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
			// Warn when a writable mapping is made from an outbound-only
			// descriptor by a task other than its creator.
			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
			}
#endif
		}
		err = memoryReferenceCreate(createOptions, &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}

	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	// kIOMapReference|kIOMapUnique: re-point an existing mapping at this
	// descriptor's pages by transposing UPLs instead of creating new VM.
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		do{
			upl_t               redirUPL2;
			upl_size_t          size;
			upl_control_flags_t flags;
			unsigned int        lock_count;

			// Requires a sole reference to the memory entry.
			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// Fully drop the recursive gIOMemoryLock around upl_transpose()
			// and then restore exactly the same lock depth afterwards.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			// Transpose failure is logged but deliberately not fatal.
			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: map the memory reference into the target map.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
#if IOTRACKING
		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
			// only dram maps in the default on development case
			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
		}
#endif /* IOTRACKING */
		if ((err == KERN_SUCCESS) && pager) {
			// Device memory: populate the device pager, tearing the
			// mapping back down if that fails.
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				// Inherit the descriptor's buffer cache mode.
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
4995*5c2921b0SApple OSS Distributions 
#if IOTRACKING
/*
 * Recover the IOMemoryMap that embeds the given fTracking field
 * (container_of-style pointer arithmetic) and report the task, address and
 * size of its mapping.
 *
 * Returns kIOReturnNotReady when the map has no address map, or when its
 * recorded address map no longer matches the task's current vm_map.
 */
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
// offsetof() substitute computed via a NULL base pointer; used to walk
// back from the embedded fTracking member to the enclosing IOMemoryMap.
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task    = map->fAddressTask;
	*address = map->fAddress;
	*size    = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5016*5c2921b0SApple OSS Distributions 
5017*5c2921b0SApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5018*5c2921b0SApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5019*5c2921b0SApple OSS Distributions 	vm_map_t                addressMap,
5020*5c2921b0SApple OSS Distributions 	IOVirtualAddress        __address,
5021*5c2921b0SApple OSS Distributions 	IOByteCount             __length )
5022*5c2921b0SApple OSS Distributions {
5023*5c2921b0SApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5024*5c2921b0SApple OSS Distributions 	IOReturn ret;
5025*5c2921b0SApple OSS Distributions 	ret = super::doUnmap(addressMap, __address, __length);
5026*5c2921b0SApple OSS Distributions 	traceInterval.setEndArg1(ret);
5027*5c2921b0SApple OSS Distributions 	return ret;
5028*5c2921b0SApple OSS Distributions }
5029*5c2921b0SApple OSS Distributions 
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

// Register the IOMemoryMap metaclass and default constructors/destructors
// (ZC_NONE zone flags).
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Reserved vtable padding slots kept for binary-compatible future expansion.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5046*5c2921b0SApple OSS Distributions /* ex-inline function implementation */
5047*5c2921b0SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()5048*5c2921b0SApple OSS Distributions IOMemoryMap::getPhysicalAddress()
5049*5c2921b0SApple OSS Distributions {
5050*5c2921b0SApple OSS Distributions 	return getPhysicalSegment( 0, NULL );
5051*5c2921b0SApple OSS Distributions }
5052*5c2921b0SApple OSS Distributions 
5053*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5054*5c2921b0SApple OSS Distributions 
5055*5c2921b0SApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5056*5c2921b0SApple OSS Distributions IOMemoryMap::init(
5057*5c2921b0SApple OSS Distributions 	task_t                  intoTask,
5058*5c2921b0SApple OSS Distributions 	mach_vm_address_t       toAddress,
5059*5c2921b0SApple OSS Distributions 	IOOptionBits            _options,
5060*5c2921b0SApple OSS Distributions 	mach_vm_size_t          _offset,
5061*5c2921b0SApple OSS Distributions 	mach_vm_size_t          _length )
5062*5c2921b0SApple OSS Distributions {
5063*5c2921b0SApple OSS Distributions 	if (!intoTask) {
5064*5c2921b0SApple OSS Distributions 		return false;
5065*5c2921b0SApple OSS Distributions 	}
5066*5c2921b0SApple OSS Distributions 
5067*5c2921b0SApple OSS Distributions 	if (!super::init()) {
5068*5c2921b0SApple OSS Distributions 		return false;
5069*5c2921b0SApple OSS Distributions 	}
5070*5c2921b0SApple OSS Distributions 
5071*5c2921b0SApple OSS Distributions 	fAddressMap  = get_task_map(intoTask);
5072*5c2921b0SApple OSS Distributions 	if (!fAddressMap) {
5073*5c2921b0SApple OSS Distributions 		return false;
5074*5c2921b0SApple OSS Distributions 	}
5075*5c2921b0SApple OSS Distributions 	vm_map_reference(fAddressMap);
5076*5c2921b0SApple OSS Distributions 
5077*5c2921b0SApple OSS Distributions 	fAddressTask = intoTask;
5078*5c2921b0SApple OSS Distributions 	fOptions     = _options;
5079*5c2921b0SApple OSS Distributions 	fLength      = _length;
5080*5c2921b0SApple OSS Distributions 	fOffset      = _offset;
5081*5c2921b0SApple OSS Distributions 	fAddress     = toAddress;
5082*5c2921b0SApple OSS Distributions 
5083*5c2921b0SApple OSS Distributions 	return true;
5084*5c2921b0SApple OSS Distributions }
5085*5c2921b0SApple OSS Distributions 
5086*5c2921b0SApple OSS Distributions bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5087*5c2921b0SApple OSS Distributions IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5088*5c2921b0SApple OSS Distributions {
5089*5c2921b0SApple OSS Distributions 	if (!_memory) {
5090*5c2921b0SApple OSS Distributions 		return false;
5091*5c2921b0SApple OSS Distributions 	}
5092*5c2921b0SApple OSS Distributions 
5093*5c2921b0SApple OSS Distributions 	if (!fSuperMap) {
5094*5c2921b0SApple OSS Distributions 		if ((_offset + fLength) > _memory->getLength()) {
5095*5c2921b0SApple OSS Distributions 			return false;
5096*5c2921b0SApple OSS Distributions 		}
5097*5c2921b0SApple OSS Distributions 		fOffset = _offset;
5098*5c2921b0SApple OSS Distributions 	}
5099*5c2921b0SApple OSS Distributions 
5100*5c2921b0SApple OSS Distributions 
5101*5c2921b0SApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5102*5c2921b0SApple OSS Distributions 	if (fMemory) {
5103*5c2921b0SApple OSS Distributions 		if (fMemory != _memory) {
5104*5c2921b0SApple OSS Distributions 			fMemory->removeMapping(this);
5105*5c2921b0SApple OSS Distributions 		}
5106*5c2921b0SApple OSS Distributions 	}
5107*5c2921b0SApple OSS Distributions 	fMemory = os::move(tempval);
5108*5c2921b0SApple OSS Distributions 
5109*5c2921b0SApple OSS Distributions 	return true;
5110*5c2921b0SApple OSS Distributions }
5111*5c2921b0SApple OSS Distributions 
/*
 * Base-class mapping hook: IOMemoryDescriptor itself cannot create
 * mappings; concrete subclasses (e.g. IOGeneralMemoryDescriptor) override
 * doMap() with a real implementation.
 */
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	return kIOReturnUnsupported;
}
5122*5c2921b0SApple OSS Distributions 
/*
 * Hold off a fault while this descriptor is redirected: loop on the SLEEP
 * macro until another thread clears kIOMemoryRedirected (see redirect()),
 * then let the fault proceed.  _pager and length are unused by this base
 * implementation.
 */
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Sleep at least once, re-checking the flag after each wakeup.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5139*5c2921b0SApple OSS Distributions 
/*
 * Register this descriptor's physical pages with a device pager so faults
 * on the mapping resolve to the device memory.  Walks physical segments
 * starting at sourceOffset, feeding each page (or a whole rounded segment
 * for contiguous pagers) to device_pager_populate_object().  For kernel
 * mappings the pages are additionally pre-faulted, since faulting later
 * from interrupt level is not possible.
 */
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A physically contiguous pager is always populated from offset 0.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset  = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the first segment down to a page boundary and widen the
	// request to cover the partial leading page.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// A device pager should not cover pages the VM already manages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Contiguous pagers can take the whole (rounded) segment at once;
		// others are populated one page at a time.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next segment; only the first iteration can have
		// a non-zero leading pageOffset.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Leftover bytes mean the descriptor ran out of segments early.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5242*5c2921b0SApple OSS Distributions 
/*
 * Tear down a mapping created by doMap().  Per the kIOMap64Bit convention
 * __address is actually the IOMemoryMap being destroyed, and __length must
 * be zero (the real map/address/length come from the mapping object).
 */
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	// Legacy callers passing a real length are not supported.
	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	// Overwrite mappings were placed over caller-provided VM; there is
	// nothing for us to deallocate.
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffers actually live in a pageable submap, not in
		// kernel_map itself; deallocate from the correct map.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	// Drop the tracking entry regardless of how the dealloc went.
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5287*5c2921b0SApple OSS Distributions 
5288*5c2921b0SApple OSS Distributions IOReturn
redirect(task_t safeTask,bool doRedirect)5289*5c2921b0SApple OSS Distributions IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
5290*5c2921b0SApple OSS Distributions {
5291*5c2921b0SApple OSS Distributions 	IOReturn            err = kIOReturnSuccess;
5292*5c2921b0SApple OSS Distributions 	IOMemoryMap *       mapping = NULL;
5293*5c2921b0SApple OSS Distributions 	OSSharedPtr<OSIterator>        iter;
5294*5c2921b0SApple OSS Distributions 
5295*5c2921b0SApple OSS Distributions 	LOCK;
5296*5c2921b0SApple OSS Distributions 
5297*5c2921b0SApple OSS Distributions 	if (doRedirect) {
5298*5c2921b0SApple OSS Distributions 		_flags |= kIOMemoryRedirected;
5299*5c2921b0SApple OSS Distributions 	} else {
5300*5c2921b0SApple OSS Distributions 		_flags &= ~kIOMemoryRedirected;
5301*5c2921b0SApple OSS Distributions 	}
5302*5c2921b0SApple OSS Distributions 
5303*5c2921b0SApple OSS Distributions 	do {
5304*5c2921b0SApple OSS Distributions 		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
5305*5c2921b0SApple OSS Distributions 			memory_object_t   pager;
5306*5c2921b0SApple OSS Distributions 
5307*5c2921b0SApple OSS Distributions 			if (reserved) {
5308*5c2921b0SApple OSS Distributions 				pager = (memory_object_t) reserved->dp.devicePager;
5309*5c2921b0SApple OSS Distributions 			} else {
5310*5c2921b0SApple OSS Distributions 				pager = MACH_PORT_NULL;
5311*5c2921b0SApple OSS Distributions 			}
5312*5c2921b0SApple OSS Distributions 
5313*5c2921b0SApple OSS Distributions 			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
5314*5c2921b0SApple OSS Distributions 				mapping->redirect( safeTask, doRedirect );
5315*5c2921b0SApple OSS Distributions 				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
5316*5c2921b0SApple OSS Distributions 					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
5317*5c2921b0SApple OSS Distributions 				}
5318*5c2921b0SApple OSS Distributions 			}
5319*5c2921b0SApple OSS Distributions 
5320*5c2921b0SApple OSS Distributions 			iter.reset();
5321*5c2921b0SApple OSS Distributions 		}
5322*5c2921b0SApple OSS Distributions 	} while (false);
5323*5c2921b0SApple OSS Distributions 
5324*5c2921b0SApple OSS Distributions 	if (!doRedirect) {
5325*5c2921b0SApple OSS Distributions 		WAKEUP;
5326*5c2921b0SApple OSS Distributions 	}
5327*5c2921b0SApple OSS Distributions 
5328*5c2921b0SApple OSS Distributions 	UNLOCK;
5329*5c2921b0SApple OSS Distributions 
5330*5c2921b0SApple OSS Distributions #ifndef __LP64__
5331*5c2921b0SApple OSS Distributions 	// temporary binary compatibility
5332*5c2921b0SApple OSS Distributions 	IOSubMemoryDescriptor * subMem;
5333*5c2921b0SApple OSS Distributions 	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
5334*5c2921b0SApple OSS Distributions 		err = subMem->redirect( safeTask, doRedirect );
5335*5c2921b0SApple OSS Distributions 	} else {
5336*5c2921b0SApple OSS Distributions 		err = kIOReturnSuccess;
5337*5c2921b0SApple OSS Distributions 	}
5338*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
5339*5c2921b0SApple OSS Distributions 
5340*5c2921b0SApple OSS Distributions 	return err;
5341*5c2921b0SApple OSS Distributions }
5342*5c2921b0SApple OSS Distributions 
/*
 * Apply (or undo) redirection to this single mapping.
 *
 * For a non-safe task the pages are simply unmapped so subsequent accesses
 * fault (they are re-established on demand, or by populateDevicePager on
 * the un-redirect path in IOMemoryDescriptor::redirect).  safeTask, when
 * non-NULL, names a task whose mapping must stay intact.
 */
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// Submaps carry no VM state of their own; the historical
		// delegation to the parent map is intentionally disabled.
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing to do for a mapping never established or already
			// torn down.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Unmap unless this map belongs to the task being protected;
			// never touch static (externally created) mappings.
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// For the protected task's write-combined mapping, toggle
				// the cache mode in place instead of unmapping: inhibit
				// while redirected, restore write-combine afterwards.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// Physical-range descriptors track redirection state themselves;
	// propagate the change when it differs from their current state.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5386*5c2921b0SApple OSS Distributions 
5387*5c2921b0SApple OSS Distributions IOReturn
unmap(void)5388*5c2921b0SApple OSS Distributions IOMemoryMap::unmap( void )
5389*5c2921b0SApple OSS Distributions {
5390*5c2921b0SApple OSS Distributions 	IOReturn    err;
5391*5c2921b0SApple OSS Distributions 
5392*5c2921b0SApple OSS Distributions 	LOCK;
5393*5c2921b0SApple OSS Distributions 
5394*5c2921b0SApple OSS Distributions 	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5395*5c2921b0SApple OSS Distributions 	    && (0 == (kIOMapStatic & fOptions))) {
5396*5c2921b0SApple OSS Distributions 		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5397*5c2921b0SApple OSS Distributions 	} else {
5398*5c2921b0SApple OSS Distributions 		err = kIOReturnSuccess;
5399*5c2921b0SApple OSS Distributions 	}
5400*5c2921b0SApple OSS Distributions 
5401*5c2921b0SApple OSS Distributions 	if (fAddressMap) {
5402*5c2921b0SApple OSS Distributions 		vm_map_deallocate(fAddressMap);
5403*5c2921b0SApple OSS Distributions 		fAddressMap = NULL;
5404*5c2921b0SApple OSS Distributions 	}
5405*5c2921b0SApple OSS Distributions 
5406*5c2921b0SApple OSS Distributions 	fAddress = 0;
5407*5c2921b0SApple OSS Distributions 
5408*5c2921b0SApple OSS Distributions 	UNLOCK;
5409*5c2921b0SApple OSS Distributions 
5410*5c2921b0SApple OSS Distributions 	return err;
5411*5c2921b0SApple OSS Distributions }
5412*5c2921b0SApple OSS Distributions 
/*
 * Called when the task owning this mapping terminates.  The dying task's
 * address space is reclaimed by the VM system, so an explicit unmap is
 * only performed when a user client requested it (userClientUnmap());
 * otherwise just the bookkeeping references are cleared.
 */
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		// doUnmap() (reached via unmap()) removes the tracking entry
		// itself; do it here only on the path that skips unmap().
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	// Drop the vm_map reference and forget the dead task.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress     = 0;
	UNLOCK;
}
5434*5c2921b0SApple OSS Distributions 
/*
 * Request that this mapping be explicitly unmapped when its owning task
 * dies (see taskDied()), rather than relying on VM map teardown alone.
 */
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5441*5c2921b0SApple OSS Distributions 
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptors _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these mappings
// are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	LOCK;
	// freeWhen == 2: free once only the descriptor's set reference
	// (plus the one being dropped) remains.
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5453*5c2921b0SApple OSS Distributions 
/*
 * Final teardown of a mapping: undo the VM mapping, detach from the
 * backing memory descriptor, and release any redirect UPL.
 */
void
IOMemoryMap::free()
{
	// Tear down VM state first, while fMemory is still valid.
	unmap();

	if (fMemory) {
		LOCK;
		// Remove the descriptor's back-reference to this mapping before
		// dropping our reference to the descriptor.
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	if (fRedirUPL) {
		// Commit and destroy the access-blocking UPL taken out by
		// redirect(newBackingMemory, ...).
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5477*5c2921b0SApple OSS Distributions 
// Accessor: byte length of the mapped range.
IOByteCount
IOMemoryMap::getLength()
{
	return fLength;
}
5483*5c2921b0SApple OSS Distributions 
/*
 * Legacy accessor for the mapped address.  On non-LP64 kernels it warns
 * (with a backtrace) when the 64-bit address would be truncated into a
 * 32-bit IOVirtualAddress; callers should use getAddress() instead.
 */
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// NOTE(review): the result of this call is deliberately
		// discarded — presumably it is made only so the parent map can
		// emit the same truncation warning; confirm before changing.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5499*5c2921b0SApple OSS Distributions 
#ifndef __LP64__
// Non-LP64 builds only: 64-bit-clean accessors for the mapped address
// and size (the LP64 equivalents are declared elsewhere).
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5513*5c2921b0SApple OSS Distributions 
5514*5c2921b0SApple OSS Distributions 
5515*5c2921b0SApple OSS Distributions task_t
getAddressTask()5516*5c2921b0SApple OSS Distributions IOMemoryMap::getAddressTask()
5517*5c2921b0SApple OSS Distributions {
5518*5c2921b0SApple OSS Distributions 	if (fSuperMap) {
5519*5c2921b0SApple OSS Distributions 		return fSuperMap->getAddressTask();
5520*5c2921b0SApple OSS Distributions 	} else {
5521*5c2921b0SApple OSS Distributions 		return fAddressTask;
5522*5c2921b0SApple OSS Distributions 	}
5523*5c2921b0SApple OSS Distributions }
5524*5c2921b0SApple OSS Distributions 
// Accessor: the IOOptionBits this mapping was created with.
IOOptionBits
IOMemoryMap::getMapOptions()
{
	return fOptions;
}
5530*5c2921b0SApple OSS Distributions 
// Accessor: borrowed (non-retained) pointer to the backing descriptor.
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
5536*5c2921b0SApple OSS Distributions 
5537*5c2921b0SApple OSS Distributions IOMemoryMap *
copyCompatible(IOMemoryMap * newMapping)5538*5c2921b0SApple OSS Distributions IOMemoryMap::copyCompatible(
5539*5c2921b0SApple OSS Distributions 	IOMemoryMap * newMapping )
5540*5c2921b0SApple OSS Distributions {
5541*5c2921b0SApple OSS Distributions 	task_t              task      = newMapping->getAddressTask();
5542*5c2921b0SApple OSS Distributions 	mach_vm_address_t   toAddress = newMapping->fAddress;
5543*5c2921b0SApple OSS Distributions 	IOOptionBits        _options  = newMapping->fOptions;
5544*5c2921b0SApple OSS Distributions 	mach_vm_size_t      _offset   = newMapping->fOffset;
5545*5c2921b0SApple OSS Distributions 	mach_vm_size_t      _length   = newMapping->fLength;
5546*5c2921b0SApple OSS Distributions 
5547*5c2921b0SApple OSS Distributions 	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5548*5c2921b0SApple OSS Distributions 		return NULL;
5549*5c2921b0SApple OSS Distributions 	}
5550*5c2921b0SApple OSS Distributions 	if ((fOptions ^ _options) & kIOMapReadOnly) {
5551*5c2921b0SApple OSS Distributions 		return NULL;
5552*5c2921b0SApple OSS Distributions 	}
5553*5c2921b0SApple OSS Distributions 	if ((fOptions ^ _options) & kIOMapGuardedMask) {
5554*5c2921b0SApple OSS Distributions 		return NULL;
5555*5c2921b0SApple OSS Distributions 	}
5556*5c2921b0SApple OSS Distributions 	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5557*5c2921b0SApple OSS Distributions 	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
5558*5c2921b0SApple OSS Distributions 		return NULL;
5559*5c2921b0SApple OSS Distributions 	}
5560*5c2921b0SApple OSS Distributions 
5561*5c2921b0SApple OSS Distributions 	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5562*5c2921b0SApple OSS Distributions 		return NULL;
5563*5c2921b0SApple OSS Distributions 	}
5564*5c2921b0SApple OSS Distributions 
5565*5c2921b0SApple OSS Distributions 	if (_offset < fOffset) {
5566*5c2921b0SApple OSS Distributions 		return NULL;
5567*5c2921b0SApple OSS Distributions 	}
5568*5c2921b0SApple OSS Distributions 
5569*5c2921b0SApple OSS Distributions 	_offset -= fOffset;
5570*5c2921b0SApple OSS Distributions 
5571*5c2921b0SApple OSS Distributions 	if ((_offset + _length) > fLength) {
5572*5c2921b0SApple OSS Distributions 		return NULL;
5573*5c2921b0SApple OSS Distributions 	}
5574*5c2921b0SApple OSS Distributions 
5575*5c2921b0SApple OSS Distributions 	if ((fLength == _length) && (!_offset)) {
5576*5c2921b0SApple OSS Distributions 		retain();
5577*5c2921b0SApple OSS Distributions 		newMapping = this;
5578*5c2921b0SApple OSS Distributions 	} else {
5579*5c2921b0SApple OSS Distributions 		newMapping->fSuperMap.reset(this, OSRetain);
5580*5c2921b0SApple OSS Distributions 		newMapping->fOffset   = fOffset + _offset;
5581*5c2921b0SApple OSS Distributions 		newMapping->fAddress  = fAddress + _offset;
5582*5c2921b0SApple OSS Distributions 	}
5583*5c2921b0SApple OSS Distributions 
5584*5c2921b0SApple OSS Distributions 	return newMapping;
5585*5c2921b0SApple OSS Distributions }
5586*5c2921b0SApple OSS Distributions 
5587*5c2921b0SApple OSS Distributions IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5588*5c2921b0SApple OSS Distributions IOMemoryMap::wireRange(
5589*5c2921b0SApple OSS Distributions 	uint32_t                options,
5590*5c2921b0SApple OSS Distributions 	mach_vm_size_t          offset,
5591*5c2921b0SApple OSS Distributions 	mach_vm_size_t          length)
5592*5c2921b0SApple OSS Distributions {
5593*5c2921b0SApple OSS Distributions 	IOReturn kr;
5594*5c2921b0SApple OSS Distributions 	mach_vm_address_t start = trunc_page_64(fAddress + offset);
5595*5c2921b0SApple OSS Distributions 	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
5596*5c2921b0SApple OSS Distributions 	vm_prot_t prot;
5597*5c2921b0SApple OSS Distributions 
5598*5c2921b0SApple OSS Distributions 	prot = (kIODirectionOutIn & options);
5599*5c2921b0SApple OSS Distributions 	if (prot) {
5600*5c2921b0SApple OSS Distributions 		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5601*5c2921b0SApple OSS Distributions 	} else {
5602*5c2921b0SApple OSS Distributions 		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5603*5c2921b0SApple OSS Distributions 	}
5604*5c2921b0SApple OSS Distributions 
5605*5c2921b0SApple OSS Distributions 	return kr;
5606*5c2921b0SApple OSS Distributions }
5607*5c2921b0SApple OSS Distributions 
5608*5c2921b0SApple OSS Distributions 
/*
 * Translate an offset within this mapping to a physical address by
 * delegating to the backing descriptor at (fOffset + _offset).
 * _length, if non-NULL, receives the length of the physically contiguous
 * segment, per IOMemoryDescriptor::getPhysicalSegment.
 */
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress   address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5628*5c2921b0SApple OSS Distributions 
5629*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5630*5c2921b0SApple OSS Distributions 
5631*5c2921b0SApple OSS Distributions #undef super
5632*5c2921b0SApple OSS Distributions #define super OSObject
5633*5c2921b0SApple OSS Distributions 
5634*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5635*5c2921b0SApple OSS Distributions 
5636*5c2921b0SApple OSS Distributions void
initialize(void)5637*5c2921b0SApple OSS Distributions IOMemoryDescriptor::initialize( void )
5638*5c2921b0SApple OSS Distributions {
5639*5c2921b0SApple OSS Distributions 	if (NULL == gIOMemoryLock) {
5640*5c2921b0SApple OSS Distributions 		gIOMemoryLock = IORecursiveLockAlloc();
5641*5c2921b0SApple OSS Distributions 	}
5642*5c2921b0SApple OSS Distributions 
5643*5c2921b0SApple OSS Distributions 	gIOLastPage = IOGetLastPageNumber();
5644*5c2921b0SApple OSS Distributions }
5645*5c2921b0SApple OSS Distributions 
5646*5c2921b0SApple OSS Distributions void
free(void)5647*5c2921b0SApple OSS Distributions IOMemoryDescriptor::free( void )
5648*5c2921b0SApple OSS Distributions {
5649*5c2921b0SApple OSS Distributions 	if (_mappings) {
5650*5c2921b0SApple OSS Distributions 		_mappings.reset();
5651*5c2921b0SApple OSS Distributions 	}
5652*5c2921b0SApple OSS Distributions 
5653*5c2921b0SApple OSS Distributions 	if (reserved) {
5654*5c2921b0SApple OSS Distributions 		cleanKernelReserved(reserved);
5655*5c2921b0SApple OSS Distributions 		IOFreeType(reserved, IOMemoryDescriptorReserved);
5656*5c2921b0SApple OSS Distributions 		reserved = NULL;
5657*5c2921b0SApple OSS Distributions 	}
5658*5c2921b0SApple OSS Distributions 	super::free();
5659*5c2921b0SApple OSS Distributions }
5660*5c2921b0SApple OSS Distributions 
/*
 * Record a mapping of this descriptor at a caller-chosen, pre-existing
 * address in the given task.  kIOMapStatic marks the mapping as not
 * created by IOKit, so it will not be unmapped on release.
 */
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t                  intoTask,
	IOVirtualAddress        mapAddress,
	IOOptionBits            options )
{
	// Delegate to createMappingInTask over the descriptor's full length.
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}
5671*5c2921b0SApple OSS Distributions 
/*
 * Map the whole descriptor into the kernel task at an address chosen by
 * the VM system (kIOMapAnywhere).
 */
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits            options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}
5680*5c2921b0SApple OSS Distributions 
#ifndef __LP64__
/*
 * Legacy 32-bit mapping entry point.  A fixed-address request into a
 * 64-bit task cannot be expressed through a 32-bit IOVirtualAddress, so
 * such callers are warned and refused; use createMappingInTask() instead.
 */
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	const bool fixedAddress = (0 == (kIOMapAnywhere & options));

	if (fixedAddress && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5699*5c2921b0SApple OSS Distributions 
5700*5c2921b0SApple OSS Distributions OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5701*5c2921b0SApple OSS Distributions IOMemoryDescriptor::createMappingInTask(
5702*5c2921b0SApple OSS Distributions 	task_t                  intoTask,
5703*5c2921b0SApple OSS Distributions 	mach_vm_address_t       atAddress,
5704*5c2921b0SApple OSS Distributions 	IOOptionBits            options,
5705*5c2921b0SApple OSS Distributions 	mach_vm_size_t          offset,
5706*5c2921b0SApple OSS Distributions 	mach_vm_size_t          length)
5707*5c2921b0SApple OSS Distributions {
5708*5c2921b0SApple OSS Distributions 	IOMemoryMap * result;
5709*5c2921b0SApple OSS Distributions 	IOMemoryMap * mapping;
5710*5c2921b0SApple OSS Distributions 
5711*5c2921b0SApple OSS Distributions 	if (0 == length) {
5712*5c2921b0SApple OSS Distributions 		length = getLength();
5713*5c2921b0SApple OSS Distributions 	}
5714*5c2921b0SApple OSS Distributions 
5715*5c2921b0SApple OSS Distributions 	mapping = new IOMemoryMap;
5716*5c2921b0SApple OSS Distributions 
5717*5c2921b0SApple OSS Distributions 	if (mapping
5718*5c2921b0SApple OSS Distributions 	    && !mapping->init( intoTask, atAddress,
5719*5c2921b0SApple OSS Distributions 	    options, offset, length )) {
5720*5c2921b0SApple OSS Distributions 		mapping->release();
5721*5c2921b0SApple OSS Distributions 		mapping = NULL;
5722*5c2921b0SApple OSS Distributions 	}
5723*5c2921b0SApple OSS Distributions 
5724*5c2921b0SApple OSS Distributions 	if (mapping) {
5725*5c2921b0SApple OSS Distributions 		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5726*5c2921b0SApple OSS Distributions 	} else {
5727*5c2921b0SApple OSS Distributions 		result = nullptr;
5728*5c2921b0SApple OSS Distributions 	}
5729*5c2921b0SApple OSS Distributions 
5730*5c2921b0SApple OSS Distributions #if DEBUG
5731*5c2921b0SApple OSS Distributions 	if (!result) {
5732*5c2921b0SApple OSS Distributions 		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5733*5c2921b0SApple OSS Distributions 		    this, atAddress, (uint32_t) options, offset, length);
5734*5c2921b0SApple OSS Distributions 	}
5735*5c2921b0SApple OSS Distributions #endif
5736*5c2921b0SApple OSS Distributions 
5737*5c2921b0SApple OSS Distributions 	// already retained through makeMapping
5738*5c2921b0SApple OSS Distributions 	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5739*5c2921b0SApple OSS Distributions 
5740*5c2921b0SApple OSS Distributions 	return retval;
5741*5c2921b0SApple OSS Distributions }
5742*5c2921b0SApple OSS Distributions 
#ifndef __LP64__ // there is only a 64 bit version for LP64
// Compatibility shim: forward the IOByteCount variant to the
// mach_vm_size_t implementation below.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5752*5c2921b0SApple OSS Distributions 
/*
 * Re-point this mapping at a different backing memory descriptor.
 *
 * Phase 1 (first call, newBackingMemory may be NULL): if the current
 * backing memory entry is singly referenced, take out an access-blocking
 * UPL (UPL_BLOCK_ACCESS) over it, so threads touching the pages stall,
 * and unmap physical-range mappings.
 * Phase 2 (newBackingMemory non-NULL): rebuild this same IOMemoryMap
 * over the new descriptor in place (kIOMapUnique | kIOMapReference),
 * then commit and release the blocking UPL.
 */
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			// Remember whether the current backing is a physical range.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// Phase 1: block access via a UPL while the switch is pending.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				// On failure just proceed without the blocking UPL.
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					// Deliberately disabled descriptor-level redirect.
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			// Phase 2: install the replacement backing memory.
			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// makeMapping with kIOMapReference reuses `this`;
					// anything else is a failure.
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				if (fRedirUPL) {
					// Unblock access: commit and destroy the blocking UPL.
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				// Deliberately disabled; see phase 1.
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5813*5c2921b0SApple OSS Distributions 
/*
 * Create (or adopt, or reuse) an IOMemoryMap for this descriptor.
 *
 * Despite the legacy signature, in the 64-bit path __address does not carry
 * an address: it carries an IOMemoryMap * describing the requested mapping
 * (the code immediately casts it back).  __offset is added to the mapping's
 * own fOffset.  kIOMap64Bit is mandatory on !__LP64__ builds.
 *
 * Behavior by option bits (all under LOCK, via the single-pass do/while):
 *  - kIOMapStatic:    adopt the passed-in mapping as-is.
 *  - kIOMapUnique:    for physical descriptors, wrap the physical range in a
 *                     fresh descriptor and map that instead.
 *  - otherwise:       first try to share a compatible existing mapping from
 *                     _mappings; fall back to creating a new one via doMap().
 *
 * Returns the resulting IOMemoryMap, or NULL on failure.  On the failure
 * paths the incoming mapping's reference is consumed (release()).
 */
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result  = NULL;

	// __address actually carries the proto-mapping object (see header comment).
	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Pre-existing/static mapping: just register and adopt it.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount       physLen;

//	    if (owner != this)		continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Must be able to cover the whole requested length with
				// one contiguous physical segment at 'offset'.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				// Map through a fresh descriptor over the raw physical
				// range, bypassing any system mapper.
				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
					        addMapping(result);
					        result->setMemoryDescriptor(this, offset);
					        return true;
					}
					return false;
				});
			}
			if (result || (options & kIOMapReference)) {
				// Either we found a shareable mapping, or the caller only
				// wanted a reference to an existing one; drop the proto-
				// mapping unless it is itself the result.
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		if (!mapDesc) {
			// Map this descriptor itself (take a retain for mapDesc).
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			// doMap failed: consume the proto-mapping's reference.
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
5911*5c2921b0SApple OSS Distributions 
5912*5c2921b0SApple OSS Distributions void
addMapping(IOMemoryMap * mapping)5913*5c2921b0SApple OSS Distributions IOMemoryDescriptor::addMapping(
5914*5c2921b0SApple OSS Distributions 	IOMemoryMap * mapping )
5915*5c2921b0SApple OSS Distributions {
5916*5c2921b0SApple OSS Distributions 	if (mapping) {
5917*5c2921b0SApple OSS Distributions 		if (NULL == _mappings) {
5918*5c2921b0SApple OSS Distributions 			_mappings = OSSet::withCapacity(1);
5919*5c2921b0SApple OSS Distributions 		}
5920*5c2921b0SApple OSS Distributions 		if (_mappings) {
5921*5c2921b0SApple OSS Distributions 			_mappings->setObject( mapping );
5922*5c2921b0SApple OSS Distributions 		}
5923*5c2921b0SApple OSS Distributions 	}
5924*5c2921b0SApple OSS Distributions }
5925*5c2921b0SApple OSS Distributions 
5926*5c2921b0SApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)5927*5c2921b0SApple OSS Distributions IOMemoryDescriptor::removeMapping(
5928*5c2921b0SApple OSS Distributions 	IOMemoryMap * mapping )
5929*5c2921b0SApple OSS Distributions {
5930*5c2921b0SApple OSS Distributions 	if (_mappings) {
5931*5c2921b0SApple OSS Distributions 		_mappings->removeObject( mapping);
5932*5c2921b0SApple OSS Distributions 	}
5933*5c2921b0SApple OSS Distributions }
5934*5c2921b0SApple OSS Distributions 
5935*5c2921b0SApple OSS Distributions void
setMapperOptions(uint16_t options)5936*5c2921b0SApple OSS Distributions IOMemoryDescriptor::setMapperOptions( uint16_t options)
5937*5c2921b0SApple OSS Distributions {
5938*5c2921b0SApple OSS Distributions 	_iomapperOptions = options;
5939*5c2921b0SApple OSS Distributions }
5940*5c2921b0SApple OSS Distributions 
5941*5c2921b0SApple OSS Distributions uint16_t
getMapperOptions(void)5942*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getMapperOptions( void )
5943*5c2921b0SApple OSS Distributions {
5944*5c2921b0SApple OSS Distributions 	return _iomapperOptions;
5945*5c2921b0SApple OSS Distributions }
5946*5c2921b0SApple OSS Distributions 
5947*5c2921b0SApple OSS Distributions #ifndef __LP64__
5948*5c2921b0SApple OSS Distributions // obsolete initializers
5949*5c2921b0SApple OSS Distributions // - initWithOptions is the designated initializer
5950*5c2921b0SApple OSS Distributions bool
initWithAddress(void * address,IOByteCount length,IODirection direction)5951*5c2921b0SApple OSS Distributions IOMemoryDescriptor::initWithAddress(void *      address,
5952*5c2921b0SApple OSS Distributions     IOByteCount   length,
5953*5c2921b0SApple OSS Distributions     IODirection direction)
5954*5c2921b0SApple OSS Distributions {
5955*5c2921b0SApple OSS Distributions 	return false;
5956*5c2921b0SApple OSS Distributions }
5957*5c2921b0SApple OSS Distributions 
5958*5c2921b0SApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)5959*5c2921b0SApple OSS Distributions IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
5960*5c2921b0SApple OSS Distributions     IOByteCount    length,
5961*5c2921b0SApple OSS Distributions     IODirection  direction,
5962*5c2921b0SApple OSS Distributions     task_t       task)
5963*5c2921b0SApple OSS Distributions {
5964*5c2921b0SApple OSS Distributions 	return false;
5965*5c2921b0SApple OSS Distributions }
5966*5c2921b0SApple OSS Distributions 
5967*5c2921b0SApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)5968*5c2921b0SApple OSS Distributions IOMemoryDescriptor::initWithPhysicalAddress(
5969*5c2921b0SApple OSS Distributions 	IOPhysicalAddress      address,
5970*5c2921b0SApple OSS Distributions 	IOByteCount            length,
5971*5c2921b0SApple OSS Distributions 	IODirection            direction )
5972*5c2921b0SApple OSS Distributions {
5973*5c2921b0SApple OSS Distributions 	return false;
5974*5c2921b0SApple OSS Distributions }
5975*5c2921b0SApple OSS Distributions 
5976*5c2921b0SApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)5977*5c2921b0SApple OSS Distributions IOMemoryDescriptor::initWithRanges(
5978*5c2921b0SApple OSS Distributions 	IOVirtualRange * ranges,
5979*5c2921b0SApple OSS Distributions 	UInt32           withCount,
5980*5c2921b0SApple OSS Distributions 	IODirection      direction,
5981*5c2921b0SApple OSS Distributions 	task_t           task,
5982*5c2921b0SApple OSS Distributions 	bool             asReference)
5983*5c2921b0SApple OSS Distributions {
5984*5c2921b0SApple OSS Distributions 	return false;
5985*5c2921b0SApple OSS Distributions }
5986*5c2921b0SApple OSS Distributions 
5987*5c2921b0SApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)5988*5c2921b0SApple OSS Distributions IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
5989*5c2921b0SApple OSS Distributions     UInt32           withCount,
5990*5c2921b0SApple OSS Distributions     IODirection      direction,
5991*5c2921b0SApple OSS Distributions     bool             asReference)
5992*5c2921b0SApple OSS Distributions {
5993*5c2921b0SApple OSS Distributions 	return false;
5994*5c2921b0SApple OSS Distributions }
5995*5c2921b0SApple OSS Distributions 
5996*5c2921b0SApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)5997*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
5998*5c2921b0SApple OSS Distributions     IOByteCount * lengthOfSegment)
5999*5c2921b0SApple OSS Distributions {
6000*5c2921b0SApple OSS Distributions 	return NULL;
6001*5c2921b0SApple OSS Distributions }
6002*5c2921b0SApple OSS Distributions #endif /* !__LP64__ */
6003*5c2921b0SApple OSS Distributions 
6004*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6005*5c2921b0SApple OSS Distributions 
/*
 * Serialize this descriptor's ranges as an OSArray of dictionaries, each
 * with "address" and "length" OSNumber entries.
 *
 * The range data is snapshotted into a local buffer under LOCK (allocation
 * happens before taking the lock), then the OSObject graph is built without
 * the lock held.  Returns false on NULL serializer, allocation failure, or
 * if _rangesCount changed between the allocation and the locked copy.
 */
bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
	OSSharedPtr<OSObject>           values[2] = {NULL};
	OSSharedPtr<OSArray>            array;

	// Local snapshot of one range: user-visible address/length pair.
	struct SerData {
		user_addr_t address;
		user_size_t length;
	};

	unsigned int index;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	// Buffer sized for the current range count; re-validated under LOCK below.
	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
	if (!vcopy) {
		return false;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (vcopy.size() == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < vcopy.size(); index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index);
			vcopy[index].address = addr;
			vcopy[index].length  = len;
		}
	} else {
		// The descriptor changed out from under us.  Give up.
		UNLOCK;
		return false;
	}
	UNLOCK;

	// Build the serializable OSArray from the snapshot (lock no longer held;
	// OSSharedPtr cleans up everything on the early-return failure paths).
	for (index = 0; index < vcopy.size(); index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			return false;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			return false;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			return false;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	return array->serialize(s);
}
6080*5c2921b0SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6081*5c2921b0SApple OSS Distributions 
// Reserved vtable-padding slots for IOMemoryDescriptor binary compatibility.
// Slot 0 has been consumed by a later API addition on all builds; on legacy
// 32-bit builds slots 1-7 are also in use, while on LP64 they remain padding.
// Slots 8-15 are unused padding everywhere.
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6108*5c2921b0SApple OSS Distributions 
6109*5c2921b0SApple OSS Distributions /* ex-inline function implementation */
6110*5c2921b0SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()6111*5c2921b0SApple OSS Distributions IOMemoryDescriptor::getPhysicalAddress()
6112*5c2921b0SApple OSS Distributions {
6113*5c2921b0SApple OSS Distributions 	return getPhysicalSegment( 0, NULL );
6114*5c2921b0SApple OSS Distributions }
6115*5c2921b0SApple OSS Distributions 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

/*
 * Factory: allocate a mixed-data buffer object with the requested initial
 * capacity.  Returns NULL if either the allocation or the initialization
 * fails.
 */
OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	OSSharedPtr<_IOMemoryDescriptorMixedData> obj = OSMakeShared<_IOMemoryDescriptorMixedData>();
	if (!obj) {
		return nullptr;
	}
	if (!obj->initWithCapacity(capacity)) {
		return nullptr;
	}
	return obj;
}
6127*5c2921b0SApple OSS Distributions 
6128*5c2921b0SApple OSS Distributions /*
6129*5c2921b0SApple OSS Distributions  * Ignore -Wxnu-typed-allocators within IOMemoryDescriptorMixedData
6130*5c2921b0SApple OSS Distributions  * because it implements an allocator.
6131*5c2921b0SApple OSS Distributions  */
6132*5c2921b0SApple OSS Distributions __typed_allocators_ignore_push
6133*5c2921b0SApple OSS Distributions 
/*
 * Initialize (or re-initialize) the object with at least 'capacity' bytes
 * of backing storage.  Resets the logical length to 0.  Returns false if
 * OSObject::init() or the buffer allocation fails.
 */
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	// Re-init path: drop an existing buffer that is either unwanted
	// (capacity == 0) or too small for the new request.
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	// Allocate fresh storage only if we have none and some was requested;
	// an existing buffer that is already large enough is kept as-is.
	if (!_data && capacity) {
		_data = IOMalloc(capacity);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6157*5c2921b0SApple OSS Distributions 
6158*5c2921b0SApple OSS Distributions void
free()6159*5c2921b0SApple OSS Distributions _IOMemoryDescriptorMixedData::free()
6160*5c2921b0SApple OSS Distributions {
6161*5c2921b0SApple OSS Distributions 	freeMemory();
6162*5c2921b0SApple OSS Distributions 	OSObject::free();
6163*5c2921b0SApple OSS Distributions }
6164*5c2921b0SApple OSS Distributions 
6165*5c2921b0SApple OSS Distributions void
freeMemory()6166*5c2921b0SApple OSS Distributions _IOMemoryDescriptorMixedData::freeMemory()
6167*5c2921b0SApple OSS Distributions {
6168*5c2921b0SApple OSS Distributions 	IOFree(_data, _capacity);
6169*5c2921b0SApple OSS Distributions 	_data = nullptr;
6170*5c2921b0SApple OSS Distributions 	_capacity = _length = 0;
6171*5c2921b0SApple OSS Distributions }
6172*5c2921b0SApple OSS Distributions 
/*
 * Append 'length' bytes to the buffer, growing it if needed.
 * If 'bytes' is NULL the appended region is zero-filled instead.
 * Returns false on size-arithmetic overflow or allocation failure,
 * leaving the existing contents untouched.
 */
bool
_IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
{
	const auto oldLength = getLength();
	size_t newLength;
	// Reject appends whose total size would overflow size_t.
	if (os_add_overflow(oldLength, length, &newLength)) {
		return false;
	}

	// Grow exactly to the required size (no amortized doubling), copying
	// the old contents over and releasing the old buffer.
	if (newLength > _capacity) {
		void * const newData = IOMalloc(newLength);
		if (!newData) {
			return false;
		}
		if (_data) {
			bcopy(_data, newData, oldLength);
			IOFree(_data, _capacity);
		}
		_data = newData;
		_capacity = newLength;
	}

	// Write at the old logical end: caller bytes, or zero fill.
	// NOTE(review): with length == 0 and no prior buffer, 'dest' is
	// computed from a NULL _data — harmless to bcopy/bzero of 0 bytes,
	// but technically indexing a null pointer; confirm callers never
	// rely on this path.
	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
	if (bytes) {
		bcopy(bytes, dest, length);
	} else {
		bzero(dest, length);
	}

	_length = newLength;

	return true;
}
6206*5c2921b0SApple OSS Distributions 
/*
 * Set the logical length of the buffer.  Growing beyond the current
 * capacity reallocates (zero-filled) and copies the old contents;
 * shrinking merely updates the length and keeps the buffer.
 */
void
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	// Reallocate when there is no buffer yet or the request exceeds
	// the current capacity.
	if (!_data || (length > _capacity)) {
		void * const newData = IOMallocZero(length);
		// NOTE(review): newData is not checked for NULL; an allocation
		// failure would fault in the bcopy below and leave _capacity
		// inconsistent.  Confirm callers guarantee allocation success
		// (or that a panic here is acceptable kernel policy).
		if (_data) {
			bcopy(_data, newData, _length);
			IOFree(_data, _capacity);
		}
		_data = newData;
		_capacity = length;
	}
	_length = length;
}
6221*5c2921b0SApple OSS Distributions 
6222*5c2921b0SApple OSS Distributions __typed_allocators_ignore_pop
6223*5c2921b0SApple OSS Distributions 
6224*5c2921b0SApple OSS Distributions const void *
getBytes() const6225*5c2921b0SApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6226*5c2921b0SApple OSS Distributions {
6227*5c2921b0SApple OSS Distributions 	return _length ? _data : nullptr;
6228*5c2921b0SApple OSS Distributions }
6229*5c2921b0SApple OSS Distributions 
6230*5c2921b0SApple OSS Distributions size_t
getLength() const6231*5c2921b0SApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6232*5c2921b0SApple OSS Distributions {
6233*5c2921b0SApple OSS Distributions 	return _data ? _length : 0;
6234*5c2921b0SApple OSS Distributions }
6235