xref: /xnu-8796.121.2/iokit/Kernel/IOMemoryDescriptor.cpp (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1*c54f35caSApple OSS Distributions /*
2*c54f35caSApple OSS Distributions  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*c54f35caSApple OSS Distributions  *
4*c54f35caSApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*c54f35caSApple OSS Distributions  *
6*c54f35caSApple OSS Distributions  * This file contains Original Code and/or Modifications of Original Code
7*c54f35caSApple OSS Distributions  * as defined in and that are subject to the Apple Public Source License
8*c54f35caSApple OSS Distributions  * Version 2.0 (the 'License'). You may not use this file except in
9*c54f35caSApple OSS Distributions  * compliance with the License. The rights granted to you under the License
10*c54f35caSApple OSS Distributions  * may not be used to create, or enable the creation or redistribution of,
11*c54f35caSApple OSS Distributions  * unlawful or unlicensed copies of an Apple operating system, or to
12*c54f35caSApple OSS Distributions  * circumvent, violate, or enable the circumvention or violation of, any
13*c54f35caSApple OSS Distributions  * terms of an Apple operating system software license agreement.
14*c54f35caSApple OSS Distributions  *
15*c54f35caSApple OSS Distributions  * Please obtain a copy of the License at
16*c54f35caSApple OSS Distributions  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*c54f35caSApple OSS Distributions  *
18*c54f35caSApple OSS Distributions  * The Original Code and all software distributed under the License are
19*c54f35caSApple OSS Distributions  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*c54f35caSApple OSS Distributions  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*c54f35caSApple OSS Distributions  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*c54f35caSApple OSS Distributions  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*c54f35caSApple OSS Distributions  * Please see the License for the specific language governing rights and
24*c54f35caSApple OSS Distributions  * limitations under the License.
25*c54f35caSApple OSS Distributions  *
26*c54f35caSApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*c54f35caSApple OSS Distributions  */
28*c54f35caSApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*c54f35caSApple OSS Distributions 
30*c54f35caSApple OSS Distributions #include <sys/cdefs.h>
31*c54f35caSApple OSS Distributions 
32*c54f35caSApple OSS Distributions #include <IOKit/assert.h>
33*c54f35caSApple OSS Distributions #include <IOKit/system.h>
34*c54f35caSApple OSS Distributions #include <IOKit/IOLib.h>
35*c54f35caSApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*c54f35caSApple OSS Distributions #include <IOKit/IOMapper.h>
37*c54f35caSApple OSS Distributions #include <IOKit/IODMACommand.h>
38*c54f35caSApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*c54f35caSApple OSS Distributions 
40*c54f35caSApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*c54f35caSApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*c54f35caSApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*c54f35caSApple OSS Distributions 
44*c54f35caSApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*c54f35caSApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*c54f35caSApple OSS Distributions #include <libkern/OSDebug.h>
47*c54f35caSApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*c54f35caSApple OSS Distributions 
49*c54f35caSApple OSS Distributions #include "IOKitKernelInternal.h"
50*c54f35caSApple OSS Distributions 
51*c54f35caSApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*c54f35caSApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*c54f35caSApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*c54f35caSApple OSS Distributions #include <libkern/c++/OSArray.h>
55*c54f35caSApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*c54f35caSApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*c54f35caSApple OSS Distributions #include <os/overflow.h>
58*c54f35caSApple OSS Distributions #include <os/cpp_util.h>
59*c54f35caSApple OSS Distributions #include <os/base_private.h>
60*c54f35caSApple OSS Distributions 
61*c54f35caSApple OSS Distributions #include <sys/uio.h>
62*c54f35caSApple OSS Distributions 
63*c54f35caSApple OSS Distributions __BEGIN_DECLS
64*c54f35caSApple OSS Distributions #include <vm/pmap.h>
65*c54f35caSApple OSS Distributions #include <vm/vm_pageout.h>
66*c54f35caSApple OSS Distributions #include <mach/memory_object_types.h>
67*c54f35caSApple OSS Distributions #include <device/device_port.h>
68*c54f35caSApple OSS Distributions 
69*c54f35caSApple OSS Distributions #include <mach/vm_prot.h>
70*c54f35caSApple OSS Distributions #include <mach/mach_vm.h>
71*c54f35caSApple OSS Distributions #include <mach/memory_entry.h>
72*c54f35caSApple OSS Distributions #include <vm/vm_fault.h>
73*c54f35caSApple OSS Distributions #include <vm/vm_protos.h>
74*c54f35caSApple OSS Distributions 
75*c54f35caSApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76*c54f35caSApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
77*c54f35caSApple OSS Distributions 
78*c54f35caSApple OSS Distributions extern kern_return_t
79*c54f35caSApple OSS Distributions mach_memory_entry_ownership(
80*c54f35caSApple OSS Distributions 	ipc_port_t      entry_port,
81*c54f35caSApple OSS Distributions 	task_t          owner,
82*c54f35caSApple OSS Distributions 	int             ledger_tag,
83*c54f35caSApple OSS Distributions 	int             ledger_flags);
84*c54f35caSApple OSS Distributions 
85*c54f35caSApple OSS Distributions __END_DECLS
86*c54f35caSApple OSS Distributions 
// Sentinel value passed where an IOMapper * is expected, meaning "wait for
// the system mapper to become available" rather than a real mapper object.
#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;

// NOTE(review): appears to track the highest physical page number; it is
// read/written outside this chunk — confirm against the rest of the file.
ppnum_t           gIOLastPage;

enum {
	kIOMapGuardSizeLarge = 65536
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

// IOGeneralMemoryDescriptor instances are allocated from a dedicated zone
// that zeroes memory on free (ZC_ZFREE_CLEARMEM).
OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
105*c54f35caSApple OSS Distributions 
106*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
107*c54f35caSApple OSS Distributions 
// Global recursive lock serializing access to shared memory-descriptor state
// (e.g. the device-pager back-references used by device_data_action below).
static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
// SLEEP/WAKEUP use `this` as the wait event, so waits are per-descriptor.
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug tracing; compiled out by default (flip the #if to enable kprintf).
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
121*c54f35caSApple OSS Distributions 
122*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123*c54f35caSApple OSS Distributions 
124*c54f35caSApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
125*c54f35caSApple OSS Distributions // Function
126*c54f35caSApple OSS Distributions 
// Flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,    // per the name: range backed by a device
	kIOPLExternUPL = 0x00000002,    // per the name: UPL supplied externally
};
131*c54f35caSApple OSS Distributions 
// Initialization payload used when constructing a descriptor from a
// persistent memory reference (see initWithOptions).
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;      // source descriptor
	IOMemoryReference               * fMemRef;  // reference to adopt — TODO(review): confirm ownership transfer
};
136*c54f35caSApple OSS Distributions 
// Bookkeeping record for one UPL (universal page list) covering a portion of
// the descriptor's range; stored in the trailing array of ioGMDData.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags (ioPLBlockFlags bits)
};
145*c54f35caSApple OSS Distributions 
enum { kMaxWireTags = 6 };

// Variable-sized bookkeeping blob for an IOGeneralMemoryDescriptor, accessed
// through the getDataP()/getPageList()/getIOPLList()/getNumIOPL() macros
// below.  Layout: this fixed header, then fPageCnt upl_page_info_t entries,
// then ioPLBlock records filling the remainder of the container.
struct ioGMDData {
	IOMapper *  fMapper;            // mapper used for DMA mapping (may be NULL)
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;        // valid only when fMappedBaseValid is set
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;      // wiring tracking (IOTRACKING builds only)
#endif /* IOTRACKING */
	unsigned int      fPageCnt;     // number of entries in fPageList
	uint8_t           fDMAMapNumAddressBits;
	unsigned char     fCompletionError:1;
	unsigned char     fMappedBaseValid:1;
	unsigned char     _resv:4;
	unsigned char     fDMAAccess:2;

	/* variable length arrays */
	// NOTE: [1] (not a flexible array member) is intentional; all sizing goes
	// through computeDataSize()/offsetof, so do not "modernize" this.
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
173*c54f35caSApple OSS Distributions 
174*c54f35caSApple OSS Distributions #pragma GCC visibility push(hidden)
175*c54f35caSApple OSS Distributions 
// Growable heap-backed byte buffer; used to hold the variable-sized ioGMDData
// blob (see getDataP above).  Declaration only — the implementation is not in
// this chunk, so behavioral notes below are from the interface names.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create a buffer able to hold at least `capacity` bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append `length` bytes — presumably grows the backing store; confirm in impl.
	bool appendBytes(const void * bytes, size_t length);
	// Set the logical length — TODO(review): confirm interaction with _capacity.
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();              // release the backing allocation

	void *  _data = nullptr;        // backing storage
	size_t  _length = 0;            // bytes in use
	size_t  _capacity = 0;          // bytes allocated
};
198*c54f35caSApple OSS Distributions 
199*c54f35caSApple OSS Distributions #pragma GCC visibility pop
200*c54f35caSApple OSS Distributions 
// Accessors for the ioGMDData blob stored in a _IOMemoryDescriptorMixedData.
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
// The ioPLBlock array begins immediately after the fPageCnt page-info slots.
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
// Number of ioPLBlocks = bytes remaining after the page list / record size.
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
// Total blob size for p page-info entries plus u ioPLBlock records.
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

// Combined mask of the host-only and remote memory options.
enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
210*c54f35caSApple OSS Distributions 
211*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212*c54f35caSApple OSS Distributions 
213*c54f35caSApple OSS Distributions extern "C" {
214*c54f35caSApple OSS Distributions kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)215*c54f35caSApple OSS Distributions device_data_action(
216*c54f35caSApple OSS Distributions 	uintptr_t               device_handle,
217*c54f35caSApple OSS Distributions 	ipc_port_t              device_pager,
218*c54f35caSApple OSS Distributions 	vm_prot_t               protection,
219*c54f35caSApple OSS Distributions 	vm_object_offset_t      offset,
220*c54f35caSApple OSS Distributions 	vm_size_t               size)
221*c54f35caSApple OSS Distributions {
222*c54f35caSApple OSS Distributions 	kern_return_t        kr;
223*c54f35caSApple OSS Distributions 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
224*c54f35caSApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> memDesc;
225*c54f35caSApple OSS Distributions 
226*c54f35caSApple OSS Distributions 	LOCK;
227*c54f35caSApple OSS Distributions 	if (ref->dp.memory) {
228*c54f35caSApple OSS Distributions 		memDesc.reset(ref->dp.memory, OSRetain);
229*c54f35caSApple OSS Distributions 		kr = memDesc->handleFault(device_pager, offset, size);
230*c54f35caSApple OSS Distributions 		memDesc.reset();
231*c54f35caSApple OSS Distributions 	} else {
232*c54f35caSApple OSS Distributions 		kr = KERN_ABORTED;
233*c54f35caSApple OSS Distributions 	}
234*c54f35caSApple OSS Distributions 	UNLOCK;
235*c54f35caSApple OSS Distributions 
236*c54f35caSApple OSS Distributions 	return kr;
237*c54f35caSApple OSS Distributions }
238*c54f35caSApple OSS Distributions 
// Device-pager teardown callback: the pager that used `device_handle` as its
// handle is going away; free the reserved-data block it pointed at.
kern_return_t
device_close(
	uintptr_t     device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IOFreeType( ref, IOMemoryDescriptorReserved );

	return kIOReturnSuccess;
}
249*c54f35caSApple OSS Distributions };      // end extern "C"
250*c54f35caSApple OSS Distributions 
251*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
252*c54f35caSApple OSS Distributions 
253*c54f35caSApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
254*c54f35caSApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
255*c54f35caSApple OSS Distributions // checked for as a NULL reference is illegal.
// Decode range `ind` of `r` into (addr, len), interpreting `r` according to
// the descriptor memory type `type` (UIO iovec, 64-bit range, or legacy
// 32-bit range).  On PGZ-enabled kernels, kernel_task addresses are
// translated out of the probabilistic-guard zone.
static inline void
getAddrLenForInd(
	mach_vm_address_t                &addr,
	mach_vm_size_t                   &len, // Output variables
	UInt32                            type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32                            ind,
	task_t                            task __unused)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// uio-backed ranges: fetch the ind'th iovec
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	// On 32-bit kernels the 64-bit range variants use a distinct layout.
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len  = cur.length;
	}
#endif /* !__LP64__ */
	else {
		// On LP64, IOVirtualRange and IOAddressRange coincide, so this arm
		// also covers the 64-bit types.
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len  = cur.length;
	}
#if CONFIG_PROB_GZALLOC
	if (task == kernel_task) {
		addr = pgz_decode(addr, len);
	}
#endif /* CONFIG_PROB_GZALLOC */
}
291*c54f35caSApple OSS Distributions 
292*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
293*c54f35caSApple OSS Distributions 
294*c54f35caSApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)295*c54f35caSApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
296*c54f35caSApple OSS Distributions {
297*c54f35caSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
298*c54f35caSApple OSS Distributions 
299*c54f35caSApple OSS Distributions 	*control = VM_PURGABLE_SET_STATE;
300*c54f35caSApple OSS Distributions 
301*c54f35caSApple OSS Distributions 	enum { kIOMemoryPurgeableControlMask = 15 };
302*c54f35caSApple OSS Distributions 
303*c54f35caSApple OSS Distributions 	switch (kIOMemoryPurgeableControlMask & newState) {
304*c54f35caSApple OSS Distributions 	case kIOMemoryPurgeableKeepCurrent:
305*c54f35caSApple OSS Distributions 		*control = VM_PURGABLE_GET_STATE;
306*c54f35caSApple OSS Distributions 		break;
307*c54f35caSApple OSS Distributions 
308*c54f35caSApple OSS Distributions 	case kIOMemoryPurgeableNonVolatile:
309*c54f35caSApple OSS Distributions 		*state = VM_PURGABLE_NONVOLATILE;
310*c54f35caSApple OSS Distributions 		break;
311*c54f35caSApple OSS Distributions 	case kIOMemoryPurgeableVolatile:
312*c54f35caSApple OSS Distributions 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
313*c54f35caSApple OSS Distributions 		break;
314*c54f35caSApple OSS Distributions 	case kIOMemoryPurgeableEmpty:
315*c54f35caSApple OSS Distributions 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
316*c54f35caSApple OSS Distributions 		break;
317*c54f35caSApple OSS Distributions 	default:
318*c54f35caSApple OSS Distributions 		err = kIOReturnBadArgument;
319*c54f35caSApple OSS Distributions 		break;
320*c54f35caSApple OSS Distributions 	}
321*c54f35caSApple OSS Distributions 
322*c54f35caSApple OSS Distributions 	if (*control == VM_PURGABLE_SET_STATE) {
323*c54f35caSApple OSS Distributions 		// let VM know this call is from the kernel and is allowed to alter
324*c54f35caSApple OSS Distributions 		// the volatility of the memory entry even if it was created with
325*c54f35caSApple OSS Distributions 		// MAP_MEM_PURGABLE_KERNEL_ONLY
326*c54f35caSApple OSS Distributions 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
327*c54f35caSApple OSS Distributions 	}
328*c54f35caSApple OSS Distributions 
329*c54f35caSApple OSS Distributions 	return err;
330*c54f35caSApple OSS Distributions }
331*c54f35caSApple OSS Distributions 
332*c54f35caSApple OSS Distributions static IOReturn
purgeableStateBits(int * state)333*c54f35caSApple OSS Distributions purgeableStateBits(int * state)
334*c54f35caSApple OSS Distributions {
335*c54f35caSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
336*c54f35caSApple OSS Distributions 
337*c54f35caSApple OSS Distributions 	switch (VM_PURGABLE_STATE_MASK & *state) {
338*c54f35caSApple OSS Distributions 	case VM_PURGABLE_NONVOLATILE:
339*c54f35caSApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
340*c54f35caSApple OSS Distributions 		break;
341*c54f35caSApple OSS Distributions 	case VM_PURGABLE_VOLATILE:
342*c54f35caSApple OSS Distributions 		*state = kIOMemoryPurgeableVolatile;
343*c54f35caSApple OSS Distributions 		break;
344*c54f35caSApple OSS Distributions 	case VM_PURGABLE_EMPTY:
345*c54f35caSApple OSS Distributions 		*state = kIOMemoryPurgeableEmpty;
346*c54f35caSApple OSS Distributions 		break;
347*c54f35caSApple OSS Distributions 	default:
348*c54f35caSApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
349*c54f35caSApple OSS Distributions 		err = kIOReturnNotReady;
350*c54f35caSApple OSS Distributions 		break;
351*c54f35caSApple OSS Distributions 	}
352*c54f35caSApple OSS Distributions 	return err;
353*c54f35caSApple OSS Distributions }
354*c54f35caSApple OSS Distributions 
// Table mapping each IOKit cache mode (kIO*Cache constant, used as the index)
// to its VM pager WIMG flags and memory-entry object type.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* pager cache attribute
	unsigned int object_type;   // MAP_MEM_* memory-entry type
} iokit_memtype_entry;

static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
372*c54f35caSApple OSS Distributions 
// Build a vm_prot_t carrying the memory-entry object type for `cacheMode`
// via SET_MAP_MEM.  Asserts on out-of-range modes (debug) and clamps them to
// kIODefaultCache (release).
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
		cacheMode = kIODefaultCache;
	}
	vm_prot_t prot = 0;
	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
	return prot;
}
384*c54f35caSApple OSS Distributions 
385*c54f35caSApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)386*c54f35caSApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
387*c54f35caSApple OSS Distributions {
388*c54f35caSApple OSS Distributions 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
389*c54f35caSApple OSS Distributions 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
390*c54f35caSApple OSS Distributions 		cacheMode = kIODefaultCache;
391*c54f35caSApple OSS Distributions 	}
392*c54f35caSApple OSS Distributions 	if (cacheMode == kIODefaultCache) {
393*c54f35caSApple OSS Distributions 		return -1U;
394*c54f35caSApple OSS Distributions 	}
395*c54f35caSApple OSS Distributions 	return iomd_mem_types[cacheMode].wimg;
396*c54f35caSApple OSS Distributions }
397*c54f35caSApple OSS Distributions 
398*c54f35caSApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)399*c54f35caSApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
400*c54f35caSApple OSS Distributions {
401*c54f35caSApple OSS Distributions 	pagerFlags &= VM_WIMG_MASK;
402*c54f35caSApple OSS Distributions 	IOOptionBits cacheMode = kIODefaultCache;
403*c54f35caSApple OSS Distributions 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
404*c54f35caSApple OSS Distributions 		if (iomd_mem_types[i].wimg == pagerFlags) {
405*c54f35caSApple OSS Distributions 			cacheMode = i;
406*c54f35caSApple OSS Distributions 			break;
407*c54f35caSApple OSS Distributions 		}
408*c54f35caSApple OSS Distributions 	}
409*c54f35caSApple OSS Distributions 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
410*c54f35caSApple OSS Distributions }
411*c54f35caSApple OSS Distributions 
412*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
414*c54f35caSApple OSS Distributions 
// One memory-entry send right plus the slice of the source range it covers.
struct IOMemoryEntry {
	ipc_port_t entry;   // named-entry send right (released in memoryReferenceFree)
	int64_t    offset;
	uint64_t   size;
	uint64_t   start;
};

// Refcounted container of IOMemoryEntry records; allocated/grown by
// memoryReferenceAlloc with `capacity` trailing slots.
struct IOMemoryReference {
	volatile SInt32             refCount;
	vm_prot_t                   prot;
	uint32_t                    capacity;   // allocated entries[] slots
	uint32_t                    count;      // entries[] slots in use
	struct IOMemoryReference  * mapRef;     // NOTE(review): chained reference, freed recursively — confirm purpose
	IOMemoryEntry               entries[0]; // variable-length tail (sized by krealloc_type)
};
430*c54f35caSApple OSS Distributions 
// Option bits accepted by the memoryReference* routines below.
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Count of live IOMemoryReference objects (incremented on first allocation,
// decremented on free) — NOTE(review): appears diagnostic only.
SInt32 gIOMemoryReferenceCount;
438*c54f35caSApple OSS Distributions 
// Allocate an IOMemoryReference with room for `capacity` IOMemoryEntry
// slots, or grow the existing reference `realloc` to that capacity.
// A brand-new reference starts with refCount 1 and bumps the global count.
// Returns NULL on allocation failure (the old reference, if any, survives
// per krealloc_type semantics — TODO(review): confirm on failure path).
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
	IOMemoryReference * ref;
	size_t              oldCapacity;

	if (realloc) {
		oldCapacity = realloc->capacity;
	} else {
		oldCapacity = 0;
	}

	// krealloc_type sizes header + trailing entry array together and zeroes
	// the allocation (Z_WAITOK_ZERO), instead of a manual alloc/copy/free.
	ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
	    oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
	if (ref) {
		if (oldCapacity == 0) {
			// Fresh allocation: initialize refcount and global accounting.
			ref->refCount = 1;
			OSIncrementAtomic(&gIOMemoryReferenceCount);
		}
		ref->capacity = capacity;
	}
	return ref;
}
463*c54f35caSApple OSS Distributions 
464*c54f35caSApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)465*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
466*c54f35caSApple OSS Distributions {
467*c54f35caSApple OSS Distributions 	IOMemoryEntry * entries;
468*c54f35caSApple OSS Distributions 
469*c54f35caSApple OSS Distributions 	if (ref->mapRef) {
470*c54f35caSApple OSS Distributions 		memoryReferenceFree(ref->mapRef);
471*c54f35caSApple OSS Distributions 		ref->mapRef = NULL;
472*c54f35caSApple OSS Distributions 	}
473*c54f35caSApple OSS Distributions 
474*c54f35caSApple OSS Distributions 	entries = ref->entries + ref->count;
475*c54f35caSApple OSS Distributions 	while (entries > &ref->entries[0]) {
476*c54f35caSApple OSS Distributions 		entries--;
477*c54f35caSApple OSS Distributions 		ipc_port_release_send(entries->entry);
478*c54f35caSApple OSS Distributions 	}
479*c54f35caSApple OSS Distributions 	kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
480*c54f35caSApple OSS Distributions 
481*c54f35caSApple OSS Distributions 	OSDecrementAtomic(&gIOMemoryReferenceCount);
482*c54f35caSApple OSS Distributions }
483*c54f35caSApple OSS Distributions 
484*c54f35caSApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)485*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
486*c54f35caSApple OSS Distributions {
487*c54f35caSApple OSS Distributions 	if (1 == OSDecrementAtomic(&ref->refCount)) {
488*c54f35caSApple OSS Distributions 		memoryReferenceFree(ref);
489*c54f35caSApple OSS Distributions 	}
490*c54f35caSApple OSS Distributions }
491*c54f35caSApple OSS Distributions 
492*c54f35caSApple OSS Distributions 
493*c54f35caSApple OSS Distributions IOReturn
memoryReferenceCreate(IOOptionBits options,IOMemoryReference ** reference)494*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreate(
495*c54f35caSApple OSS Distributions 	IOOptionBits         options,
496*c54f35caSApple OSS Distributions 	IOMemoryReference ** reference)
497*c54f35caSApple OSS Distributions {
498*c54f35caSApple OSS Distributions 	enum { kCapacity = 4, kCapacityInc = 4 };
499*c54f35caSApple OSS Distributions 
500*c54f35caSApple OSS Distributions 	kern_return_t        err;
501*c54f35caSApple OSS Distributions 	IOMemoryReference *  ref;
502*c54f35caSApple OSS Distributions 	IOMemoryEntry *      entries;
503*c54f35caSApple OSS Distributions 	IOMemoryEntry *      cloneEntries = NULL;
504*c54f35caSApple OSS Distributions 	vm_map_t             map;
505*c54f35caSApple OSS Distributions 	ipc_port_t           entry, cloneEntry;
506*c54f35caSApple OSS Distributions 	vm_prot_t            prot;
507*c54f35caSApple OSS Distributions 	memory_object_size_t actualSize;
508*c54f35caSApple OSS Distributions 	uint32_t             rangeIdx;
509*c54f35caSApple OSS Distributions 	uint32_t             count;
510*c54f35caSApple OSS Distributions 	mach_vm_address_t    entryAddr, endAddr, entrySize;
511*c54f35caSApple OSS Distributions 	mach_vm_size_t       srcAddr, srcLen;
512*c54f35caSApple OSS Distributions 	mach_vm_size_t       nextAddr, nextLen;
513*c54f35caSApple OSS Distributions 	mach_vm_size_t       offset, remain;
514*c54f35caSApple OSS Distributions 	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
515*c54f35caSApple OSS Distributions 	int                  misaligned_start = 0, misaligned_end = 0;
516*c54f35caSApple OSS Distributions 	IOByteCount          physLen;
517*c54f35caSApple OSS Distributions 	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
518*c54f35caSApple OSS Distributions 	IOOptionBits         cacheMode;
519*c54f35caSApple OSS Distributions 	unsigned int         pagerFlags;
520*c54f35caSApple OSS Distributions 	vm_tag_t             tag;
521*c54f35caSApple OSS Distributions 	vm_named_entry_kernel_flags_t vmne_kflags;
522*c54f35caSApple OSS Distributions 
523*c54f35caSApple OSS Distributions 	ref = memoryReferenceAlloc(kCapacity, NULL);
524*c54f35caSApple OSS Distributions 	if (!ref) {
525*c54f35caSApple OSS Distributions 		return kIOReturnNoMemory;
526*c54f35caSApple OSS Distributions 	}
527*c54f35caSApple OSS Distributions 
528*c54f35caSApple OSS Distributions 	tag = (vm_tag_t) getVMTag(kernel_map);
529*c54f35caSApple OSS Distributions 	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
530*c54f35caSApple OSS Distributions 	entries = &ref->entries[0];
531*c54f35caSApple OSS Distributions 	count = 0;
532*c54f35caSApple OSS Distributions 	err = KERN_SUCCESS;
533*c54f35caSApple OSS Distributions 
534*c54f35caSApple OSS Distributions 	offset = 0;
535*c54f35caSApple OSS Distributions 	rangeIdx = 0;
536*c54f35caSApple OSS Distributions 	remain = _length;
537*c54f35caSApple OSS Distributions 	if (_task) {
538*c54f35caSApple OSS Distributions 		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
539*c54f35caSApple OSS Distributions 
540*c54f35caSApple OSS Distributions 		// account for IOBMD setLength(), use its capacity as length
541*c54f35caSApple OSS Distributions 		IOBufferMemoryDescriptor * bmd;
542*c54f35caSApple OSS Distributions 		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
543*c54f35caSApple OSS Distributions 			nextLen = bmd->getCapacity();
544*c54f35caSApple OSS Distributions 			remain  = nextLen;
545*c54f35caSApple OSS Distributions 		}
546*c54f35caSApple OSS Distributions 	} else {
547*c54f35caSApple OSS Distributions 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
548*c54f35caSApple OSS Distributions 		nextLen = physLen;
549*c54f35caSApple OSS Distributions 
550*c54f35caSApple OSS Distributions 		// default cache mode for physical
551*c54f35caSApple OSS Distributions 		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
552*c54f35caSApple OSS Distributions 			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
553*c54f35caSApple OSS Distributions 			_flags |= (mode << kIOMemoryBufferCacheShift);
554*c54f35caSApple OSS Distributions 		}
555*c54f35caSApple OSS Distributions 	}
556*c54f35caSApple OSS Distributions 
557*c54f35caSApple OSS Distributions 	// cache mode & vm_prot
558*c54f35caSApple OSS Distributions 	prot = VM_PROT_READ;
559*c54f35caSApple OSS Distributions 	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
560*c54f35caSApple OSS Distributions 	prot |= vmProtForCacheMode(cacheMode);
561*c54f35caSApple OSS Distributions 	// VM system requires write access to change cache mode
562*c54f35caSApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
563*c54f35caSApple OSS Distributions 		prot |= VM_PROT_WRITE;
564*c54f35caSApple OSS Distributions 	}
565*c54f35caSApple OSS Distributions 	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
566*c54f35caSApple OSS Distributions 		prot |= VM_PROT_WRITE;
567*c54f35caSApple OSS Distributions 	}
568*c54f35caSApple OSS Distributions 	if (kIOMemoryReferenceWrite & options) {
569*c54f35caSApple OSS Distributions 		prot |= VM_PROT_WRITE;
570*c54f35caSApple OSS Distributions 	}
571*c54f35caSApple OSS Distributions 	if (kIOMemoryReferenceCOW   & options) {
572*c54f35caSApple OSS Distributions 		prot |= MAP_MEM_VM_COPY;
573*c54f35caSApple OSS Distributions 	}
574*c54f35caSApple OSS Distributions 
575*c54f35caSApple OSS Distributions 	if (kIOMemoryUseReserve & _flags) {
576*c54f35caSApple OSS Distributions 		prot |= MAP_MEM_GRAB_SECLUDED;
577*c54f35caSApple OSS Distributions 	}
578*c54f35caSApple OSS Distributions 
579*c54f35caSApple OSS Distributions 	if ((kIOMemoryReferenceReuse & options) && _memRef) {
580*c54f35caSApple OSS Distributions 		cloneEntries = &_memRef->entries[0];
581*c54f35caSApple OSS Distributions 		prot |= MAP_MEM_NAMED_REUSE;
582*c54f35caSApple OSS Distributions 	}
583*c54f35caSApple OSS Distributions 
584*c54f35caSApple OSS Distributions 	if (_task) {
585*c54f35caSApple OSS Distributions 		// virtual ranges
586*c54f35caSApple OSS Distributions 
587*c54f35caSApple OSS Distributions 		if (kIOMemoryBufferPageable & _flags) {
588*c54f35caSApple OSS Distributions 			int ledger_tag, ledger_no_footprint;
589*c54f35caSApple OSS Distributions 
590*c54f35caSApple OSS Distributions 			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
591*c54f35caSApple OSS Distributions 			prot |= MAP_MEM_NAMED_CREATE;
592*c54f35caSApple OSS Distributions 
593*c54f35caSApple OSS Distributions 			// default accounting settings:
594*c54f35caSApple OSS Distributions 			//   + "none" ledger tag
595*c54f35caSApple OSS Distributions 			//   + include in footprint
596*c54f35caSApple OSS Distributions 			// can be changed later with ::setOwnership()
597*c54f35caSApple OSS Distributions 			ledger_tag = VM_LEDGER_TAG_NONE;
598*c54f35caSApple OSS Distributions 			ledger_no_footprint = 0;
599*c54f35caSApple OSS Distributions 
600*c54f35caSApple OSS Distributions 			if (kIOMemoryBufferPurgeable & _flags) {
601*c54f35caSApple OSS Distributions 				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
602*c54f35caSApple OSS Distributions 				if (VM_KERN_MEMORY_SKYWALK == tag) {
603*c54f35caSApple OSS Distributions 					// Skywalk purgeable memory accounting:
604*c54f35caSApple OSS Distributions 					//    + "network" ledger tag
605*c54f35caSApple OSS Distributions 					//    + not included in footprint
606*c54f35caSApple OSS Distributions 					ledger_tag = VM_LEDGER_TAG_NETWORK;
607*c54f35caSApple OSS Distributions 					ledger_no_footprint = 1;
608*c54f35caSApple OSS Distributions 				} else {
609*c54f35caSApple OSS Distributions 					// regular purgeable memory accounting:
610*c54f35caSApple OSS Distributions 					//    + no ledger tag
611*c54f35caSApple OSS Distributions 					//    + included in footprint
612*c54f35caSApple OSS Distributions 					ledger_tag = VM_LEDGER_TAG_NONE;
613*c54f35caSApple OSS Distributions 					ledger_no_footprint = 0;
614*c54f35caSApple OSS Distributions 				}
615*c54f35caSApple OSS Distributions 			}
616*c54f35caSApple OSS Distributions 			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
617*c54f35caSApple OSS Distributions 			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
618*c54f35caSApple OSS Distributions 			if (kIOMemoryUseReserve & _flags) {
619*c54f35caSApple OSS Distributions 				prot |= MAP_MEM_GRAB_SECLUDED;
620*c54f35caSApple OSS Distributions 			}
621*c54f35caSApple OSS Distributions 
622*c54f35caSApple OSS Distributions 			prot |= VM_PROT_WRITE;
623*c54f35caSApple OSS Distributions 			map = NULL;
624*c54f35caSApple OSS Distributions 		} else {
625*c54f35caSApple OSS Distributions 			prot |= MAP_MEM_USE_DATA_ADDR;
626*c54f35caSApple OSS Distributions 			map = get_task_map(_task);
627*c54f35caSApple OSS Distributions 		}
628*c54f35caSApple OSS Distributions 		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
629*c54f35caSApple OSS Distributions 
630*c54f35caSApple OSS Distributions 		while (remain) {
631*c54f35caSApple OSS Distributions 			srcAddr  = nextAddr;
632*c54f35caSApple OSS Distributions 			srcLen   = nextLen;
633*c54f35caSApple OSS Distributions 			nextAddr = 0;
634*c54f35caSApple OSS Distributions 			nextLen  = 0;
635*c54f35caSApple OSS Distributions 			// coalesce addr range
636*c54f35caSApple OSS Distributions 			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
637*c54f35caSApple OSS Distributions 				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
638*c54f35caSApple OSS Distributions 				if ((srcAddr + srcLen) != nextAddr) {
639*c54f35caSApple OSS Distributions 					break;
640*c54f35caSApple OSS Distributions 				}
641*c54f35caSApple OSS Distributions 				srcLen += nextLen;
642*c54f35caSApple OSS Distributions 			}
643*c54f35caSApple OSS Distributions 
644*c54f35caSApple OSS Distributions 			if (MAP_MEM_USE_DATA_ADDR & prot) {
645*c54f35caSApple OSS Distributions 				entryAddr = srcAddr;
646*c54f35caSApple OSS Distributions 				endAddr   = srcAddr + srcLen;
647*c54f35caSApple OSS Distributions 			} else {
648*c54f35caSApple OSS Distributions 				entryAddr = trunc_page_64(srcAddr);
649*c54f35caSApple OSS Distributions 				endAddr   = round_page_64(srcAddr + srcLen);
650*c54f35caSApple OSS Distributions 			}
651*c54f35caSApple OSS Distributions 			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
652*c54f35caSApple OSS Distributions 				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
653*c54f35caSApple OSS Distributions 			}
654*c54f35caSApple OSS Distributions 
655*c54f35caSApple OSS Distributions 			do{
656*c54f35caSApple OSS Distributions 				entrySize = (endAddr - entryAddr);
657*c54f35caSApple OSS Distributions 				if (!entrySize) {
658*c54f35caSApple OSS Distributions 					break;
659*c54f35caSApple OSS Distributions 				}
660*c54f35caSApple OSS Distributions 				actualSize = entrySize;
661*c54f35caSApple OSS Distributions 
662*c54f35caSApple OSS Distributions 				cloneEntry = MACH_PORT_NULL;
663*c54f35caSApple OSS Distributions 				if (MAP_MEM_NAMED_REUSE & prot) {
664*c54f35caSApple OSS Distributions 					if (cloneEntries < &_memRef->entries[_memRef->count]) {
665*c54f35caSApple OSS Distributions 						cloneEntry = cloneEntries->entry;
666*c54f35caSApple OSS Distributions 					} else {
667*c54f35caSApple OSS Distributions 						prot &= ~MAP_MEM_NAMED_REUSE;
668*c54f35caSApple OSS Distributions 					}
669*c54f35caSApple OSS Distributions 				}
670*c54f35caSApple OSS Distributions 
671*c54f35caSApple OSS Distributions 				err = mach_make_memory_entry_internal(map,
672*c54f35caSApple OSS Distributions 				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
673*c54f35caSApple OSS Distributions 
674*c54f35caSApple OSS Distributions 				if (KERN_SUCCESS != err) {
675*c54f35caSApple OSS Distributions 					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
676*c54f35caSApple OSS Distributions 					break;
677*c54f35caSApple OSS Distributions 				}
678*c54f35caSApple OSS Distributions 				if (MAP_MEM_USE_DATA_ADDR & prot) {
679*c54f35caSApple OSS Distributions 					if (actualSize > entrySize) {
680*c54f35caSApple OSS Distributions 						actualSize = entrySize;
681*c54f35caSApple OSS Distributions 					}
682*c54f35caSApple OSS Distributions 				} else if (actualSize > entrySize) {
683*c54f35caSApple OSS Distributions 					panic("mach_make_memory_entry_64 actualSize");
684*c54f35caSApple OSS Distributions 				}
685*c54f35caSApple OSS Distributions 
686*c54f35caSApple OSS Distributions 				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
687*c54f35caSApple OSS Distributions 
688*c54f35caSApple OSS Distributions 				if (count && overmap_start) {
689*c54f35caSApple OSS Distributions 					/*
690*c54f35caSApple OSS Distributions 					 * Track misaligned start for all
691*c54f35caSApple OSS Distributions 					 * except the first entry.
692*c54f35caSApple OSS Distributions 					 */
693*c54f35caSApple OSS Distributions 					misaligned_start++;
694*c54f35caSApple OSS Distributions 				}
695*c54f35caSApple OSS Distributions 
696*c54f35caSApple OSS Distributions 				if (overmap_end) {
697*c54f35caSApple OSS Distributions 					/*
698*c54f35caSApple OSS Distributions 					 * Ignore misaligned end for the
699*c54f35caSApple OSS Distributions 					 * last entry.
700*c54f35caSApple OSS Distributions 					 */
701*c54f35caSApple OSS Distributions 					if ((entryAddr + actualSize) != endAddr) {
702*c54f35caSApple OSS Distributions 						misaligned_end++;
703*c54f35caSApple OSS Distributions 					}
704*c54f35caSApple OSS Distributions 				}
705*c54f35caSApple OSS Distributions 
706*c54f35caSApple OSS Distributions 				if (count) {
707*c54f35caSApple OSS Distributions 					/* Middle entries */
708*c54f35caSApple OSS Distributions 					if (misaligned_start || misaligned_end) {
709*c54f35caSApple OSS Distributions 						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
710*c54f35caSApple OSS Distributions 						ipc_port_release_send(entry);
711*c54f35caSApple OSS Distributions 						err = KERN_NOT_SUPPORTED;
712*c54f35caSApple OSS Distributions 						break;
713*c54f35caSApple OSS Distributions 					}
714*c54f35caSApple OSS Distributions 				}
715*c54f35caSApple OSS Distributions 
716*c54f35caSApple OSS Distributions 				if (count >= ref->capacity) {
717*c54f35caSApple OSS Distributions 					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
718*c54f35caSApple OSS Distributions 					entries = &ref->entries[count];
719*c54f35caSApple OSS Distributions 				}
720*c54f35caSApple OSS Distributions 				entries->entry  = entry;
721*c54f35caSApple OSS Distributions 				entries->size   = actualSize;
722*c54f35caSApple OSS Distributions 				entries->offset = offset + (entryAddr - srcAddr);
723*c54f35caSApple OSS Distributions 				entries->start = entryAddr;
724*c54f35caSApple OSS Distributions 				entryAddr += actualSize;
725*c54f35caSApple OSS Distributions 				if (MAP_MEM_NAMED_REUSE & prot) {
726*c54f35caSApple OSS Distributions 					if ((cloneEntries->entry == entries->entry)
727*c54f35caSApple OSS Distributions 					    && (cloneEntries->size == entries->size)
728*c54f35caSApple OSS Distributions 					    && (cloneEntries->offset == entries->offset)) {
729*c54f35caSApple OSS Distributions 						cloneEntries++;
730*c54f35caSApple OSS Distributions 					} else {
731*c54f35caSApple OSS Distributions 						prot &= ~MAP_MEM_NAMED_REUSE;
732*c54f35caSApple OSS Distributions 					}
733*c54f35caSApple OSS Distributions 				}
734*c54f35caSApple OSS Distributions 				entries++;
735*c54f35caSApple OSS Distributions 				count++;
736*c54f35caSApple OSS Distributions 			}while (true);
737*c54f35caSApple OSS Distributions 			offset += srcLen;
738*c54f35caSApple OSS Distributions 			remain -= srcLen;
739*c54f35caSApple OSS Distributions 		}
740*c54f35caSApple OSS Distributions 	} else {
741*c54f35caSApple OSS Distributions 		// _task == 0, physical or kIOMemoryTypeUPL
742*c54f35caSApple OSS Distributions 		memory_object_t pager;
743*c54f35caSApple OSS Distributions 		vm_size_t       size = ptoa_64(_pages);
744*c54f35caSApple OSS Distributions 
745*c54f35caSApple OSS Distributions 		if (!getKernelReserved()) {
746*c54f35caSApple OSS Distributions 			panic("getKernelReserved");
747*c54f35caSApple OSS Distributions 		}
748*c54f35caSApple OSS Distributions 
749*c54f35caSApple OSS Distributions 		reserved->dp.pagerContig = (1 == _rangesCount);
750*c54f35caSApple OSS Distributions 		reserved->dp.memory      = this;
751*c54f35caSApple OSS Distributions 
752*c54f35caSApple OSS Distributions 		pagerFlags = pagerFlagsForCacheMode(cacheMode);
753*c54f35caSApple OSS Distributions 		if (-1U == pagerFlags) {
754*c54f35caSApple OSS Distributions 			panic("phys is kIODefaultCache");
755*c54f35caSApple OSS Distributions 		}
756*c54f35caSApple OSS Distributions 		if (reserved->dp.pagerContig) {
757*c54f35caSApple OSS Distributions 			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
758*c54f35caSApple OSS Distributions 		}
759*c54f35caSApple OSS Distributions 
760*c54f35caSApple OSS Distributions 		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
761*c54f35caSApple OSS Distributions 		    size, pagerFlags);
762*c54f35caSApple OSS Distributions 		assert(pager);
763*c54f35caSApple OSS Distributions 		if (!pager) {
764*c54f35caSApple OSS Distributions 			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
765*c54f35caSApple OSS Distributions 			err = kIOReturnVMError;
766*c54f35caSApple OSS Distributions 		} else {
767*c54f35caSApple OSS Distributions 			srcAddr  = nextAddr;
768*c54f35caSApple OSS Distributions 			entryAddr = trunc_page_64(srcAddr);
769*c54f35caSApple OSS Distributions 			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
770*c54f35caSApple OSS Distributions 			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
771*c54f35caSApple OSS Distributions 			assert(KERN_SUCCESS == err);
772*c54f35caSApple OSS Distributions 			if (KERN_SUCCESS != err) {
773*c54f35caSApple OSS Distributions 				device_pager_deallocate(pager);
774*c54f35caSApple OSS Distributions 			} else {
775*c54f35caSApple OSS Distributions 				reserved->dp.devicePager = pager;
776*c54f35caSApple OSS Distributions 				entries->entry  = entry;
777*c54f35caSApple OSS Distributions 				entries->size   = size;
778*c54f35caSApple OSS Distributions 				entries->offset = offset + (entryAddr - srcAddr);
779*c54f35caSApple OSS Distributions 				entries++;
780*c54f35caSApple OSS Distributions 				count++;
781*c54f35caSApple OSS Distributions 			}
782*c54f35caSApple OSS Distributions 		}
783*c54f35caSApple OSS Distributions 	}
784*c54f35caSApple OSS Distributions 
785*c54f35caSApple OSS Distributions 	ref->count = count;
786*c54f35caSApple OSS Distributions 	ref->prot  = prot;
787*c54f35caSApple OSS Distributions 
788*c54f35caSApple OSS Distributions 	if (_task && (KERN_SUCCESS == err)
789*c54f35caSApple OSS Distributions 	    && (kIOMemoryMapCopyOnWrite & _flags)
790*c54f35caSApple OSS Distributions 	    && !(kIOMemoryReferenceCOW & options)) {
791*c54f35caSApple OSS Distributions 		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
792*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS != err) {
793*c54f35caSApple OSS Distributions 			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
794*c54f35caSApple OSS Distributions 		}
795*c54f35caSApple OSS Distributions 	}
796*c54f35caSApple OSS Distributions 
797*c54f35caSApple OSS Distributions 	if (KERN_SUCCESS == err) {
798*c54f35caSApple OSS Distributions 		if (MAP_MEM_NAMED_REUSE & prot) {
799*c54f35caSApple OSS Distributions 			memoryReferenceFree(ref);
800*c54f35caSApple OSS Distributions 			OSIncrementAtomic(&_memRef->refCount);
801*c54f35caSApple OSS Distributions 			ref = _memRef;
802*c54f35caSApple OSS Distributions 		}
803*c54f35caSApple OSS Distributions 	} else {
804*c54f35caSApple OSS Distributions 		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
805*c54f35caSApple OSS Distributions 		memoryReferenceFree(ref);
806*c54f35caSApple OSS Distributions 		ref = NULL;
807*c54f35caSApple OSS Distributions 	}
808*c54f35caSApple OSS Distributions 
809*c54f35caSApple OSS Distributions 	*reference = ref;
810*c54f35caSApple OSS Distributions 
811*c54f35caSApple OSS Distributions 	return err;
812*c54f35caSApple OSS Distributions }
813*c54f35caSApple OSS Distributions 
814*c54f35caSApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)815*c54f35caSApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
816*c54f35caSApple OSS Distributions {
817*c54f35caSApple OSS Distributions 	switch (kIOMapGuardedMask & options) {
818*c54f35caSApple OSS Distributions 	default:
819*c54f35caSApple OSS Distributions 	case kIOMapGuardedSmall:
820*c54f35caSApple OSS Distributions 		return vm_map_page_size(map);
821*c54f35caSApple OSS Distributions 	case kIOMapGuardedLarge:
822*c54f35caSApple OSS Distributions 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
823*c54f35caSApple OSS Distributions 		return kIOMapGuardSizeLarge;
824*c54f35caSApple OSS Distributions 	}
825*c54f35caSApple OSS Distributions 	;
826*c54f35caSApple OSS Distributions }
827*c54f35caSApple OSS Distributions 
828*c54f35caSApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)829*c54f35caSApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
830*c54f35caSApple OSS Distributions     vm_map_offset_t addr, mach_vm_size_t size)
831*c54f35caSApple OSS Distributions {
832*c54f35caSApple OSS Distributions 	kern_return_t   kr;
833*c54f35caSApple OSS Distributions 	vm_map_offset_t actualAddr;
834*c54f35caSApple OSS Distributions 	mach_vm_size_t  actualSize;
835*c54f35caSApple OSS Distributions 
836*c54f35caSApple OSS Distributions 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
837*c54f35caSApple OSS Distributions 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
838*c54f35caSApple OSS Distributions 
839*c54f35caSApple OSS Distributions 	if (kIOMapGuardedMask & options) {
840*c54f35caSApple OSS Distributions 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
841*c54f35caSApple OSS Distributions 		actualAddr -= guardSize;
842*c54f35caSApple OSS Distributions 		actualSize += 2 * guardSize;
843*c54f35caSApple OSS Distributions 	}
844*c54f35caSApple OSS Distributions 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
845*c54f35caSApple OSS Distributions 
846*c54f35caSApple OSS Distributions 	return kr;
847*c54f35caSApple OSS Distributions }
848*c54f35caSApple OSS Distributions 
849*c54f35caSApple OSS Distributions kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map,void * _ref)850*c54f35caSApple OSS Distributions IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
851*c54f35caSApple OSS Distributions {
852*c54f35caSApple OSS Distributions 	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
853*c54f35caSApple OSS Distributions 	IOReturn                        err;
854*c54f35caSApple OSS Distributions 	vm_map_offset_t                 addr;
855*c54f35caSApple OSS Distributions 	mach_vm_size_t                  size;
856*c54f35caSApple OSS Distributions 	mach_vm_size_t                  guardSize;
857*c54f35caSApple OSS Distributions 	vm_map_kernel_flags_t           vmk_flags;
858*c54f35caSApple OSS Distributions 
859*c54f35caSApple OSS Distributions 	addr = ref->mapped;
860*c54f35caSApple OSS Distributions 	size = ref->size;
861*c54f35caSApple OSS Distributions 	guardSize = 0;
862*c54f35caSApple OSS Distributions 
863*c54f35caSApple OSS Distributions 	if (kIOMapGuardedMask & ref->options) {
864*c54f35caSApple OSS Distributions 		if (!(kIOMapAnywhere & ref->options)) {
865*c54f35caSApple OSS Distributions 			return kIOReturnBadArgument;
866*c54f35caSApple OSS Distributions 		}
867*c54f35caSApple OSS Distributions 		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
868*c54f35caSApple OSS Distributions 		size += 2 * guardSize;
869*c54f35caSApple OSS Distributions 	}
870*c54f35caSApple OSS Distributions 	if (kIOMapAnywhere & ref->options) {
871*c54f35caSApple OSS Distributions 		vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
872*c54f35caSApple OSS Distributions 	} else {
873*c54f35caSApple OSS Distributions 		vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
874*c54f35caSApple OSS Distributions 	}
875*c54f35caSApple OSS Distributions 	vmk_flags.vm_tag = ref->tag;
876*c54f35caSApple OSS Distributions 
877*c54f35caSApple OSS Distributions 	/*
878*c54f35caSApple OSS Distributions 	 * Mapping memory into the kernel_map using IOMDs use the data range.
879*c54f35caSApple OSS Distributions 	 * Memory being mapped should not contain kernel pointers.
880*c54f35caSApple OSS Distributions 	 */
881*c54f35caSApple OSS Distributions 	if (map == kernel_map) {
882*c54f35caSApple OSS Distributions 		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
883*c54f35caSApple OSS Distributions 	}
884*c54f35caSApple OSS Distributions 
885*c54f35caSApple OSS Distributions 	err = vm_map_enter_mem_object(map, &addr, size,
886*c54f35caSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
887*c54f35caSApple OSS Distributions 	    // TODO4K this should not be necessary...
888*c54f35caSApple OSS Distributions 	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
889*c54f35caSApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
890*c54f35caSApple OSS Distributions 	    (vm_map_offset_t) 0,
891*c54f35caSApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
892*c54f35caSApple OSS Distributions 	    vmk_flags,
893*c54f35caSApple OSS Distributions 	    IPC_PORT_NULL,
894*c54f35caSApple OSS Distributions 	    (memory_object_offset_t) 0,
895*c54f35caSApple OSS Distributions 	    false,                       /* copy */
896*c54f35caSApple OSS Distributions 	    ref->prot,
897*c54f35caSApple OSS Distributions 	    ref->prot,
898*c54f35caSApple OSS Distributions 	    VM_INHERIT_NONE);
899*c54f35caSApple OSS Distributions 	if (KERN_SUCCESS == err) {
900*c54f35caSApple OSS Distributions 		ref->mapped = (mach_vm_address_t) addr;
901*c54f35caSApple OSS Distributions 		ref->map = map;
902*c54f35caSApple OSS Distributions 		if (kIOMapGuardedMask & ref->options) {
903*c54f35caSApple OSS Distributions 			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
904*c54f35caSApple OSS Distributions 
905*c54f35caSApple OSS Distributions 			err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
906*c54f35caSApple OSS Distributions 			assert(KERN_SUCCESS == err);
907*c54f35caSApple OSS Distributions 			err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
908*c54f35caSApple OSS Distributions 			assert(KERN_SUCCESS == err);
909*c54f35caSApple OSS Distributions 			ref->mapped += guardSize;
910*c54f35caSApple OSS Distributions 		}
911*c54f35caSApple OSS Distributions 	}
912*c54f35caSApple OSS Distributions 
913*c54f35caSApple OSS Distributions 	return err;
914*c54f35caSApple OSS Distributions }
915*c54f35caSApple OSS Distributions 
916*c54f35caSApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)917*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
918*c54f35caSApple OSS Distributions 	IOMemoryReference * ref,
919*c54f35caSApple OSS Distributions 	vm_map_t            map,
920*c54f35caSApple OSS Distributions 	mach_vm_size_t      inoffset,
921*c54f35caSApple OSS Distributions 	mach_vm_size_t      size,
922*c54f35caSApple OSS Distributions 	IOOptionBits        options,
923*c54f35caSApple OSS Distributions 	mach_vm_address_t * inaddr)
924*c54f35caSApple OSS Distributions {
925*c54f35caSApple OSS Distributions 	IOReturn        err;
926*c54f35caSApple OSS Distributions 	int64_t         offset = inoffset;
927*c54f35caSApple OSS Distributions 	uint32_t        rangeIdx, entryIdx;
928*c54f35caSApple OSS Distributions 	vm_map_offset_t addr, mapAddr;
929*c54f35caSApple OSS Distributions 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
930*c54f35caSApple OSS Distributions 
931*c54f35caSApple OSS Distributions 	mach_vm_address_t nextAddr;
932*c54f35caSApple OSS Distributions 	mach_vm_size_t    nextLen;
933*c54f35caSApple OSS Distributions 	IOByteCount       physLen;
934*c54f35caSApple OSS Distributions 	IOMemoryEntry   * entry;
935*c54f35caSApple OSS Distributions 	vm_prot_t         prot, memEntryCacheMode;
936*c54f35caSApple OSS Distributions 	IOOptionBits      type;
937*c54f35caSApple OSS Distributions 	IOOptionBits      cacheMode;
938*c54f35caSApple OSS Distributions 	vm_tag_t          tag;
939*c54f35caSApple OSS Distributions 	// for the kIOMapPrefault option.
940*c54f35caSApple OSS Distributions 	upl_page_info_t * pageList = NULL;
941*c54f35caSApple OSS Distributions 	UInt              currentPageIndex = 0;
942*c54f35caSApple OSS Distributions 	bool              didAlloc;
943*c54f35caSApple OSS Distributions 
944*c54f35caSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
945*c54f35caSApple OSS Distributions 
946*c54f35caSApple OSS Distributions 	if (ref->mapRef) {
947*c54f35caSApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
948*c54f35caSApple OSS Distributions 		return err;
949*c54f35caSApple OSS Distributions 	}
950*c54f35caSApple OSS Distributions 
951*c54f35caSApple OSS Distributions 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
952*c54f35caSApple OSS Distributions 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
953*c54f35caSApple OSS Distributions 		return err;
954*c54f35caSApple OSS Distributions 	}
955*c54f35caSApple OSS Distributions 
956*c54f35caSApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
957*c54f35caSApple OSS Distributions 
958*c54f35caSApple OSS Distributions 	prot = VM_PROT_READ;
959*c54f35caSApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
960*c54f35caSApple OSS Distributions 		prot |= VM_PROT_WRITE;
961*c54f35caSApple OSS Distributions 	}
962*c54f35caSApple OSS Distributions 	prot &= ref->prot;
963*c54f35caSApple OSS Distributions 
964*c54f35caSApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
965*c54f35caSApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
966*c54f35caSApple OSS Distributions 		// VM system requires write access to update named entry cache mode
967*c54f35caSApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
968*c54f35caSApple OSS Distributions 	}
969*c54f35caSApple OSS Distributions 
970*c54f35caSApple OSS Distributions 	tag = (typeof(tag))getVMTag(map);
971*c54f35caSApple OSS Distributions 
972*c54f35caSApple OSS Distributions 	if (_task) {
973*c54f35caSApple OSS Distributions 		// Find first range for offset
974*c54f35caSApple OSS Distributions 		if (!_rangesCount) {
975*c54f35caSApple OSS Distributions 			return kIOReturnBadArgument;
976*c54f35caSApple OSS Distributions 		}
977*c54f35caSApple OSS Distributions 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
978*c54f35caSApple OSS Distributions 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
979*c54f35caSApple OSS Distributions 			if (remain < nextLen) {
980*c54f35caSApple OSS Distributions 				break;
981*c54f35caSApple OSS Distributions 			}
982*c54f35caSApple OSS Distributions 			remain -= nextLen;
983*c54f35caSApple OSS Distributions 		}
984*c54f35caSApple OSS Distributions 	} else {
985*c54f35caSApple OSS Distributions 		rangeIdx = 0;
986*c54f35caSApple OSS Distributions 		remain   = 0;
987*c54f35caSApple OSS Distributions 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
988*c54f35caSApple OSS Distributions 		nextLen  = size;
989*c54f35caSApple OSS Distributions 	}
990*c54f35caSApple OSS Distributions 
991*c54f35caSApple OSS Distributions 	assert(remain < nextLen);
992*c54f35caSApple OSS Distributions 	if (remain >= nextLen) {
993*c54f35caSApple OSS Distributions 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
994*c54f35caSApple OSS Distributions 		return kIOReturnBadArgument;
995*c54f35caSApple OSS Distributions 	}
996*c54f35caSApple OSS Distributions 
997*c54f35caSApple OSS Distributions 	nextAddr  += remain;
998*c54f35caSApple OSS Distributions 	nextLen   -= remain;
999*c54f35caSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1000*c54f35caSApple OSS Distributions 	pageOffset = (vm_map_page_mask(map) & nextAddr);
1001*c54f35caSApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
1002*c54f35caSApple OSS Distributions 	pageOffset = (page_mask & nextAddr);
1003*c54f35caSApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
1004*c54f35caSApple OSS Distributions 	addr       = 0;
1005*c54f35caSApple OSS Distributions 	didAlloc   = false;
1006*c54f35caSApple OSS Distributions 
1007*c54f35caSApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
1008*c54f35caSApple OSS Distributions 		addr = *inaddr;
1009*c54f35caSApple OSS Distributions 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
1010*c54f35caSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1011*c54f35caSApple OSS Distributions 		}
1012*c54f35caSApple OSS Distributions 		addr -= pageOffset;
1013*c54f35caSApple OSS Distributions 	}
1014*c54f35caSApple OSS Distributions 
1015*c54f35caSApple OSS Distributions 	// find first entry for offset
1016*c54f35caSApple OSS Distributions 	for (entryIdx = 0;
1017*c54f35caSApple OSS Distributions 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1018*c54f35caSApple OSS Distributions 	    entryIdx++) {
1019*c54f35caSApple OSS Distributions 	}
1020*c54f35caSApple OSS Distributions 	entryIdx--;
1021*c54f35caSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1022*c54f35caSApple OSS Distributions 
1023*c54f35caSApple OSS Distributions 	// allocate VM
1024*c54f35caSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1025*c54f35caSApple OSS Distributions 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1026*c54f35caSApple OSS Distributions #else
1027*c54f35caSApple OSS Distributions 	size = round_page_64(size + pageOffset);
1028*c54f35caSApple OSS Distributions #endif
1029*c54f35caSApple OSS Distributions 	if (kIOMapOverwrite & options) {
1030*c54f35caSApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1031*c54f35caSApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1032*c54f35caSApple OSS Distributions 		}
1033*c54f35caSApple OSS Distributions 		err = KERN_SUCCESS;
1034*c54f35caSApple OSS Distributions 	} else {
1035*c54f35caSApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1036*c54f35caSApple OSS Distributions 		ref.map     = map;
1037*c54f35caSApple OSS Distributions 		ref.tag     = tag;
1038*c54f35caSApple OSS Distributions 		ref.options = options;
1039*c54f35caSApple OSS Distributions 		ref.size    = size;
1040*c54f35caSApple OSS Distributions 		ref.prot    = prot;
1041*c54f35caSApple OSS Distributions 		if (options & kIOMapAnywhere) {
1042*c54f35caSApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1043*c54f35caSApple OSS Distributions 			ref.mapped = 0;
1044*c54f35caSApple OSS Distributions 		} else {
1045*c54f35caSApple OSS Distributions 			ref.mapped = addr;
1046*c54f35caSApple OSS Distributions 		}
1047*c54f35caSApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1048*c54f35caSApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1049*c54f35caSApple OSS Distributions 		} else {
1050*c54f35caSApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1051*c54f35caSApple OSS Distributions 		}
1052*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS == err) {
1053*c54f35caSApple OSS Distributions 			addr     = ref.mapped;
1054*c54f35caSApple OSS Distributions 			map      = ref.map;
1055*c54f35caSApple OSS Distributions 			didAlloc = true;
1056*c54f35caSApple OSS Distributions 		}
1057*c54f35caSApple OSS Distributions 	}
1058*c54f35caSApple OSS Distributions 
1059*c54f35caSApple OSS Distributions 	/*
1060*c54f35caSApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1061*c54f35caSApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1062*c54f35caSApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1063*c54f35caSApple OSS Distributions 	 * operations.
1064*c54f35caSApple OSS Distributions 	 */
1065*c54f35caSApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1066*c54f35caSApple OSS Distributions 		options &= ~kIOMapPrefault;
1067*c54f35caSApple OSS Distributions 	}
1068*c54f35caSApple OSS Distributions 
1069*c54f35caSApple OSS Distributions 	/*
1070*c54f35caSApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1071*c54f35caSApple OSS Distributions 	 * memory type, and the underlying data.
1072*c54f35caSApple OSS Distributions 	 */
1073*c54f35caSApple OSS Distributions 	if (options & kIOMapPrefault) {
1074*c54f35caSApple OSS Distributions 		/*
1075*c54f35caSApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1076*c54f35caSApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1077*c54f35caSApple OSS Distributions 		 */
1078*c54f35caSApple OSS Distributions 		assert(_wireCount != 0);
1079*c54f35caSApple OSS Distributions 		assert(_memoryEntries != NULL);
1080*c54f35caSApple OSS Distributions 		if ((_wireCount == 0) ||
1081*c54f35caSApple OSS Distributions 		    (_memoryEntries == NULL)) {
1082*c54f35caSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1083*c54f35caSApple OSS Distributions 			return kIOReturnBadArgument;
1084*c54f35caSApple OSS Distributions 		}
1085*c54f35caSApple OSS Distributions 
1086*c54f35caSApple OSS Distributions 		// Get the page list.
1087*c54f35caSApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1088*c54f35caSApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1089*c54f35caSApple OSS Distributions 		pageList = getPageList(dataP);
1090*c54f35caSApple OSS Distributions 
1091*c54f35caSApple OSS Distributions 		// Get the number of IOPLs.
1092*c54f35caSApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1093*c54f35caSApple OSS Distributions 
1094*c54f35caSApple OSS Distributions 		/*
1095*c54f35caSApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1096*c54f35caSApple OSS Distributions 		 * the offset. The research will go past it, so we'll need to go back to the
1097*c54f35caSApple OSS Distributions 		 * the offset. The search will go past it, so we'll need to go back to the
1098*c54f35caSApple OSS Distributions 		 */
1099*c54f35caSApple OSS Distributions 		UInt ioplIndex = 0;
1100*c54f35caSApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1101*c54f35caSApple OSS Distributions 			ioplIndex++;
1102*c54f35caSApple OSS Distributions 		}
1103*c54f35caSApple OSS Distributions 		ioplIndex--;
1104*c54f35caSApple OSS Distributions 
1105*c54f35caSApple OSS Distributions 		// Retrieve the IOPL info block.
1106*c54f35caSApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1107*c54f35caSApple OSS Distributions 
1108*c54f35caSApple OSS Distributions 		/*
1109*c54f35caSApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1110*c54f35caSApple OSS Distributions 		 * array.
1111*c54f35caSApple OSS Distributions 		 */
1112*c54f35caSApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1113*c54f35caSApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1114*c54f35caSApple OSS Distributions 		} else {
1115*c54f35caSApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1116*c54f35caSApple OSS Distributions 		}
1117*c54f35caSApple OSS Distributions 
1118*c54f35caSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1119*c54f35caSApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1120*c54f35caSApple OSS Distributions 
1121*c54f35caSApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1122*c54f35caSApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1123*c54f35caSApple OSS Distributions 	}
1124*c54f35caSApple OSS Distributions 
1125*c54f35caSApple OSS Distributions 	// enter mappings
1126*c54f35caSApple OSS Distributions 	remain  = size;
1127*c54f35caSApple OSS Distributions 	mapAddr = addr;
1128*c54f35caSApple OSS Distributions 	addr    += pageOffset;
1129*c54f35caSApple OSS Distributions 
1130*c54f35caSApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1131*c54f35caSApple OSS Distributions 		entryOffset = offset - entry->offset;
1132*c54f35caSApple OSS Distributions 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1133*c54f35caSApple OSS Distributions 			err = kIOReturnNotAligned;
1134*c54f35caSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1135*c54f35caSApple OSS Distributions 			break;
1136*c54f35caSApple OSS Distributions 		}
1137*c54f35caSApple OSS Distributions 
1138*c54f35caSApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1139*c54f35caSApple OSS Distributions 			vm_size_t unused = 0;
1140*c54f35caSApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1141*c54f35caSApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1142*c54f35caSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1143*c54f35caSApple OSS Distributions 		}
1144*c54f35caSApple OSS Distributions 
1145*c54f35caSApple OSS Distributions 		entryOffset -= pageOffset;
1146*c54f35caSApple OSS Distributions 		if (entryOffset >= entry->size) {
1147*c54f35caSApple OSS Distributions 			panic("entryOffset");
1148*c54f35caSApple OSS Distributions 		}
1149*c54f35caSApple OSS Distributions 		chunk = entry->size - entryOffset;
1150*c54f35caSApple OSS Distributions 		if (chunk) {
1151*c54f35caSApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags = {
1152*c54f35caSApple OSS Distributions 				.vmf_fixed = true,
1153*c54f35caSApple OSS Distributions 				.vmf_overwrite = true,
1154*c54f35caSApple OSS Distributions 				.vm_tag = tag,
1155*c54f35caSApple OSS Distributions 				.vmkf_iokit_acct = true,
1156*c54f35caSApple OSS Distributions 			};
1157*c54f35caSApple OSS Distributions 
1158*c54f35caSApple OSS Distributions 			if (chunk > remain) {
1159*c54f35caSApple OSS Distributions 				chunk = remain;
1160*c54f35caSApple OSS Distributions 			}
1161*c54f35caSApple OSS Distributions 			if (options & kIOMapPrefault) {
1162*c54f35caSApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1163*c54f35caSApple OSS Distributions 
1164*c54f35caSApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1165*c54f35caSApple OSS Distributions 				    &mapAddr,
1166*c54f35caSApple OSS Distributions 				    chunk, 0 /* mask */,
1167*c54f35caSApple OSS Distributions 				    vmk_flags,
1168*c54f35caSApple OSS Distributions 				    entry->entry,
1169*c54f35caSApple OSS Distributions 				    entryOffset,
1170*c54f35caSApple OSS Distributions 				    prot,                        // cur
1171*c54f35caSApple OSS Distributions 				    prot,                        // max
1172*c54f35caSApple OSS Distributions 				    &pageList[currentPageIndex],
1173*c54f35caSApple OSS Distributions 				    nb_pages);
1174*c54f35caSApple OSS Distributions 
1175*c54f35caSApple OSS Distributions 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1176*c54f35caSApple OSS Distributions 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1177*c54f35caSApple OSS Distributions 				}
1178*c54f35caSApple OSS Distributions 				// Compute the next index in the page list.
1179*c54f35caSApple OSS Distributions 				currentPageIndex += nb_pages;
1180*c54f35caSApple OSS Distributions 				assert(currentPageIndex <= _pages);
1181*c54f35caSApple OSS Distributions 			} else {
1182*c54f35caSApple OSS Distributions 				err = vm_map_enter_mem_object(map,
1183*c54f35caSApple OSS Distributions 				    &mapAddr,
1184*c54f35caSApple OSS Distributions 				    chunk, 0 /* mask */,
1185*c54f35caSApple OSS Distributions 				    vmk_flags,
1186*c54f35caSApple OSS Distributions 				    entry->entry,
1187*c54f35caSApple OSS Distributions 				    entryOffset,
1188*c54f35caSApple OSS Distributions 				    false,               // copy
1189*c54f35caSApple OSS Distributions 				    prot,               // cur
1190*c54f35caSApple OSS Distributions 				    prot,               // max
1191*c54f35caSApple OSS Distributions 				    VM_INHERIT_NONE);
1192*c54f35caSApple OSS Distributions 			}
1193*c54f35caSApple OSS Distributions 			if (KERN_SUCCESS != err) {
1194*c54f35caSApple OSS Distributions 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1195*c54f35caSApple OSS Distributions 				break;
1196*c54f35caSApple OSS Distributions 			}
1197*c54f35caSApple OSS Distributions 			remain -= chunk;
1198*c54f35caSApple OSS Distributions 			if (!remain) {
1199*c54f35caSApple OSS Distributions 				break;
1200*c54f35caSApple OSS Distributions 			}
1201*c54f35caSApple OSS Distributions 			mapAddr  += chunk;
1202*c54f35caSApple OSS Distributions 			offset   += chunk - pageOffset;
1203*c54f35caSApple OSS Distributions 		}
1204*c54f35caSApple OSS Distributions 		pageOffset = 0;
1205*c54f35caSApple OSS Distributions 		entry++;
1206*c54f35caSApple OSS Distributions 		entryIdx++;
1207*c54f35caSApple OSS Distributions 		if (entryIdx >= ref->count) {
1208*c54f35caSApple OSS Distributions 			err = kIOReturnOverrun;
1209*c54f35caSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1210*c54f35caSApple OSS Distributions 			break;
1211*c54f35caSApple OSS Distributions 		}
1212*c54f35caSApple OSS Distributions 	}
1213*c54f35caSApple OSS Distributions 
1214*c54f35caSApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1215*c54f35caSApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1216*c54f35caSApple OSS Distributions 		addr = 0;
1217*c54f35caSApple OSS Distributions 	}
1218*c54f35caSApple OSS Distributions 	*inaddr = addr;
1219*c54f35caSApple OSS Distributions 
1220*c54f35caSApple OSS Distributions 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1221*c54f35caSApple OSS Distributions 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1222*c54f35caSApple OSS Distributions 	}
1223*c54f35caSApple OSS Distributions 	return err;
1224*c54f35caSApple OSS Distributions }
1225*c54f35caSApple OSS Distributions 
1226*c54f35caSApple OSS Distributions #define LOGUNALIGN 0
1227*c54f35caSApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1228*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1229*c54f35caSApple OSS Distributions 	IOMemoryReference * ref,
1230*c54f35caSApple OSS Distributions 	vm_map_t            map,
1231*c54f35caSApple OSS Distributions 	mach_vm_size_t      inoffset,
1232*c54f35caSApple OSS Distributions 	mach_vm_size_t      size,
1233*c54f35caSApple OSS Distributions 	IOOptionBits        options,
1234*c54f35caSApple OSS Distributions 	mach_vm_address_t * inaddr)
1235*c54f35caSApple OSS Distributions {
1236*c54f35caSApple OSS Distributions 	IOReturn            err;
1237*c54f35caSApple OSS Distributions 	int64_t             offset = inoffset;
1238*c54f35caSApple OSS Distributions 	uint32_t            entryIdx, firstEntryIdx;
1239*c54f35caSApple OSS Distributions 	vm_map_offset_t     addr, mapAddr, mapAddrOut;
1240*c54f35caSApple OSS Distributions 	vm_map_offset_t     entryOffset, remain, chunk;
1241*c54f35caSApple OSS Distributions 
1242*c54f35caSApple OSS Distributions 	IOMemoryEntry    * entry;
1243*c54f35caSApple OSS Distributions 	vm_prot_t          prot, memEntryCacheMode;
1244*c54f35caSApple OSS Distributions 	IOOptionBits       type;
1245*c54f35caSApple OSS Distributions 	IOOptionBits       cacheMode;
1246*c54f35caSApple OSS Distributions 	vm_tag_t           tag;
1247*c54f35caSApple OSS Distributions 	// for the kIOMapPrefault option.
1248*c54f35caSApple OSS Distributions 	upl_page_info_t  * pageList = NULL;
1249*c54f35caSApple OSS Distributions 	UInt               currentPageIndex = 0;
1250*c54f35caSApple OSS Distributions 	bool               didAlloc;
1251*c54f35caSApple OSS Distributions 
1252*c54f35caSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1253*c54f35caSApple OSS Distributions 
1254*c54f35caSApple OSS Distributions 	if (ref->mapRef) {
1255*c54f35caSApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1256*c54f35caSApple OSS Distributions 		return err;
1257*c54f35caSApple OSS Distributions 	}
1258*c54f35caSApple OSS Distributions 
1259*c54f35caSApple OSS Distributions #if LOGUNALIGN
1260*c54f35caSApple OSS Distributions 	printf("MAP offset %qx, %qx\n", inoffset, size);
1261*c54f35caSApple OSS Distributions #endif
1262*c54f35caSApple OSS Distributions 
1263*c54f35caSApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
1264*c54f35caSApple OSS Distributions 
1265*c54f35caSApple OSS Distributions 	prot = VM_PROT_READ;
1266*c54f35caSApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
1267*c54f35caSApple OSS Distributions 		prot |= VM_PROT_WRITE;
1268*c54f35caSApple OSS Distributions 	}
1269*c54f35caSApple OSS Distributions 	prot &= ref->prot;
1270*c54f35caSApple OSS Distributions 
1271*c54f35caSApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1272*c54f35caSApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
1273*c54f35caSApple OSS Distributions 		// VM system requires write access to update named entry cache mode
1274*c54f35caSApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1275*c54f35caSApple OSS Distributions 	}
1276*c54f35caSApple OSS Distributions 
1277*c54f35caSApple OSS Distributions 	tag = (vm_tag_t) getVMTag(map);
1278*c54f35caSApple OSS Distributions 
1279*c54f35caSApple OSS Distributions 	addr       = 0;
1280*c54f35caSApple OSS Distributions 	didAlloc   = false;
1281*c54f35caSApple OSS Distributions 
1282*c54f35caSApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
1283*c54f35caSApple OSS Distributions 		addr = *inaddr;
1284*c54f35caSApple OSS Distributions 	}
1285*c54f35caSApple OSS Distributions 
1286*c54f35caSApple OSS Distributions 	// find first entry for offset
1287*c54f35caSApple OSS Distributions 	for (firstEntryIdx = 0;
1288*c54f35caSApple OSS Distributions 	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1289*c54f35caSApple OSS Distributions 	    firstEntryIdx++) {
1290*c54f35caSApple OSS Distributions 	}
1291*c54f35caSApple OSS Distributions 	firstEntryIdx--;
1292*c54f35caSApple OSS Distributions 
1293*c54f35caSApple OSS Distributions 	// calculate required VM space
1294*c54f35caSApple OSS Distributions 
1295*c54f35caSApple OSS Distributions 	entryIdx = firstEntryIdx;
1296*c54f35caSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1297*c54f35caSApple OSS Distributions 
1298*c54f35caSApple OSS Distributions 	remain  = size;
1299*c54f35caSApple OSS Distributions 	int64_t iteroffset = offset;
1300*c54f35caSApple OSS Distributions 	uint64_t mapSize = 0;
1301*c54f35caSApple OSS Distributions 	while (remain) {
1302*c54f35caSApple OSS Distributions 		entryOffset = iteroffset - entry->offset;
1303*c54f35caSApple OSS Distributions 		if (entryOffset >= entry->size) {
1304*c54f35caSApple OSS Distributions 			panic("entryOffset");
1305*c54f35caSApple OSS Distributions 		}
1306*c54f35caSApple OSS Distributions 
1307*c54f35caSApple OSS Distributions #if LOGUNALIGN
1308*c54f35caSApple OSS Distributions 		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1309*c54f35caSApple OSS Distributions 		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1310*c54f35caSApple OSS Distributions #endif
1311*c54f35caSApple OSS Distributions 
1312*c54f35caSApple OSS Distributions 		chunk = entry->size - entryOffset;
1313*c54f35caSApple OSS Distributions 		if (chunk) {
1314*c54f35caSApple OSS Distributions 			if (chunk > remain) {
1315*c54f35caSApple OSS Distributions 				chunk = remain;
1316*c54f35caSApple OSS Distributions 			}
1317*c54f35caSApple OSS Distributions 			mach_vm_size_t entrySize;
1318*c54f35caSApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1319*c54f35caSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1320*c54f35caSApple OSS Distributions 			mapSize += entrySize;
1321*c54f35caSApple OSS Distributions 
1322*c54f35caSApple OSS Distributions 			remain -= chunk;
1323*c54f35caSApple OSS Distributions 			if (!remain) {
1324*c54f35caSApple OSS Distributions 				break;
1325*c54f35caSApple OSS Distributions 			}
1326*c54f35caSApple OSS Distributions 			iteroffset   += chunk; // - pageOffset;
1327*c54f35caSApple OSS Distributions 		}
1328*c54f35caSApple OSS Distributions 		entry++;
1329*c54f35caSApple OSS Distributions 		entryIdx++;
1330*c54f35caSApple OSS Distributions 		if (entryIdx >= ref->count) {
1331*c54f35caSApple OSS Distributions 			panic("overrun");
1332*c54f35caSApple OSS Distributions 			err = kIOReturnOverrun;
1333*c54f35caSApple OSS Distributions 			break;
1334*c54f35caSApple OSS Distributions 		}
1335*c54f35caSApple OSS Distributions 	}
1336*c54f35caSApple OSS Distributions 
1337*c54f35caSApple OSS Distributions 	if (kIOMapOverwrite & options) {
1338*c54f35caSApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339*c54f35caSApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1340*c54f35caSApple OSS Distributions 		}
1341*c54f35caSApple OSS Distributions 		err = KERN_SUCCESS;
1342*c54f35caSApple OSS Distributions 	} else {
1343*c54f35caSApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1344*c54f35caSApple OSS Distributions 		ref.map     = map;
1345*c54f35caSApple OSS Distributions 		ref.tag     = tag;
1346*c54f35caSApple OSS Distributions 		ref.options = options;
1347*c54f35caSApple OSS Distributions 		ref.size    = mapSize;
1348*c54f35caSApple OSS Distributions 		ref.prot    = prot;
1349*c54f35caSApple OSS Distributions 		if (options & kIOMapAnywhere) {
1350*c54f35caSApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1351*c54f35caSApple OSS Distributions 			ref.mapped = 0;
1352*c54f35caSApple OSS Distributions 		} else {
1353*c54f35caSApple OSS Distributions 			ref.mapped = addr;
1354*c54f35caSApple OSS Distributions 		}
1355*c54f35caSApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1356*c54f35caSApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1357*c54f35caSApple OSS Distributions 		} else {
1358*c54f35caSApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1359*c54f35caSApple OSS Distributions 		}
1360*c54f35caSApple OSS Distributions 
1361*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS == err) {
1362*c54f35caSApple OSS Distributions 			addr     = ref.mapped;
1363*c54f35caSApple OSS Distributions 			map      = ref.map;
1364*c54f35caSApple OSS Distributions 			didAlloc = true;
1365*c54f35caSApple OSS Distributions 		}
1366*c54f35caSApple OSS Distributions #if LOGUNALIGN
1367*c54f35caSApple OSS Distributions 		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1368*c54f35caSApple OSS Distributions #endif
1369*c54f35caSApple OSS Distributions 	}
1370*c54f35caSApple OSS Distributions 
1371*c54f35caSApple OSS Distributions 	/*
1372*c54f35caSApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1373*c54f35caSApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1374*c54f35caSApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1375*c54f35caSApple OSS Distributions 	 * operations.
1376*c54f35caSApple OSS Distributions 	 */
1377*c54f35caSApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1378*c54f35caSApple OSS Distributions 		options &= ~kIOMapPrefault;
1379*c54f35caSApple OSS Distributions 	}
1380*c54f35caSApple OSS Distributions 
1381*c54f35caSApple OSS Distributions 	/*
1382*c54f35caSApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1383*c54f35caSApple OSS Distributions 	 * memory type, and the underlying data.
1384*c54f35caSApple OSS Distributions 	 */
1385*c54f35caSApple OSS Distributions 	if (options & kIOMapPrefault) {
1386*c54f35caSApple OSS Distributions 		/*
1387*c54f35caSApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1388*c54f35caSApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1389*c54f35caSApple OSS Distributions 		 */
1390*c54f35caSApple OSS Distributions 		assert(_wireCount != 0);
1391*c54f35caSApple OSS Distributions 		assert(_memoryEntries != NULL);
1392*c54f35caSApple OSS Distributions 		if ((_wireCount == 0) ||
1393*c54f35caSApple OSS Distributions 		    (_memoryEntries == NULL)) {
1394*c54f35caSApple OSS Distributions 			return kIOReturnBadArgument;
1395*c54f35caSApple OSS Distributions 		}
1396*c54f35caSApple OSS Distributions 
1397*c54f35caSApple OSS Distributions 		// Get the page list.
1398*c54f35caSApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1399*c54f35caSApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1400*c54f35caSApple OSS Distributions 		pageList = getPageList(dataP);
1401*c54f35caSApple OSS Distributions 
1402*c54f35caSApple OSS Distributions 		// Get the number of IOPLs.
1403*c54f35caSApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1404*c54f35caSApple OSS Distributions 
1405*c54f35caSApple OSS Distributions 		/*
1406*c54f35caSApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1407*c54f35caSApple OSS Distributions 		 * the offset. The research will go past it, so we'll need to go back to the
1408*c54f35caSApple OSS Distributions 		 * the offset. The search will go past it, so we'll need to go back to the
1409*c54f35caSApple OSS Distributions 		 */
1410*c54f35caSApple OSS Distributions 		UInt ioplIndex = 0;
1411*c54f35caSApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1412*c54f35caSApple OSS Distributions 			ioplIndex++;
1413*c54f35caSApple OSS Distributions 		}
1414*c54f35caSApple OSS Distributions 		ioplIndex--;
1415*c54f35caSApple OSS Distributions 
1416*c54f35caSApple OSS Distributions 		// Retrieve the IOPL info block.
1417*c54f35caSApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1418*c54f35caSApple OSS Distributions 
1419*c54f35caSApple OSS Distributions 		/*
1420*c54f35caSApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1421*c54f35caSApple OSS Distributions 		 * array.
1422*c54f35caSApple OSS Distributions 		 */
1423*c54f35caSApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1424*c54f35caSApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1425*c54f35caSApple OSS Distributions 		} else {
1426*c54f35caSApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1427*c54f35caSApple OSS Distributions 		}
1428*c54f35caSApple OSS Distributions 
1429*c54f35caSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1430*c54f35caSApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1431*c54f35caSApple OSS Distributions 
1432*c54f35caSApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1433*c54f35caSApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1434*c54f35caSApple OSS Distributions 	}
1435*c54f35caSApple OSS Distributions 
1436*c54f35caSApple OSS Distributions 	// enter mappings
1437*c54f35caSApple OSS Distributions 	remain   = size;
1438*c54f35caSApple OSS Distributions 	mapAddr  = addr;
1439*c54f35caSApple OSS Distributions 	entryIdx = firstEntryIdx;
1440*c54f35caSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1441*c54f35caSApple OSS Distributions 
1442*c54f35caSApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1443*c54f35caSApple OSS Distributions #if LOGUNALIGN
1444*c54f35caSApple OSS Distributions 		printf("offset %qx, %qx\n", offset, entry->offset);
1445*c54f35caSApple OSS Distributions #endif
1446*c54f35caSApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1447*c54f35caSApple OSS Distributions 			vm_size_t unused = 0;
1448*c54f35caSApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1449*c54f35caSApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1450*c54f35caSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1451*c54f35caSApple OSS Distributions 		}
1452*c54f35caSApple OSS Distributions 		entryOffset = offset - entry->offset;
1453*c54f35caSApple OSS Distributions 		if (entryOffset >= entry->size) {
1454*c54f35caSApple OSS Distributions 			panic("entryOffset");
1455*c54f35caSApple OSS Distributions 		}
1456*c54f35caSApple OSS Distributions 		chunk = entry->size - entryOffset;
1457*c54f35caSApple OSS Distributions #if LOGUNALIGN
1458*c54f35caSApple OSS Distributions 		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1459*c54f35caSApple OSS Distributions #endif
1460*c54f35caSApple OSS Distributions 		if (chunk) {
1461*c54f35caSApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags = {
1462*c54f35caSApple OSS Distributions 				.vmf_fixed = true,
1463*c54f35caSApple OSS Distributions 				.vmf_overwrite = true,
1464*c54f35caSApple OSS Distributions 				.vmf_return_data_addr = true,
1465*c54f35caSApple OSS Distributions 				.vm_tag = tag,
1466*c54f35caSApple OSS Distributions 				.vmkf_iokit_acct = true,
1467*c54f35caSApple OSS Distributions 			};
1468*c54f35caSApple OSS Distributions 
1469*c54f35caSApple OSS Distributions 			if (chunk > remain) {
1470*c54f35caSApple OSS Distributions 				chunk = remain;
1471*c54f35caSApple OSS Distributions 			}
1472*c54f35caSApple OSS Distributions 			mapAddrOut = mapAddr;
1473*c54f35caSApple OSS Distributions 			if (options & kIOMapPrefault) {
1474*c54f35caSApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1475*c54f35caSApple OSS Distributions 
1476*c54f35caSApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1477*c54f35caSApple OSS Distributions 				    &mapAddrOut,
1478*c54f35caSApple OSS Distributions 				    chunk, 0 /* mask */,
1479*c54f35caSApple OSS Distributions 				    vmk_flags,
1480*c54f35caSApple OSS Distributions 				    entry->entry,
1481*c54f35caSApple OSS Distributions 				    entryOffset,
1482*c54f35caSApple OSS Distributions 				    prot,                        // cur
1483*c54f35caSApple OSS Distributions 				    prot,                        // max
1484*c54f35caSApple OSS Distributions 				    &pageList[currentPageIndex],
1485*c54f35caSApple OSS Distributions 				    nb_pages);
1486*c54f35caSApple OSS Distributions 
1487*c54f35caSApple OSS Distributions 				// Compute the next index in the page list.
1488*c54f35caSApple OSS Distributions 				currentPageIndex += nb_pages;
1489*c54f35caSApple OSS Distributions 				assert(currentPageIndex <= _pages);
1490*c54f35caSApple OSS Distributions 			} else {
1491*c54f35caSApple OSS Distributions #if LOGUNALIGN
1492*c54f35caSApple OSS Distributions 				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1493*c54f35caSApple OSS Distributions #endif
1494*c54f35caSApple OSS Distributions 				err = vm_map_enter_mem_object(map,
1495*c54f35caSApple OSS Distributions 				    &mapAddrOut,
1496*c54f35caSApple OSS Distributions 				    chunk, 0 /* mask */,
1497*c54f35caSApple OSS Distributions 				    vmk_flags,
1498*c54f35caSApple OSS Distributions 				    entry->entry,
1499*c54f35caSApple OSS Distributions 				    entryOffset,
1500*c54f35caSApple OSS Distributions 				    false,               // copy
1501*c54f35caSApple OSS Distributions 				    prot,               // cur
1502*c54f35caSApple OSS Distributions 				    prot,               // max
1503*c54f35caSApple OSS Distributions 				    VM_INHERIT_NONE);
1504*c54f35caSApple OSS Distributions 			}
1505*c54f35caSApple OSS Distributions 			if (KERN_SUCCESS != err) {
1506*c54f35caSApple OSS Distributions 				panic("map enter err %x", err);
1507*c54f35caSApple OSS Distributions 				break;
1508*c54f35caSApple OSS Distributions 			}
1509*c54f35caSApple OSS Distributions #if LOGUNALIGN
1510*c54f35caSApple OSS Distributions 			printf("mapAddr o %qx\n", mapAddrOut);
1511*c54f35caSApple OSS Distributions #endif
1512*c54f35caSApple OSS Distributions 			if (entryIdx == firstEntryIdx) {
1513*c54f35caSApple OSS Distributions 				addr = mapAddrOut;
1514*c54f35caSApple OSS Distributions 			}
1515*c54f35caSApple OSS Distributions 			remain -= chunk;
1516*c54f35caSApple OSS Distributions 			if (!remain) {
1517*c54f35caSApple OSS Distributions 				break;
1518*c54f35caSApple OSS Distributions 			}
1519*c54f35caSApple OSS Distributions 			mach_vm_size_t entrySize;
1520*c54f35caSApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1521*c54f35caSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1522*c54f35caSApple OSS Distributions 			mapAddr += entrySize;
1523*c54f35caSApple OSS Distributions 			offset  += chunk;
1524*c54f35caSApple OSS Distributions 		}
1525*c54f35caSApple OSS Distributions 
1526*c54f35caSApple OSS Distributions 		entry++;
1527*c54f35caSApple OSS Distributions 		entryIdx++;
1528*c54f35caSApple OSS Distributions 		if (entryIdx >= ref->count) {
1529*c54f35caSApple OSS Distributions 			err = kIOReturnOverrun;
1530*c54f35caSApple OSS Distributions 			break;
1531*c54f35caSApple OSS Distributions 		}
1532*c54f35caSApple OSS Distributions 	}
1533*c54f35caSApple OSS Distributions 
1534*c54f35caSApple OSS Distributions 	if (KERN_SUCCESS != err) {
1535*c54f35caSApple OSS Distributions 		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1536*c54f35caSApple OSS Distributions 	}
1537*c54f35caSApple OSS Distributions 
1538*c54f35caSApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1539*c54f35caSApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1540*c54f35caSApple OSS Distributions 		addr = 0;
1541*c54f35caSApple OSS Distributions 	}
1542*c54f35caSApple OSS Distributions 	*inaddr = addr;
1543*c54f35caSApple OSS Distributions 
1544*c54f35caSApple OSS Distributions 	return err;
1545*c54f35caSApple OSS Distributions }
1546*c54f35caSApple OSS Distributions 
1547*c54f35caSApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1548*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1549*c54f35caSApple OSS Distributions 	IOMemoryReference * ref,
1550*c54f35caSApple OSS Distributions 	uint64_t          * offset)
1551*c54f35caSApple OSS Distributions {
1552*c54f35caSApple OSS Distributions 	kern_return_t kr;
1553*c54f35caSApple OSS Distributions 	vm_object_offset_t data_offset = 0;
1554*c54f35caSApple OSS Distributions 	uint64_t total;
1555*c54f35caSApple OSS Distributions 	uint32_t idx;
1556*c54f35caSApple OSS Distributions 
1557*c54f35caSApple OSS Distributions 	assert(ref->count);
1558*c54f35caSApple OSS Distributions 	if (offset) {
1559*c54f35caSApple OSS Distributions 		*offset = (uint64_t) data_offset;
1560*c54f35caSApple OSS Distributions 	}
1561*c54f35caSApple OSS Distributions 	total = 0;
1562*c54f35caSApple OSS Distributions 	for (idx = 0; idx < ref->count; idx++) {
1563*c54f35caSApple OSS Distributions 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1564*c54f35caSApple OSS Distributions 		    &data_offset);
1565*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS != kr) {
1566*c54f35caSApple OSS Distributions 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1567*c54f35caSApple OSS Distributions 		} else if (0 != data_offset) {
1568*c54f35caSApple OSS Distributions 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1569*c54f35caSApple OSS Distributions 		}
1570*c54f35caSApple OSS Distributions 		if (offset && !idx) {
1571*c54f35caSApple OSS Distributions 			*offset = (uint64_t) data_offset;
1572*c54f35caSApple OSS Distributions 		}
1573*c54f35caSApple OSS Distributions 		total += round_page(data_offset + ref->entries[idx].size);
1574*c54f35caSApple OSS Distributions 	}
1575*c54f35caSApple OSS Distributions 
1576*c54f35caSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1577*c54f35caSApple OSS Distributions 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1578*c54f35caSApple OSS Distributions 
1579*c54f35caSApple OSS Distributions 	return total;
1580*c54f35caSApple OSS Distributions }
1581*c54f35caSApple OSS Distributions 
1582*c54f35caSApple OSS Distributions 
1583*c54f35caSApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1584*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1585*c54f35caSApple OSS Distributions 	IOMemoryReference * ref,
1586*c54f35caSApple OSS Distributions 	IOByteCount       * residentPageCount,
1587*c54f35caSApple OSS Distributions 	IOByteCount       * dirtyPageCount)
1588*c54f35caSApple OSS Distributions {
1589*c54f35caSApple OSS Distributions 	IOReturn        err;
1590*c54f35caSApple OSS Distributions 	IOMemoryEntry * entries;
1591*c54f35caSApple OSS Distributions 	unsigned int resident, dirty;
1592*c54f35caSApple OSS Distributions 	unsigned int totalResident, totalDirty;
1593*c54f35caSApple OSS Distributions 
1594*c54f35caSApple OSS Distributions 	totalResident = totalDirty = 0;
1595*c54f35caSApple OSS Distributions 	err = kIOReturnSuccess;
1596*c54f35caSApple OSS Distributions 	entries = ref->entries + ref->count;
1597*c54f35caSApple OSS Distributions 	while (entries > &ref->entries[0]) {
1598*c54f35caSApple OSS Distributions 		entries--;
1599*c54f35caSApple OSS Distributions 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1600*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1601*c54f35caSApple OSS Distributions 			break;
1602*c54f35caSApple OSS Distributions 		}
1603*c54f35caSApple OSS Distributions 		totalResident += resident;
1604*c54f35caSApple OSS Distributions 		totalDirty    += dirty;
1605*c54f35caSApple OSS Distributions 	}
1606*c54f35caSApple OSS Distributions 
1607*c54f35caSApple OSS Distributions 	if (residentPageCount) {
1608*c54f35caSApple OSS Distributions 		*residentPageCount = totalResident;
1609*c54f35caSApple OSS Distributions 	}
1610*c54f35caSApple OSS Distributions 	if (dirtyPageCount) {
1611*c54f35caSApple OSS Distributions 		*dirtyPageCount    = totalDirty;
1612*c54f35caSApple OSS Distributions 	}
1613*c54f35caSApple OSS Distributions 	return err;
1614*c54f35caSApple OSS Distributions }
1615*c54f35caSApple OSS Distributions 
1616*c54f35caSApple OSS Distributions IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1617*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1618*c54f35caSApple OSS Distributions 	IOMemoryReference * ref,
1619*c54f35caSApple OSS Distributions 	IOOptionBits        newState,
1620*c54f35caSApple OSS Distributions 	IOOptionBits      * oldState)
1621*c54f35caSApple OSS Distributions {
1622*c54f35caSApple OSS Distributions 	IOReturn        err;
1623*c54f35caSApple OSS Distributions 	IOMemoryEntry * entries;
1624*c54f35caSApple OSS Distributions 	vm_purgable_t   control;
1625*c54f35caSApple OSS Distributions 	int             totalState, state;
1626*c54f35caSApple OSS Distributions 
1627*c54f35caSApple OSS Distributions 	totalState = kIOMemoryPurgeableNonVolatile;
1628*c54f35caSApple OSS Distributions 	err = kIOReturnSuccess;
1629*c54f35caSApple OSS Distributions 	entries = ref->entries + ref->count;
1630*c54f35caSApple OSS Distributions 	while (entries > &ref->entries[0]) {
1631*c54f35caSApple OSS Distributions 		entries--;
1632*c54f35caSApple OSS Distributions 
1633*c54f35caSApple OSS Distributions 		err = purgeableControlBits(newState, &control, &state);
1634*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1635*c54f35caSApple OSS Distributions 			break;
1636*c54f35caSApple OSS Distributions 		}
1637*c54f35caSApple OSS Distributions 		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1638*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1639*c54f35caSApple OSS Distributions 			break;
1640*c54f35caSApple OSS Distributions 		}
1641*c54f35caSApple OSS Distributions 		err = purgeableStateBits(&state);
1642*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1643*c54f35caSApple OSS Distributions 			break;
1644*c54f35caSApple OSS Distributions 		}
1645*c54f35caSApple OSS Distributions 
1646*c54f35caSApple OSS Distributions 		if (kIOMemoryPurgeableEmpty == state) {
1647*c54f35caSApple OSS Distributions 			totalState = kIOMemoryPurgeableEmpty;
1648*c54f35caSApple OSS Distributions 		} else if (kIOMemoryPurgeableEmpty == totalState) {
1649*c54f35caSApple OSS Distributions 			continue;
1650*c54f35caSApple OSS Distributions 		} else if (kIOMemoryPurgeableVolatile == totalState) {
1651*c54f35caSApple OSS Distributions 			continue;
1652*c54f35caSApple OSS Distributions 		} else if (kIOMemoryPurgeableVolatile == state) {
1653*c54f35caSApple OSS Distributions 			totalState = kIOMemoryPurgeableVolatile;
1654*c54f35caSApple OSS Distributions 		} else {
1655*c54f35caSApple OSS Distributions 			totalState = kIOMemoryPurgeableNonVolatile;
1656*c54f35caSApple OSS Distributions 		}
1657*c54f35caSApple OSS Distributions 	}
1658*c54f35caSApple OSS Distributions 
1659*c54f35caSApple OSS Distributions 	if (oldState) {
1660*c54f35caSApple OSS Distributions 		*oldState = totalState;
1661*c54f35caSApple OSS Distributions 	}
1662*c54f35caSApple OSS Distributions 	return err;
1663*c54f35caSApple OSS Distributions }
1664*c54f35caSApple OSS Distributions 
1665*c54f35caSApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1666*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667*c54f35caSApple OSS Distributions 	IOMemoryReference * ref,
1668*c54f35caSApple OSS Distributions 	task_t              newOwner,
1669*c54f35caSApple OSS Distributions 	int                 newLedgerTag,
1670*c54f35caSApple OSS Distributions 	IOOptionBits        newLedgerOptions)
1671*c54f35caSApple OSS Distributions {
1672*c54f35caSApple OSS Distributions 	IOReturn        err, totalErr;
1673*c54f35caSApple OSS Distributions 	IOMemoryEntry * entries;
1674*c54f35caSApple OSS Distributions 
1675*c54f35caSApple OSS Distributions 	totalErr = kIOReturnSuccess;
1676*c54f35caSApple OSS Distributions 	entries = ref->entries + ref->count;
1677*c54f35caSApple OSS Distributions 	while (entries > &ref->entries[0]) {
1678*c54f35caSApple OSS Distributions 		entries--;
1679*c54f35caSApple OSS Distributions 
1680*c54f35caSApple OSS Distributions 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1681*c54f35caSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1682*c54f35caSApple OSS Distributions 			totalErr = err;
1683*c54f35caSApple OSS Distributions 		}
1684*c54f35caSApple OSS Distributions 	}
1685*c54f35caSApple OSS Distributions 
1686*c54f35caSApple OSS Distributions 	return totalErr;
1687*c54f35caSApple OSS Distributions }
1688*c54f35caSApple OSS Distributions 
1689*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690*c54f35caSApple OSS Distributions 
1691*c54f35caSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1692*c54f35caSApple OSS Distributions IOMemoryDescriptor::withAddress(void *      address,
1693*c54f35caSApple OSS Distributions     IOByteCount   length,
1694*c54f35caSApple OSS Distributions     IODirection direction)
1695*c54f35caSApple OSS Distributions {
1696*c54f35caSApple OSS Distributions 	return IOMemoryDescriptor::
1697*c54f35caSApple OSS Distributions 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1698*c54f35caSApple OSS Distributions }
1699*c54f35caSApple OSS Distributions 
1700*c54f35caSApple OSS Distributions #ifndef __LP64__
1701*c54f35caSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1702*c54f35caSApple OSS Distributions IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703*c54f35caSApple OSS Distributions     IOByteCount  length,
1704*c54f35caSApple OSS Distributions     IODirection  direction,
1705*c54f35caSApple OSS Distributions     task_t       task)
1706*c54f35caSApple OSS Distributions {
1707*c54f35caSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708*c54f35caSApple OSS Distributions 	if (that) {
1709*c54f35caSApple OSS Distributions 		if (that->initWithAddress(address, length, direction, task)) {
1710*c54f35caSApple OSS Distributions 			return os::move(that);
1711*c54f35caSApple OSS Distributions 		}
1712*c54f35caSApple OSS Distributions 	}
1713*c54f35caSApple OSS Distributions 	return nullptr;
1714*c54f35caSApple OSS Distributions }
1715*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
1716*c54f35caSApple OSS Distributions 
/*
 * Create a descriptor over a single physical address range. TASK_NULL
 * selects physical (rather than task-virtual) addressing in
 * withAddressRange / withAddressRanges.
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withPhysicalAddress(
	IOPhysicalAddress       address,
	IOByteCount             length,
	IODirection             direction )
{
	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
}
1725*c54f35caSApple OSS Distributions 
1726*c54f35caSApple OSS Distributions #ifndef __LP64__
1727*c54f35caSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1728*c54f35caSApple OSS Distributions IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729*c54f35caSApple OSS Distributions     UInt32           withCount,
1730*c54f35caSApple OSS Distributions     IODirection      direction,
1731*c54f35caSApple OSS Distributions     task_t           task,
1732*c54f35caSApple OSS Distributions     bool             asReference)
1733*c54f35caSApple OSS Distributions {
1734*c54f35caSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735*c54f35caSApple OSS Distributions 	if (that) {
1736*c54f35caSApple OSS Distributions 		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737*c54f35caSApple OSS Distributions 			return os::move(that);
1738*c54f35caSApple OSS Distributions 		}
1739*c54f35caSApple OSS Distributions 	}
1740*c54f35caSApple OSS Distributions 	return nullptr;
1741*c54f35caSApple OSS Distributions }
1742*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
1743*c54f35caSApple OSS Distributions 
1744*c54f35caSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1745*c54f35caSApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746*c54f35caSApple OSS Distributions     mach_vm_size_t length,
1747*c54f35caSApple OSS Distributions     IOOptionBits   options,
1748*c54f35caSApple OSS Distributions     task_t         task)
1749*c54f35caSApple OSS Distributions {
1750*c54f35caSApple OSS Distributions 	IOAddressRange range = { address, length };
1751*c54f35caSApple OSS Distributions 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1752*c54f35caSApple OSS Distributions }
1753*c54f35caSApple OSS Distributions 
1754*c54f35caSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1755*c54f35caSApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1756*c54f35caSApple OSS Distributions     UInt32           rangeCount,
1757*c54f35caSApple OSS Distributions     IOOptionBits     options,
1758*c54f35caSApple OSS Distributions     task_t           task)
1759*c54f35caSApple OSS Distributions {
1760*c54f35caSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761*c54f35caSApple OSS Distributions 	if (that) {
1762*c54f35caSApple OSS Distributions 		if (task) {
1763*c54f35caSApple OSS Distributions 			options |= kIOMemoryTypeVirtual64;
1764*c54f35caSApple OSS Distributions 		} else {
1765*c54f35caSApple OSS Distributions 			options |= kIOMemoryTypePhysical64;
1766*c54f35caSApple OSS Distributions 		}
1767*c54f35caSApple OSS Distributions 
1768*c54f35caSApple OSS Distributions 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1769*c54f35caSApple OSS Distributions 			return os::move(that);
1770*c54f35caSApple OSS Distributions 		}
1771*c54f35caSApple OSS Distributions 	}
1772*c54f35caSApple OSS Distributions 
1773*c54f35caSApple OSS Distributions 	return nullptr;
1774*c54f35caSApple OSS Distributions }
1775*c54f35caSApple OSS Distributions 
1776*c54f35caSApple OSS Distributions 
1777*c54f35caSApple OSS Distributions /*
1778*c54f35caSApple OSS Distributions  * withOptions:
1779*c54f35caSApple OSS Distributions  *
1780*c54f35caSApple OSS Distributions  * Create a new IOMemoryDescriptor. The buffer is made up of several
1781*c54f35caSApple OSS Distributions  * virtual address ranges, from a given task.
1782*c54f35caSApple OSS Distributions  *
1783*c54f35caSApple OSS Distributions  * Passing the ranges as a reference will avoid an extra allocation.
1784*c54f35caSApple OSS Distributions  */
1785*c54f35caSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1786*c54f35caSApple OSS Distributions IOMemoryDescriptor::withOptions(void *          buffers,
1787*c54f35caSApple OSS Distributions     UInt32          count,
1788*c54f35caSApple OSS Distributions     UInt32          offset,
1789*c54f35caSApple OSS Distributions     task_t          task,
1790*c54f35caSApple OSS Distributions     IOOptionBits    opts,
1791*c54f35caSApple OSS Distributions     IOMapper *      mapper)
1792*c54f35caSApple OSS Distributions {
1793*c54f35caSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794*c54f35caSApple OSS Distributions 
1795*c54f35caSApple OSS Distributions 	if (self
1796*c54f35caSApple OSS Distributions 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1797*c54f35caSApple OSS Distributions 		return nullptr;
1798*c54f35caSApple OSS Distributions 	}
1799*c54f35caSApple OSS Distributions 
1800*c54f35caSApple OSS Distributions 	return os::move(self);
1801*c54f35caSApple OSS Distributions }
1802*c54f35caSApple OSS Distributions 
/*
 * Base-class stub: IOMemoryDescriptor itself cannot be initialized with
 * options; concrete subclasses (e.g. IOGeneralMemoryDescriptor) override
 * this. Always fails.
 */
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	return false;
}
1813*c54f35caSApple OSS Distributions 
1814*c54f35caSApple OSS Distributions #ifndef __LP64__
1815*c54f35caSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1816*c54f35caSApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817*c54f35caSApple OSS Distributions     UInt32          withCount,
1818*c54f35caSApple OSS Distributions     IODirection     direction,
1819*c54f35caSApple OSS Distributions     bool            asReference)
1820*c54f35caSApple OSS Distributions {
1821*c54f35caSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822*c54f35caSApple OSS Distributions 	if (that) {
1823*c54f35caSApple OSS Distributions 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824*c54f35caSApple OSS Distributions 			return os::move(that);
1825*c54f35caSApple OSS Distributions 		}
1826*c54f35caSApple OSS Distributions 	}
1827*c54f35caSApple OSS Distributions 	return nullptr;
1828*c54f35caSApple OSS Distributions }
1829*c54f35caSApple OSS Distributions 
/*
 * Legacy (!__LP64__) convenience: create a descriptor covering a sub-range
 * of an existing descriptor. Delegates to IOSubMemoryDescriptor.
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
    IOByteCount             offset,
    IOByteCount             length,
    IODirection             direction)
{
	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
}
1838*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
1839*c54f35caSApple OSS Distributions 
1840*c54f35caSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1841*c54f35caSApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842*c54f35caSApple OSS Distributions {
1843*c54f35caSApple OSS Distributions 	IOGeneralMemoryDescriptor *origGenMD =
1844*c54f35caSApple OSS Distributions 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845*c54f35caSApple OSS Distributions 
1846*c54f35caSApple OSS Distributions 	if (origGenMD) {
1847*c54f35caSApple OSS Distributions 		return IOGeneralMemoryDescriptor::
1848*c54f35caSApple OSS Distributions 		       withPersistentMemoryDescriptor(origGenMD);
1849*c54f35caSApple OSS Distributions 	} else {
1850*c54f35caSApple OSS Distributions 		return nullptr;
1851*c54f35caSApple OSS Distributions 	}
1852*c54f35caSApple OSS Distributions }
1853*c54f35caSApple OSS Distributions 
/*
 * Create a persistent descriptor sharing the original descriptor's backing
 * memory. If the original's own memory reference can be reused, the original
 * descriptor itself is retained and returned; otherwise a new descriptor is
 * built around the freshly created reference.
 */
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	// Create (or reuse) a memory reference for the original's pages.
	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// Reuse case: the original descriptor already holds this reference, so the
	// original can serve as the persistent descriptor. Retain it and drop the
	// extra reference taken by memoryReferenceCreate above.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	// New-reference case: wrap memRef in a fresh descriptor; ownership of
	// memRef is handed to the new descriptor through the init data.
	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	// NOTE(review): if allocation or init fails here, memRef is not visibly
	// released on this path — confirm whether initWithOptions cleans it up.
	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1879*c54f35caSApple OSS Distributions 
1880*c54f35caSApple OSS Distributions #ifndef __LP64__
/*
 * Legacy (!__LP64__) initializer: single kernel-virtual range. Stores the
 * range in the descriptor's inline _singleRange and delegates to
 * initWithRanges with kernel_task and asReference=true (no copy).
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   withLength,
    IODirection withDirection)
{
	_singleRange.v.address = (vm_offset_t) address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
1891*c54f35caSApple OSS Distributions 
/*
 * Legacy (!__LP64__) initializer: single virtual range in an arbitrary
 * task. Stores the range in the inline _singleRange and delegates to
 * initWithRanges with asReference=true (no copy).
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    withLength,
    IODirection  withDirection,
    task_t       withTask)
{
	_singleRange.v.address = address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
1903*c54f35caSApple OSS Distributions 
/*
 * Legacy (!__LP64__) initializer: single physical range. Stores the range
 * in the inline _singleRange (physical variant) and delegates to
 * initWithPhysicalRanges with reference=true (no copy).
 */
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            withLength,
	IODirection            withDirection )
{
	_singleRange.p.address = address;
	_singleRange.p.length  = withLength;

	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
1915*c54f35caSApple OSS Distributions 
1916*c54f35caSApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1917*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918*c54f35caSApple OSS Distributions 	IOPhysicalRange * ranges,
1919*c54f35caSApple OSS Distributions 	UInt32            count,
1920*c54f35caSApple OSS Distributions 	IODirection       direction,
1921*c54f35caSApple OSS Distributions 	bool              reference)
1922*c54f35caSApple OSS Distributions {
1923*c54f35caSApple OSS Distributions 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924*c54f35caSApple OSS Distributions 
1925*c54f35caSApple OSS Distributions 	if (reference) {
1926*c54f35caSApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1927*c54f35caSApple OSS Distributions 	}
1928*c54f35caSApple OSS Distributions 
1929*c54f35caSApple OSS Distributions 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930*c54f35caSApple OSS Distributions }
1931*c54f35caSApple OSS Distributions 
1932*c54f35caSApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1933*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1934*c54f35caSApple OSS Distributions 	IOVirtualRange * ranges,
1935*c54f35caSApple OSS Distributions 	UInt32           count,
1936*c54f35caSApple OSS Distributions 	IODirection      direction,
1937*c54f35caSApple OSS Distributions 	task_t           task,
1938*c54f35caSApple OSS Distributions 	bool             reference)
1939*c54f35caSApple OSS Distributions {
1940*c54f35caSApple OSS Distributions 	IOOptionBits mdOpts = direction;
1941*c54f35caSApple OSS Distributions 
1942*c54f35caSApple OSS Distributions 	if (reference) {
1943*c54f35caSApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1944*c54f35caSApple OSS Distributions 	}
1945*c54f35caSApple OSS Distributions 
1946*c54f35caSApple OSS Distributions 	if (task) {
1947*c54f35caSApple OSS Distributions 		mdOpts |= kIOMemoryTypeVirtual;
1948*c54f35caSApple OSS Distributions 
1949*c54f35caSApple OSS Distributions 		// Auto-prepare if this is a kernel memory descriptor as very few
1950*c54f35caSApple OSS Distributions 		// clients bother to prepare() kernel memory.
1951*c54f35caSApple OSS Distributions 		// But it was not enforced so what are you going to do?
1952*c54f35caSApple OSS Distributions 		if (task == kernel_task) {
1953*c54f35caSApple OSS Distributions 			mdOpts |= kIOMemoryAutoPrepare;
1954*c54f35caSApple OSS Distributions 		}
1955*c54f35caSApple OSS Distributions 	} else {
1956*c54f35caSApple OSS Distributions 		mdOpts |= kIOMemoryTypePhysical;
1957*c54f35caSApple OSS Distributions 	}
1958*c54f35caSApple OSS Distributions 
1959*c54f35caSApple OSS Distributions 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960*c54f35caSApple OSS Distributions }
1961*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
1962*c54f35caSApple OSS Distributions 
1963*c54f35caSApple OSS Distributions /*
1964*c54f35caSApple OSS Distributions  * initWithOptions:
1965*c54f35caSApple OSS Distributions  *
 * Master initialiser for an IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1967*c54f35caSApple OSS Distributions  * from a given task, several physical ranges, an UPL from the ubc
1968*c54f35caSApple OSS Distributions  * system or a uio (may be 64bit) from the BSD subsystem.
1969*c54f35caSApple OSS Distributions  *
1970*c54f35caSApple OSS Distributions  * Passing the ranges as a reference will avoid an extra allocation.
1971*c54f35caSApple OSS Distributions  *
1972*c54f35caSApple OSS Distributions  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973*c54f35caSApple OSS Distributions  * existing instance -- note this behavior is not commonly supported in other
1974*c54f35caSApple OSS Distributions  * I/O Kit classes, although it is supported here.
1975*c54f35caSApple OSS Distributions  */
1976*c54f35caSApple OSS Distributions 
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	// The memory type (virtual / physical / UPL / UIO / persistent)
	// selects which interpretation of 'buffers' is used below.
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// Reject 32-bit virtual ranges from a 64-bit task: addresses would
	// be truncated. Callers must use ::withAddressRange() instead.
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Validate the task/type pairing: virtual-style types require a
	// task; physical and UPL types must not have one.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized  instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		// NOTE: this 'type' deliberately shadows the outer one — here we
		// need the descriptor's PREVIOUS type to tear down the old state.
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			// Undo any outstanding prepare() calls (physical MDs are
			// never wired through prepare, so they are skipped).
			while (_wireCount) {
				complete();
			}
		}
		// Release the old range storage if we had allocated a copy.
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// A redirected descriptor stays redirected across re-init; only
		// a non-redirected one drops its memory reference and mappings.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		// kIOMapperSystem is a sentinel meaning "use the system mapper";
		// resolve (and cache) the real IOMapper::gSystem here.
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options               &= ~(kIOMemoryPreparedReadOnly);
	_flags                 = options;
	_task                  = task;

#ifndef __LP64__
	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-instance bookkeeping (may be stale on re-init).
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Allocate or free the prepare lock to match the thread-safe option.
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// 'buffers' is an externally supplied upl_t; wrap it in a single
		// ioPLBlock inside our _memoryEntries data block.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length    = count;
		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		// Take our own reference on the external UPL.
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		// The UPL must cover the requested offset+length window.
		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice conveniently equal to 1
		iopl.fFlags  = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				// uio_duplicate copies the uio so the caller may free theirs.
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// A single 64-bit range that fits below 4GB can be
				// downgraded to the 32-bit type and stored inline in
				// _singleRange, avoiding an allocation.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (kIOMemoryTypeVirtual64 == type) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				// A single range is stored inline; multiple ranges
				// require an allocated copy.
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
		_rangesCount = count;

		// Find starting address within the vector of ranges
		// Walk the ranges accumulating total byte length and page count,
		// with explicit overflow checks; any failure breaks out early and
		// is caught by the (ind < count) test below.
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind, _task);
			if (_task) {
				// For task-backed ranges, ask the VM for the real
				// physical footprint (handles sub-page mappings).
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				// Track the highest physical page for bounce-buffer decisions.
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false;                                   /* overflow */
		}
		_length      = totalLength;
		_pages       = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed


		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// Sanity: cannot describe more pages than exist.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Pick up a VM tag for accounting if none was set yet.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			// Persistent descriptors need a named memory reference up front.
			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2349*c54f35caSApple OSS Distributions 
2350*c54f35caSApple OSS Distributions /*
2351*c54f35caSApple OSS Distributions  * free
2352*c54f35caSApple OSS Distributions  *
2353*c54f35caSApple OSS Distributions  * Free resources.
2354*c54f35caSApple OSS Distributions  */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Break the device pager's back-pointer to this descriptor first,
	// under the lock, so a concurrent pager callback cannot see a
	// half-torn-down object.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical MDs are permanently wired (no complete() needed);
		// just drop any DMA mapping still recorded in the data block.
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance every outstanding prepare() before teardown.
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free the range storage only if we own it (not caller-referenced);
	// the release call depends on how the ranges were allocated.
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2418*c54f35caSApple OSS Distributions 
2419*c54f35caSApple OSS Distributions #ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	// Deprecated 32-bit-only entry point; any call is a fatal error.
	panic("IOGMD::unmapFromKernel deprecated");
}
2425*c54f35caSApple OSS Distributions 
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	// Deprecated 32-bit-only entry point; any call is a fatal error.
	panic("IOGMD::mapIntoKernel deprecated");
}
2431*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
2432*c54f35caSApple OSS Distributions 
2433*c54f35caSApple OSS Distributions /*
2434*c54f35caSApple OSS Distributions  * getDirection:
2435*c54f35caSApple OSS Distributions  *
2436*c54f35caSApple OSS Distributions  * Get the direction of the transfer.
2437*c54f35caSApple OSS Distributions  */
IODirection
IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
	// 32-bit builds keep a separate _direction field; when it is set,
	// it takes precedence (NOTE(review): presumably for compatibility
	// with legacy subclasses that assigned it directly — confirm).
	if (_direction) {
		return _direction;
	}
#endif /* !__LP64__ */
	// The direction is packed into the low bits of _flags.
	return (IODirection) (_flags & kIOMemoryDirectionMask);
}
2448*c54f35caSApple OSS Distributions 
2449*c54f35caSApple OSS Distributions /*
2450*c54f35caSApple OSS Distributions  * getLength:
2451*c54f35caSApple OSS Distributions  *
2452*c54f35caSApple OSS Distributions  * Get the length of the transfer (over all ranges).
2453*c54f35caSApple OSS Distributions  */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total byte count of the transfer, summed over all ranges.
	return _length;
}
2459*c54f35caSApple OSS Distributions 
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	// Store a client-defined tag; the value is opaque to this class.
	_tag = tag;
}
2465*c54f35caSApple OSS Distributions 
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	// Return the client-defined tag last stored by setTag().
	return _tag;
}
2471*c54f35caSApple OSS Distributions 
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	// Expose the raw option/state flags (kIOMemory* bits).
	return _flags;
}
2477*c54f35caSApple OSS Distributions 
2478*c54f35caSApple OSS Distributions OSObject *
copyContext(void) const2479*c54f35caSApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2480*c54f35caSApple OSS Distributions {
2481*c54f35caSApple OSS Distributions 	if (reserved) {
2482*c54f35caSApple OSS Distributions 		OSObject * context = reserved->contextObject;
2483*c54f35caSApple OSS Distributions 		if (context) {
2484*c54f35caSApple OSS Distributions 			context->retain();
2485*c54f35caSApple OSS Distributions 		}
2486*c54f35caSApple OSS Distributions 		return context;
2487*c54f35caSApple OSS Distributions 	} else {
2488*c54f35caSApple OSS Distributions 		return NULL;
2489*c54f35caSApple OSS Distributions 	}
2490*c54f35caSApple OSS Distributions }
2491*c54f35caSApple OSS Distributions 
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	// Attach 'obj' as this descriptor's context object, retaining it and
	// releasing any previous object.  Passing NULL clears the context.
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Lazily allocates the reserved area; shadows the member on purpose.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		OSObject * oldObject = reserved->contextObject;
		// Atomically detach the old object before releasing it, so a
		// concurrent copyContext() never sees a freed pointer.
		// NOTE(review): the store of the new 'obj' below is a plain
		// write; presumably concurrent setContext() calls are
		// serialized by callers — confirm.
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2512*c54f35caSApple OSS Distributions 
2513*c54f35caSApple OSS Distributions #ifndef __LP64__
2514*c54f35caSApple OSS Distributions #pragma clang diagnostic push
2515*c54f35caSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2516*c54f35caSApple OSS Distributions 
2517*c54f35caSApple OSS Distributions // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
2518*c54f35caSApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2519*c54f35caSApple OSS Distributions IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
2520*c54f35caSApple OSS Distributions {
2521*c54f35caSApple OSS Distributions 	addr64_t physAddr = 0;
2522*c54f35caSApple OSS Distributions 
2523*c54f35caSApple OSS Distributions 	if (prepare() == kIOReturnSuccess) {
2524*c54f35caSApple OSS Distributions 		physAddr = getPhysicalSegment64( offset, length );
2525*c54f35caSApple OSS Distributions 		complete();
2526*c54f35caSApple OSS Distributions 	}
2527*c54f35caSApple OSS Distributions 
2528*c54f35caSApple OSS Distributions 	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2529*c54f35caSApple OSS Distributions }
2530*c54f35caSApple OSS Distributions 
2531*c54f35caSApple OSS Distributions #pragma clang diagnostic pop
2532*c54f35caSApple OSS Distributions 
2533*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
2534*c54f35caSApple OSS Distributions 
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	// Copy 'length' bytes of this descriptor's memory, starting at
	// 'offset', into the kernel buffer 'bytes'.  Returns the number of
	// bytes actually copied (0 on any validation failure).
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;


	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	if (offset >= _length) {
		// Zero-byte read at the very end of the descriptor.
		return 0;
	}

	// Remote (non-local) memory cannot be accessed via copypv here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Clip the request to what the descriptor actually holds.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t        srcAddr64;
		IOByteCount     srcLen;

		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv takes an unsigned int length; clamp so the cast below
		// cannot truncate (the loop picks up the rest).
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// Physical source -> kernel virtual sink copy.
		copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// A short copy only happens if getPhysicalSegment() returned 0.
	assert(!remaining);

	return length - remaining;
}
2598*c54f35caSApple OSS Distributions 
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	// Copy 'length' bytes from the kernel buffer 'bytes' into this
	// descriptor's memory starting at 'inoffset'.  A NULL 'bytes'
	// zero-fills the range instead.  Returns the number of bytes
	// written (0 on any validation failure).
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	// Refuse to write through a read-only preparation.
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	if (offset >= _length) {
		// Zero-byte write at the very end of the descriptor.
		return 0;
	}

	// Remote (non-local) memory cannot be accessed via copypv here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Clip the request to what the descriptor actually holds.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv/bzero_phys take an unsigned int length; clamp so the
		// casts below cannot truncate (the loop picks up the rest).
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (!srcAddr) {
			// NULL source buffer: zero-fill this physical segment.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			// Kernel virtual source -> physical sink copy.
			copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			srcAddr   += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// A short copy only happens if getPhysicalSegment() returned 0.
	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	if (!srcAddr) {
		// NOTE(review): presumably bzero_phys skips the cache flush
		// that copypv's cppvFsnk performs, so flush explicitly for the
		// zero-fill path — confirm.
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2678*c54f35caSApple OSS Distributions 
2679*c54f35caSApple OSS Distributions #ifndef __LP64__
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	// Deprecated 32-bit-only entry point; any call is a fatal error.
	panic("IOGMD::setPosition deprecated");
}
2685*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
2686*c54f35caSApple OSS Distributions 
// Global monotonic ID generators, 8-byte aligned for the 64-bit atomic
// increment.  Preparation IDs start at 1<<32 and descriptor IDs just past
// kIODescriptorIDInvalid (NOTE(review): presumably so generated IDs never
// collide with the reserved sentinel values — confirm).
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2689*c54f35caSApple OSS Distributions 
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	// Return the ID for the current prepared (wired) state, allocating
	// one on first request.  Returns kIOPreparationIDUnprepared while the
	// descriptor is not wired.
	ioGMDData *dataP;

	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	// Physical-range descriptors store their ID in the base-class
	// reserved area instead of ioGMDData.
	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	// First request since prepare(): take a fresh ID from the global
	// counter; the CAS keeps the first winner if two threads race (the
	// loser merely wastes one counter value).
	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
	}
	return dataP->fPreparationID;
}
2715*c54f35caSApple OSS Distributions 
2716*c54f35caSApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2717*c54f35caSApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2718*c54f35caSApple OSS Distributions {
2719*c54f35caSApple OSS Distributions 	if (reserved->creator) {
2720*c54f35caSApple OSS Distributions 		task_deallocate(reserved->creator);
2721*c54f35caSApple OSS Distributions 		reserved->creator = NULL;
2722*c54f35caSApple OSS Distributions 	}
2723*c54f35caSApple OSS Distributions 
2724*c54f35caSApple OSS Distributions 	if (reserved->contextObject) {
2725*c54f35caSApple OSS Distributions 		reserved->contextObject->release();
2726*c54f35caSApple OSS Distributions 		reserved->contextObject = NULL;
2727*c54f35caSApple OSS Distributions 	}
2728*c54f35caSApple OSS Distributions }
2729*c54f35caSApple OSS Distributions 
2730*c54f35caSApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2731*c54f35caSApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2732*c54f35caSApple OSS Distributions {
2733*c54f35caSApple OSS Distributions 	if (!reserved) {
2734*c54f35caSApple OSS Distributions 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2735*c54f35caSApple OSS Distributions 	}
2736*c54f35caSApple OSS Distributions 	return reserved;
2737*c54f35caSApple OSS Distributions }
2738*c54f35caSApple OSS Distributions 
void
IOMemoryDescriptor::setPreparationID( void )
{
	// Assign a unique preparation ID on first call; later calls are
	// no-ops.  The CAS keeps the first winner's ID if two threads race
	// (the loser merely wastes one counter value).
	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
	}
}
2747*c54f35caSApple OSS Distributions 
2748*c54f35caSApple OSS Distributions uint64_t
getPreparationID(void)2749*c54f35caSApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2750*c54f35caSApple OSS Distributions {
2751*c54f35caSApple OSS Distributions 	if (reserved) {
2752*c54f35caSApple OSS Distributions 		return reserved->preparationID;
2753*c54f35caSApple OSS Distributions 	} else {
2754*c54f35caSApple OSS Distributions 		return kIOPreparationIDUnsupported;
2755*c54f35caSApple OSS Distributions 	}
2756*c54f35caSApple OSS Distributions }
2757*c54f35caSApple OSS Distributions 
void
IOMemoryDescriptor::setDescriptorID( void )
{
	// Assign a unique descriptor ID on first call; later calls are
	// no-ops.  The CAS keeps the first winner's ID if two threads race
	// (the loser merely wastes one counter value).
	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
	}
}
2766*c54f35caSApple OSS Distributions 
2767*c54f35caSApple OSS Distributions uint64_t
getDescriptorID(void)2768*c54f35caSApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2769*c54f35caSApple OSS Distributions {
2770*c54f35caSApple OSS Distributions 	setDescriptorID();
2771*c54f35caSApple OSS Distributions 
2772*c54f35caSApple OSS Distributions 	if (reserved) {
2773*c54f35caSApple OSS Distributions 		return reserved->descriptorID;
2774*c54f35caSApple OSS Distributions 	} else {
2775*c54f35caSApple OSS Distributions 		return kIODescriptorIDInvalid;
2776*c54f35caSApple OSS Distributions 	}
2777*c54f35caSApple OSS Distributions }
2778*c54f35caSApple OSS Distributions 
2779*c54f35caSApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2780*c54f35caSApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2781*c54f35caSApple OSS Distributions {
2782*c54f35caSApple OSS Distributions 	if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2783*c54f35caSApple OSS Distributions 		return kIOReturnSuccess;
2784*c54f35caSApple OSS Distributions 	}
2785*c54f35caSApple OSS Distributions 
2786*c54f35caSApple OSS Distributions 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2787*c54f35caSApple OSS Distributions 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2788*c54f35caSApple OSS Distributions 		return kIOReturnBadArgument;
2789*c54f35caSApple OSS Distributions 	}
2790*c54f35caSApple OSS Distributions 
2791*c54f35caSApple OSS Distributions 	uint64_t descriptorID = getDescriptorID();
2792*c54f35caSApple OSS Distributions 	assert(descriptorID != kIODescriptorIDInvalid);
2793*c54f35caSApple OSS Distributions 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2794*c54f35caSApple OSS Distributions 		return kIOReturnBadArgument;
2795*c54f35caSApple OSS Distributions 	}
2796*c54f35caSApple OSS Distributions 
2797*c54f35caSApple OSS Distributions 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2798*c54f35caSApple OSS Distributions 
2799*c54f35caSApple OSS Distributions #if __LP64__
2800*c54f35caSApple OSS Distributions 	static const uint8_t num_segments_page = 8;
2801*c54f35caSApple OSS Distributions #else
2802*c54f35caSApple OSS Distributions 	static const uint8_t num_segments_page = 4;
2803*c54f35caSApple OSS Distributions #endif
2804*c54f35caSApple OSS Distributions 	static const uint8_t num_segments_long = 2;
2805*c54f35caSApple OSS Distributions 
2806*c54f35caSApple OSS Distributions 	IOPhysicalAddress segments_page[num_segments_page];
2807*c54f35caSApple OSS Distributions 	IOPhysicalRange   segments_long[num_segments_long];
2808*c54f35caSApple OSS Distributions 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2809*c54f35caSApple OSS Distributions 	memset(segments_long, 0, sizeof(segments_long));
2810*c54f35caSApple OSS Distributions 
2811*c54f35caSApple OSS Distributions 	uint8_t segment_page_idx = 0;
2812*c54f35caSApple OSS Distributions 	uint8_t segment_long_idx = 0;
2813*c54f35caSApple OSS Distributions 
2814*c54f35caSApple OSS Distributions 	IOPhysicalRange physical_segment;
2815*c54f35caSApple OSS Distributions 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2816*c54f35caSApple OSS Distributions 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2817*c54f35caSApple OSS Distributions 
2818*c54f35caSApple OSS Distributions 		if (physical_segment.length == 0) {
2819*c54f35caSApple OSS Distributions 			break;
2820*c54f35caSApple OSS Distributions 		}
2821*c54f35caSApple OSS Distributions 
2822*c54f35caSApple OSS Distributions 		/**
2823*c54f35caSApple OSS Distributions 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2824*c54f35caSApple OSS Distributions 		 * buffer memory, pack segment events according to the following.
2825*c54f35caSApple OSS Distributions 		 *
2826*c54f35caSApple OSS Distributions 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2827*c54f35caSApple OSS Distributions 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2828*c54f35caSApple OSS Distributions 		 *
2829*c54f35caSApple OSS Distributions 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2830*c54f35caSApple OSS Distributions 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2831*c54f35caSApple OSS Distributions 		 * - unmapped pages will have a ppn of MAX_INT_32
2832*c54f35caSApple OSS Distributions 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2833*c54f35caSApple OSS Distributions 		 * - address_0, length_0, address_0, length_1
2834*c54f35caSApple OSS Distributions 		 * - unmapped pages will have an address of 0
2835*c54f35caSApple OSS Distributions 		 *
2836*c54f35caSApple OSS Distributions 		 * During each iteration do the following depending on the length of the mapping:
2837*c54f35caSApple OSS Distributions 		 * 1. add the current segment to the appropriate queue of pending segments
2838*c54f35caSApple OSS Distributions 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2839*c54f35caSApple OSS Distributions 		 * 1a. if FALSE emit and reset all events in the previous queue
2840*c54f35caSApple OSS Distributions 		 * 2. check if we have filled up the current queue of pending events
2841*c54f35caSApple OSS Distributions 		 * 2a. if TRUE emit and reset all events in the pending queue
2842*c54f35caSApple OSS Distributions 		 * 3. after completing all iterations emit events in the current queue
2843*c54f35caSApple OSS Distributions 		 */
2844*c54f35caSApple OSS Distributions 
2845*c54f35caSApple OSS Distributions 		bool emit_page = false;
2846*c54f35caSApple OSS Distributions 		bool emit_long = false;
2847*c54f35caSApple OSS Distributions 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2848*c54f35caSApple OSS Distributions 			segments_page[segment_page_idx] = physical_segment.address;
2849*c54f35caSApple OSS Distributions 			segment_page_idx++;
2850*c54f35caSApple OSS Distributions 
2851*c54f35caSApple OSS Distributions 			emit_long = segment_long_idx != 0;
2852*c54f35caSApple OSS Distributions 			emit_page = segment_page_idx == num_segments_page;
2853*c54f35caSApple OSS Distributions 
2854*c54f35caSApple OSS Distributions 			if (os_unlikely(emit_long)) {
2855*c54f35caSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2856*c54f35caSApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2857*c54f35caSApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2858*c54f35caSApple OSS Distributions 			}
2859*c54f35caSApple OSS Distributions 
2860*c54f35caSApple OSS Distributions 			if (os_unlikely(emit_page)) {
2861*c54f35caSApple OSS Distributions #if __LP64__
2862*c54f35caSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2863*c54f35caSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2864*c54f35caSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2865*c54f35caSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2866*c54f35caSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2867*c54f35caSApple OSS Distributions #else
2868*c54f35caSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2869*c54f35caSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2870*c54f35caSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2871*c54f35caSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2872*c54f35caSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2873*c54f35caSApple OSS Distributions #endif
2874*c54f35caSApple OSS Distributions 			}
2875*c54f35caSApple OSS Distributions 		} else {
2876*c54f35caSApple OSS Distributions 			segments_long[segment_long_idx] = physical_segment;
2877*c54f35caSApple OSS Distributions 			segment_long_idx++;
2878*c54f35caSApple OSS Distributions 
2879*c54f35caSApple OSS Distributions 			emit_page = segment_page_idx != 0;
2880*c54f35caSApple OSS Distributions 			emit_long = segment_long_idx == num_segments_long;
2881*c54f35caSApple OSS Distributions 
2882*c54f35caSApple OSS Distributions 			if (os_unlikely(emit_page)) {
2883*c54f35caSApple OSS Distributions #if __LP64__
2884*c54f35caSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2885*c54f35caSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2886*c54f35caSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2887*c54f35caSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2888*c54f35caSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2889*c54f35caSApple OSS Distributions #else
2890*c54f35caSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2891*c54f35caSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2892*c54f35caSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2893*c54f35caSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2894*c54f35caSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2895*c54f35caSApple OSS Distributions #endif
2896*c54f35caSApple OSS Distributions 			}
2897*c54f35caSApple OSS Distributions 
2898*c54f35caSApple OSS Distributions 			if (emit_long) {
2899*c54f35caSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2900*c54f35caSApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2901*c54f35caSApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2902*c54f35caSApple OSS Distributions 			}
2903*c54f35caSApple OSS Distributions 		}
2904*c54f35caSApple OSS Distributions 
2905*c54f35caSApple OSS Distributions 		if (os_unlikely(emit_page)) {
2906*c54f35caSApple OSS Distributions 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2907*c54f35caSApple OSS Distributions 			segment_page_idx = 0;
2908*c54f35caSApple OSS Distributions 		}
2909*c54f35caSApple OSS Distributions 
2910*c54f35caSApple OSS Distributions 		if (os_unlikely(emit_long)) {
2911*c54f35caSApple OSS Distributions 			memset(segments_long, 0, sizeof(segments_long));
2912*c54f35caSApple OSS Distributions 			segment_long_idx = 0;
2913*c54f35caSApple OSS Distributions 		}
2914*c54f35caSApple OSS Distributions 	}
2915*c54f35caSApple OSS Distributions 
2916*c54f35caSApple OSS Distributions 	if (segment_page_idx != 0) {
2917*c54f35caSApple OSS Distributions 		assert(segment_long_idx == 0);
2918*c54f35caSApple OSS Distributions #if __LP64__
2919*c54f35caSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2920*c54f35caSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2921*c54f35caSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2922*c54f35caSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2923*c54f35caSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2924*c54f35caSApple OSS Distributions #else
2925*c54f35caSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2926*c54f35caSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[1]),
2927*c54f35caSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[2]),
2928*c54f35caSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[3]),
2929*c54f35caSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[4]));
2930*c54f35caSApple OSS Distributions #endif
2931*c54f35caSApple OSS Distributions 	} else if (segment_long_idx != 0) {
2932*c54f35caSApple OSS Distributions 		assert(segment_page_idx == 0);
2933*c54f35caSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2934*c54f35caSApple OSS Distributions 		    segments_long[0].address, segments_long[0].length,
2935*c54f35caSApple OSS Distributions 		    segments_long[1].address, segments_long[1].length);
2936*c54f35caSApple OSS Distributions 	}
2937*c54f35caSApple OSS Distributions 
2938*c54f35caSApple OSS Distributions 	return kIOReturnSuccess;
2939*c54f35caSApple OSS Distributions }
2940*c54f35caSApple OSS Distributions 
2941*c54f35caSApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2942*c54f35caSApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2943*c54f35caSApple OSS Distributions {
2944*c54f35caSApple OSS Distributions 	_kernelTag = (vm_tag_t) kernelTag;
2945*c54f35caSApple OSS Distributions 	_userTag   = (vm_tag_t) userTag;
2946*c54f35caSApple OSS Distributions }
2947*c54f35caSApple OSS Distributions 
2948*c54f35caSApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2949*c54f35caSApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2950*c54f35caSApple OSS Distributions {
2951*c54f35caSApple OSS Distributions 	if (vm_kernel_map_is_kernel(map)) {
2952*c54f35caSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2953*c54f35caSApple OSS Distributions 			return (uint32_t) _kernelTag;
2954*c54f35caSApple OSS Distributions 		}
2955*c54f35caSApple OSS Distributions 	} else {
2956*c54f35caSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _userTag) {
2957*c54f35caSApple OSS Distributions 			return (uint32_t) _userTag;
2958*c54f35caSApple OSS Distributions 		}
2959*c54f35caSApple OSS Distributions 	}
2960*c54f35caSApple OSS Distributions 	return IOMemoryTag(map);
2961*c54f35caSApple OSS Distributions }
2962*c54f35caSApple OSS Distributions 
2963*c54f35caSApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2964*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2965*c54f35caSApple OSS Distributions {
2966*c54f35caSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
2967*c54f35caSApple OSS Distributions 	DMACommandOps params;
2968*c54f35caSApple OSS Distributions 	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2969*c54f35caSApple OSS Distributions 	ioGMDData *dataP;
2970*c54f35caSApple OSS Distributions 
2971*c54f35caSApple OSS Distributions 	params = (op & ~kIOMDDMACommandOperationMask & op);
2972*c54f35caSApple OSS Distributions 	op &= kIOMDDMACommandOperationMask;
2973*c54f35caSApple OSS Distributions 
2974*c54f35caSApple OSS Distributions 	if (kIOMDDMAMap == op) {
2975*c54f35caSApple OSS Distributions 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
2976*c54f35caSApple OSS Distributions 			return kIOReturnUnderrun;
2977*c54f35caSApple OSS Distributions 		}
2978*c54f35caSApple OSS Distributions 
2979*c54f35caSApple OSS Distributions 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2980*c54f35caSApple OSS Distributions 
2981*c54f35caSApple OSS Distributions 		if (!_memoryEntries
2982*c54f35caSApple OSS Distributions 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2983*c54f35caSApple OSS Distributions 			return kIOReturnNoMemory;
2984*c54f35caSApple OSS Distributions 		}
2985*c54f35caSApple OSS Distributions 
2986*c54f35caSApple OSS Distributions 		if (_memoryEntries && data->fMapper) {
2987*c54f35caSApple OSS Distributions 			bool remap, keepMap;
2988*c54f35caSApple OSS Distributions 			dataP = getDataP(_memoryEntries);
2989*c54f35caSApple OSS Distributions 
2990*c54f35caSApple OSS Distributions 			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2991*c54f35caSApple OSS Distributions 				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2992*c54f35caSApple OSS Distributions 			}
2993*c54f35caSApple OSS Distributions 			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2994*c54f35caSApple OSS Distributions 				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
2995*c54f35caSApple OSS Distributions 			}
2996*c54f35caSApple OSS Distributions 
2997*c54f35caSApple OSS Distributions 			keepMap = (data->fMapper == gIOSystemMapper);
2998*c54f35caSApple OSS Distributions 			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2999*c54f35caSApple OSS Distributions 
3000*c54f35caSApple OSS Distributions 			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3001*c54f35caSApple OSS Distributions 				IOLockLock(_prepareLock);
3002*c54f35caSApple OSS Distributions 			}
3003*c54f35caSApple OSS Distributions 
3004*c54f35caSApple OSS Distributions 			remap = (!keepMap);
3005*c54f35caSApple OSS Distributions 			remap |= (dataP->fDMAMapNumAddressBits < 64)
3006*c54f35caSApple OSS Distributions 			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3007*c54f35caSApple OSS Distributions 			remap |= (dataP->fDMAMapAlignment > page_size);
3008*c54f35caSApple OSS Distributions 
3009*c54f35caSApple OSS Distributions 			if (remap || !dataP->fMappedBaseValid) {
3010*c54f35caSApple OSS Distributions 				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3011*c54f35caSApple OSS Distributions 				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3012*c54f35caSApple OSS Distributions 					dataP->fMappedBase      = data->fAlloc;
3013*c54f35caSApple OSS Distributions 					dataP->fMappedBaseValid = true;
3014*c54f35caSApple OSS Distributions 					dataP->fMappedLength    = data->fAllocLength;
3015*c54f35caSApple OSS Distributions 					data->fAllocLength      = 0;    // IOMD owns the alloc now
3016*c54f35caSApple OSS Distributions 				}
3017*c54f35caSApple OSS Distributions 			} else {
3018*c54f35caSApple OSS Distributions 				data->fAlloc = dataP->fMappedBase;
3019*c54f35caSApple OSS Distributions 				data->fAllocLength = 0;         // give out IOMD map
3020*c54f35caSApple OSS Distributions 				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3021*c54f35caSApple OSS Distributions 			}
3022*c54f35caSApple OSS Distributions 
3023*c54f35caSApple OSS Distributions 			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3024*c54f35caSApple OSS Distributions 				IOLockUnlock(_prepareLock);
3025*c54f35caSApple OSS Distributions 			}
3026*c54f35caSApple OSS Distributions 		}
3027*c54f35caSApple OSS Distributions 		return err;
3028*c54f35caSApple OSS Distributions 	}
3029*c54f35caSApple OSS Distributions 	if (kIOMDDMAUnmap == op) {
3030*c54f35caSApple OSS Distributions 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
3031*c54f35caSApple OSS Distributions 			return kIOReturnUnderrun;
3032*c54f35caSApple OSS Distributions 		}
3033*c54f35caSApple OSS Distributions 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3034*c54f35caSApple OSS Distributions 
3035*c54f35caSApple OSS Distributions 		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3036*c54f35caSApple OSS Distributions 
3037*c54f35caSApple OSS Distributions 		return kIOReturnSuccess;
3038*c54f35caSApple OSS Distributions 	}
3039*c54f35caSApple OSS Distributions 
3040*c54f35caSApple OSS Distributions 	if (kIOMDAddDMAMapSpec == op) {
3041*c54f35caSApple OSS Distributions 		if (dataSize < sizeof(IODMAMapSpecification)) {
3042*c54f35caSApple OSS Distributions 			return kIOReturnUnderrun;
3043*c54f35caSApple OSS Distributions 		}
3044*c54f35caSApple OSS Distributions 
3045*c54f35caSApple OSS Distributions 		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3046*c54f35caSApple OSS Distributions 
3047*c54f35caSApple OSS Distributions 		if (!_memoryEntries
3048*c54f35caSApple OSS Distributions 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3049*c54f35caSApple OSS Distributions 			return kIOReturnNoMemory;
3050*c54f35caSApple OSS Distributions 		}
3051*c54f35caSApple OSS Distributions 
3052*c54f35caSApple OSS Distributions 		if (_memoryEntries) {
3053*c54f35caSApple OSS Distributions 			dataP = getDataP(_memoryEntries);
3054*c54f35caSApple OSS Distributions 			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3055*c54f35caSApple OSS Distributions 				dataP->fDMAMapNumAddressBits = data->numAddressBits;
3056*c54f35caSApple OSS Distributions 			}
3057*c54f35caSApple OSS Distributions 			if (data->alignment > dataP->fDMAMapAlignment) {
3058*c54f35caSApple OSS Distributions 				dataP->fDMAMapAlignment = data->alignment;
3059*c54f35caSApple OSS Distributions 			}
3060*c54f35caSApple OSS Distributions 		}
3061*c54f35caSApple OSS Distributions 		return kIOReturnSuccess;
3062*c54f35caSApple OSS Distributions 	}
3063*c54f35caSApple OSS Distributions 
3064*c54f35caSApple OSS Distributions 	if (kIOMDGetCharacteristics == op) {
3065*c54f35caSApple OSS Distributions 		if (dataSize < sizeof(IOMDDMACharacteristics)) {
3066*c54f35caSApple OSS Distributions 			return kIOReturnUnderrun;
3067*c54f35caSApple OSS Distributions 		}
3068*c54f35caSApple OSS Distributions 
3069*c54f35caSApple OSS Distributions 		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3070*c54f35caSApple OSS Distributions 		data->fLength = _length;
3071*c54f35caSApple OSS Distributions 		data->fSGCount = _rangesCount;
3072*c54f35caSApple OSS Distributions 		data->fPages = _pages;
3073*c54f35caSApple OSS Distributions 		data->fDirection = getDirection();
3074*c54f35caSApple OSS Distributions 		if (!_wireCount) {
3075*c54f35caSApple OSS Distributions 			data->fIsPrepared = false;
3076*c54f35caSApple OSS Distributions 		} else {
3077*c54f35caSApple OSS Distributions 			data->fIsPrepared = true;
3078*c54f35caSApple OSS Distributions 			data->fHighestPage = _highestPage;
3079*c54f35caSApple OSS Distributions 			if (_memoryEntries) {
3080*c54f35caSApple OSS Distributions 				dataP = getDataP(_memoryEntries);
3081*c54f35caSApple OSS Distributions 				ioPLBlock *ioplList = getIOPLList(dataP);
3082*c54f35caSApple OSS Distributions 				UInt count = getNumIOPL(_memoryEntries, dataP);
3083*c54f35caSApple OSS Distributions 				if (count == 1) {
3084*c54f35caSApple OSS Distributions 					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3085*c54f35caSApple OSS Distributions 				}
3086*c54f35caSApple OSS Distributions 			}
3087*c54f35caSApple OSS Distributions 		}
3088*c54f35caSApple OSS Distributions 
3089*c54f35caSApple OSS Distributions 		return kIOReturnSuccess;
3090*c54f35caSApple OSS Distributions 	} else if (kIOMDDMAActive == op) {
3091*c54f35caSApple OSS Distributions 		if (params) {
3092*c54f35caSApple OSS Distributions 			int16_t prior;
3093*c54f35caSApple OSS Distributions 			prior = OSAddAtomic16(1, &md->_dmaReferences);
3094*c54f35caSApple OSS Distributions 			if (!prior) {
3095*c54f35caSApple OSS Distributions 				md->_mapName = NULL;
3096*c54f35caSApple OSS Distributions 			}
3097*c54f35caSApple OSS Distributions 		} else {
3098*c54f35caSApple OSS Distributions 			if (md->_dmaReferences) {
3099*c54f35caSApple OSS Distributions 				OSAddAtomic16(-1, &md->_dmaReferences);
3100*c54f35caSApple OSS Distributions 			} else {
3101*c54f35caSApple OSS Distributions 				panic("_dmaReferences underflow");
3102*c54f35caSApple OSS Distributions 			}
3103*c54f35caSApple OSS Distributions 		}
3104*c54f35caSApple OSS Distributions 	} else if (kIOMDWalkSegments != op) {
3105*c54f35caSApple OSS Distributions 		return kIOReturnBadArgument;
3106*c54f35caSApple OSS Distributions 	}
3107*c54f35caSApple OSS Distributions 
3108*c54f35caSApple OSS Distributions 	// Get the next segment
3109*c54f35caSApple OSS Distributions 	struct InternalState {
3110*c54f35caSApple OSS Distributions 		IOMDDMAWalkSegmentArgs fIO;
3111*c54f35caSApple OSS Distributions 		mach_vm_size_t fOffset2Index;
3112*c54f35caSApple OSS Distributions 		mach_vm_size_t fNextOffset;
3113*c54f35caSApple OSS Distributions 		UInt fIndex;
3114*c54f35caSApple OSS Distributions 	} *isP;
3115*c54f35caSApple OSS Distributions 
3116*c54f35caSApple OSS Distributions 	// Find the next segment
3117*c54f35caSApple OSS Distributions 	if (dataSize < sizeof(*isP)) {
3118*c54f35caSApple OSS Distributions 		return kIOReturnUnderrun;
3119*c54f35caSApple OSS Distributions 	}
3120*c54f35caSApple OSS Distributions 
3121*c54f35caSApple OSS Distributions 	isP = (InternalState *) vData;
3122*c54f35caSApple OSS Distributions 	uint64_t offset = isP->fIO.fOffset;
3123*c54f35caSApple OSS Distributions 	uint8_t mapped = isP->fIO.fMapped;
3124*c54f35caSApple OSS Distributions 	uint64_t mappedBase;
3125*c54f35caSApple OSS Distributions 
3126*c54f35caSApple OSS Distributions 	if (mapped && (kIOMemoryRemote & _flags)) {
3127*c54f35caSApple OSS Distributions 		return kIOReturnNotAttached;
3128*c54f35caSApple OSS Distributions 	}
3129*c54f35caSApple OSS Distributions 
3130*c54f35caSApple OSS Distributions 	if (IOMapper::gSystem && mapped
3131*c54f35caSApple OSS Distributions 	    && (!(kIOMemoryHostOnly & _flags))
3132*c54f35caSApple OSS Distributions 	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3133*c54f35caSApple OSS Distributions //	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3134*c54f35caSApple OSS Distributions 		if (!_memoryEntries
3135*c54f35caSApple OSS Distributions 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3136*c54f35caSApple OSS Distributions 			return kIOReturnNoMemory;
3137*c54f35caSApple OSS Distributions 		}
3138*c54f35caSApple OSS Distributions 
3139*c54f35caSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
3140*c54f35caSApple OSS Distributions 		if (dataP->fMapper) {
3141*c54f35caSApple OSS Distributions 			IODMAMapSpecification mapSpec;
3142*c54f35caSApple OSS Distributions 			bzero(&mapSpec, sizeof(mapSpec));
3143*c54f35caSApple OSS Distributions 			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3144*c54f35caSApple OSS Distributions 			mapSpec.alignment = dataP->fDMAMapAlignment;
3145*c54f35caSApple OSS Distributions 			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3146*c54f35caSApple OSS Distributions 			if (kIOReturnSuccess != err) {
3147*c54f35caSApple OSS Distributions 				return err;
3148*c54f35caSApple OSS Distributions 			}
3149*c54f35caSApple OSS Distributions 			dataP->fMappedBaseValid = true;
3150*c54f35caSApple OSS Distributions 		}
3151*c54f35caSApple OSS Distributions 	}
3152*c54f35caSApple OSS Distributions 
3153*c54f35caSApple OSS Distributions 	if (mapped) {
3154*c54f35caSApple OSS Distributions 		if (IOMapper::gSystem
3155*c54f35caSApple OSS Distributions 		    && (!(kIOMemoryHostOnly & _flags))
3156*c54f35caSApple OSS Distributions 		    && _memoryEntries
3157*c54f35caSApple OSS Distributions 		    && (dataP = getDataP(_memoryEntries))
3158*c54f35caSApple OSS Distributions 		    && dataP->fMappedBaseValid) {
3159*c54f35caSApple OSS Distributions 			mappedBase = dataP->fMappedBase;
3160*c54f35caSApple OSS Distributions 		} else {
3161*c54f35caSApple OSS Distributions 			mapped = 0;
3162*c54f35caSApple OSS Distributions 		}
3163*c54f35caSApple OSS Distributions 	}
3164*c54f35caSApple OSS Distributions 
3165*c54f35caSApple OSS Distributions 	if (offset >= _length) {
3166*c54f35caSApple OSS Distributions 		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3167*c54f35caSApple OSS Distributions 	}
3168*c54f35caSApple OSS Distributions 
3169*c54f35caSApple OSS Distributions 	// Validate the previous offset
3170*c54f35caSApple OSS Distributions 	UInt ind;
3171*c54f35caSApple OSS Distributions 	mach_vm_size_t off2Ind = isP->fOffset2Index;
3172*c54f35caSApple OSS Distributions 	if (!params
3173*c54f35caSApple OSS Distributions 	    && offset
3174*c54f35caSApple OSS Distributions 	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
3175*c54f35caSApple OSS Distributions 		ind = isP->fIndex;
3176*c54f35caSApple OSS Distributions 	} else {
3177*c54f35caSApple OSS Distributions 		ind = off2Ind = 0; // Start from beginning
3178*c54f35caSApple OSS Distributions 	}
3179*c54f35caSApple OSS Distributions 	mach_vm_size_t length;
3180*c54f35caSApple OSS Distributions 	UInt64 address;
3181*c54f35caSApple OSS Distributions 
3182*c54f35caSApple OSS Distributions 	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3183*c54f35caSApple OSS Distributions 		// Physical address based memory descriptor
3184*c54f35caSApple OSS Distributions 		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3185*c54f35caSApple OSS Distributions 
3186*c54f35caSApple OSS Distributions 		// Find the range after the one that contains the offset
3187*c54f35caSApple OSS Distributions 		mach_vm_size_t len;
3188*c54f35caSApple OSS Distributions 		for (len = 0; off2Ind <= offset; ind++) {
3189*c54f35caSApple OSS Distributions 			len = physP[ind].length;
3190*c54f35caSApple OSS Distributions 			off2Ind += len;
3191*c54f35caSApple OSS Distributions 		}
3192*c54f35caSApple OSS Distributions 
3193*c54f35caSApple OSS Distributions 		// Calculate length within range and starting address
3194*c54f35caSApple OSS Distributions 		length   = off2Ind - offset;
3195*c54f35caSApple OSS Distributions 		address  = physP[ind - 1].address + len - length;
3196*c54f35caSApple OSS Distributions 
3197*c54f35caSApple OSS Distributions 		if (true && mapped) {
3198*c54f35caSApple OSS Distributions 			address = mappedBase + offset;
3199*c54f35caSApple OSS Distributions 		} else {
3200*c54f35caSApple OSS Distributions 			// see how far we can coalesce ranges
3201*c54f35caSApple OSS Distributions 			while (ind < _rangesCount && address + length == physP[ind].address) {
3202*c54f35caSApple OSS Distributions 				len = physP[ind].length;
3203*c54f35caSApple OSS Distributions 				length += len;
3204*c54f35caSApple OSS Distributions 				off2Ind += len;
3205*c54f35caSApple OSS Distributions 				ind++;
3206*c54f35caSApple OSS Distributions 			}
3207*c54f35caSApple OSS Distributions 		}
3208*c54f35caSApple OSS Distributions 
3209*c54f35caSApple OSS Distributions 		// correct contiguous check overshoot
3210*c54f35caSApple OSS Distributions 		ind--;
3211*c54f35caSApple OSS Distributions 		off2Ind -= len;
3212*c54f35caSApple OSS Distributions 	}
3213*c54f35caSApple OSS Distributions #ifndef __LP64__
3214*c54f35caSApple OSS Distributions 	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3215*c54f35caSApple OSS Distributions 		// Physical address based memory descriptor
3216*c54f35caSApple OSS Distributions 		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3217*c54f35caSApple OSS Distributions 
3218*c54f35caSApple OSS Distributions 		// Find the range after the one that contains the offset
3219*c54f35caSApple OSS Distributions 		mach_vm_size_t len;
3220*c54f35caSApple OSS Distributions 		for (len = 0; off2Ind <= offset; ind++) {
3221*c54f35caSApple OSS Distributions 			len = physP[ind].length;
3222*c54f35caSApple OSS Distributions 			off2Ind += len;
3223*c54f35caSApple OSS Distributions 		}
3224*c54f35caSApple OSS Distributions 
3225*c54f35caSApple OSS Distributions 		// Calculate length within range and starting address
3226*c54f35caSApple OSS Distributions 		length   = off2Ind - offset;
3227*c54f35caSApple OSS Distributions 		address  = physP[ind - 1].address + len - length;
3228*c54f35caSApple OSS Distributions 
3229*c54f35caSApple OSS Distributions 		if (true && mapped) {
3230*c54f35caSApple OSS Distributions 			address = mappedBase + offset;
3231*c54f35caSApple OSS Distributions 		} else {
3232*c54f35caSApple OSS Distributions 			// see how far we can coalesce ranges
3233*c54f35caSApple OSS Distributions 			while (ind < _rangesCount && address + length == physP[ind].address) {
3234*c54f35caSApple OSS Distributions 				len = physP[ind].length;
3235*c54f35caSApple OSS Distributions 				length += len;
3236*c54f35caSApple OSS Distributions 				off2Ind += len;
3237*c54f35caSApple OSS Distributions 				ind++;
3238*c54f35caSApple OSS Distributions 			}
3239*c54f35caSApple OSS Distributions 		}
3240*c54f35caSApple OSS Distributions 		// correct contiguous check overshoot
3241*c54f35caSApple OSS Distributions 		ind--;
3242*c54f35caSApple OSS Distributions 		off2Ind -= len;
3243*c54f35caSApple OSS Distributions 	}
3244*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
3245*c54f35caSApple OSS Distributions 	else {
3246*c54f35caSApple OSS Distributions 		do {
3247*c54f35caSApple OSS Distributions 			if (!_wireCount) {
3248*c54f35caSApple OSS Distributions 				panic("IOGMD: not wired for the IODMACommand");
3249*c54f35caSApple OSS Distributions 			}
3250*c54f35caSApple OSS Distributions 
3251*c54f35caSApple OSS Distributions 			assert(_memoryEntries);
3252*c54f35caSApple OSS Distributions 
3253*c54f35caSApple OSS Distributions 			dataP = getDataP(_memoryEntries);
3254*c54f35caSApple OSS Distributions 			const ioPLBlock *ioplList = getIOPLList(dataP);
3255*c54f35caSApple OSS Distributions 			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3256*c54f35caSApple OSS Distributions 			upl_page_info_t *pageList = getPageList(dataP);
3257*c54f35caSApple OSS Distributions 
3258*c54f35caSApple OSS Distributions 			assert(numIOPLs > 0);
3259*c54f35caSApple OSS Distributions 
3260*c54f35caSApple OSS Distributions 			// Scan through iopl info blocks looking for block containing offset
3261*c54f35caSApple OSS Distributions 			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3262*c54f35caSApple OSS Distributions 				ind++;
3263*c54f35caSApple OSS Distributions 			}
3264*c54f35caSApple OSS Distributions 
3265*c54f35caSApple OSS Distributions 			// Go back to actual range as search goes past it
3266*c54f35caSApple OSS Distributions 			ioPLBlock ioplInfo = ioplList[ind - 1];
3267*c54f35caSApple OSS Distributions 			off2Ind = ioplInfo.fIOMDOffset;
3268*c54f35caSApple OSS Distributions 
3269*c54f35caSApple OSS Distributions 			if (ind < numIOPLs) {
3270*c54f35caSApple OSS Distributions 				length = ioplList[ind].fIOMDOffset;
3271*c54f35caSApple OSS Distributions 			} else {
3272*c54f35caSApple OSS Distributions 				length = _length;
3273*c54f35caSApple OSS Distributions 			}
3274*c54f35caSApple OSS Distributions 			length -= offset;       // Remainder within iopl
3275*c54f35caSApple OSS Distributions 
3276*c54f35caSApple OSS Distributions 			// Subtract offset till this iopl in total list
3277*c54f35caSApple OSS Distributions 			offset -= off2Ind;
3278*c54f35caSApple OSS Distributions 
3279*c54f35caSApple OSS Distributions 			// If a mapped address is requested and this is a pre-mapped IOPL
3280*c54f35caSApple OSS Distributions 			// then just need to compute an offset relative to the mapped base.
3281*c54f35caSApple OSS Distributions 			if (mapped) {
3282*c54f35caSApple OSS Distributions 				offset += (ioplInfo.fPageOffset & PAGE_MASK);
3283*c54f35caSApple OSS Distributions 				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3284*c54f35caSApple OSS Distributions 				continue; // Done leave do/while(false) now
3285*c54f35caSApple OSS Distributions 			}
3286*c54f35caSApple OSS Distributions 
3287*c54f35caSApple OSS Distributions 			// The offset is rebased into the current iopl.
3288*c54f35caSApple OSS Distributions 			// Now add the iopl 1st page offset.
3289*c54f35caSApple OSS Distributions 			offset += ioplInfo.fPageOffset;
3290*c54f35caSApple OSS Distributions 
3291*c54f35caSApple OSS Distributions 			// For external UPLs the fPageInfo field points directly to
3292*c54f35caSApple OSS Distributions 			// the upl's upl_page_info_t array.
3293*c54f35caSApple OSS Distributions 			if (ioplInfo.fFlags & kIOPLExternUPL) {
3294*c54f35caSApple OSS Distributions 				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3295*c54f35caSApple OSS Distributions 			} else {
3296*c54f35caSApple OSS Distributions 				pageList = &pageList[ioplInfo.fPageInfo];
3297*c54f35caSApple OSS Distributions 			}
3298*c54f35caSApple OSS Distributions 
3299*c54f35caSApple OSS Distributions 			// Check for direct device non-paged memory
3300*c54f35caSApple OSS Distributions 			if (ioplInfo.fFlags & kIOPLOnDevice) {
3301*c54f35caSApple OSS Distributions 				address = ptoa_64(pageList->phys_addr) + offset;
3302*c54f35caSApple OSS Distributions 				continue; // Done leave do/while(false) now
3303*c54f35caSApple OSS Distributions 			}
3304*c54f35caSApple OSS Distributions 
3305*c54f35caSApple OSS Distributions 			// Now we need compute the index into the pageList
3306*c54f35caSApple OSS Distributions 			UInt pageInd = atop_32(offset);
3307*c54f35caSApple OSS Distributions 			offset &= PAGE_MASK;
3308*c54f35caSApple OSS Distributions 
3309*c54f35caSApple OSS Distributions 			// Compute the starting address of this segment
3310*c54f35caSApple OSS Distributions 			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3311*c54f35caSApple OSS Distributions 			if (!pageAddr) {
3312*c54f35caSApple OSS Distributions 				panic("!pageList phys_addr");
3313*c54f35caSApple OSS Distributions 			}
3314*c54f35caSApple OSS Distributions 
3315*c54f35caSApple OSS Distributions 			address = ptoa_64(pageAddr) + offset;
3316*c54f35caSApple OSS Distributions 
3317*c54f35caSApple OSS Distributions 			// length is currently set to the length of the remainider of the iopl.
3318*c54f35caSApple OSS Distributions 			// We need to check that the remainder of the iopl is contiguous.
3319*c54f35caSApple OSS Distributions 			// This is indicated by pageList[ind].phys_addr being sequential.
3320*c54f35caSApple OSS Distributions 			IOByteCount contigLength = PAGE_SIZE - offset;
3321*c54f35caSApple OSS Distributions 			while (contigLength < length
3322*c54f35caSApple OSS Distributions 			    && ++pageAddr == pageList[++pageInd].phys_addr) {
3323*c54f35caSApple OSS Distributions 				contigLength += PAGE_SIZE;
3324*c54f35caSApple OSS Distributions 			}
3325*c54f35caSApple OSS Distributions 
3326*c54f35caSApple OSS Distributions 			if (contigLength < length) {
3327*c54f35caSApple OSS Distributions 				length = contigLength;
3328*c54f35caSApple OSS Distributions 			}
3329*c54f35caSApple OSS Distributions 
3330*c54f35caSApple OSS Distributions 
3331*c54f35caSApple OSS Distributions 			assert(address);
3332*c54f35caSApple OSS Distributions 			assert(length);
3333*c54f35caSApple OSS Distributions 		} while (false);
3334*c54f35caSApple OSS Distributions 	}
3335*c54f35caSApple OSS Distributions 
3336*c54f35caSApple OSS Distributions 	// Update return values and state
3337*c54f35caSApple OSS Distributions 	isP->fIO.fIOVMAddr = address;
3338*c54f35caSApple OSS Distributions 	isP->fIO.fLength   = length;
3339*c54f35caSApple OSS Distributions 	isP->fIndex        = ind;
3340*c54f35caSApple OSS Distributions 	isP->fOffset2Index = off2Ind;
3341*c54f35caSApple OSS Distributions 	isP->fNextOffset   = isP->fIO.fOffset + length;
3342*c54f35caSApple OSS Distributions 
3343*c54f35caSApple OSS Distributions 	return kIOReturnSuccess;
3344*c54f35caSApple OSS Distributions }
3345*c54f35caSApple OSS Distributions 
3346*c54f35caSApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3347*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3348*c54f35caSApple OSS Distributions {
3349*c54f35caSApple OSS Distributions 	IOReturn          ret;
3350*c54f35caSApple OSS Distributions 	mach_vm_address_t address = 0;
3351*c54f35caSApple OSS Distributions 	mach_vm_size_t    length  = 0;
3352*c54f35caSApple OSS Distributions 	IOMapper *        mapper  = gIOSystemMapper;
3353*c54f35caSApple OSS Distributions 	IOOptionBits      type    = _flags & kIOMemoryTypeMask;
3354*c54f35caSApple OSS Distributions 
3355*c54f35caSApple OSS Distributions 	if (lengthOfSegment) {
3356*c54f35caSApple OSS Distributions 		*lengthOfSegment = 0;
3357*c54f35caSApple OSS Distributions 	}
3358*c54f35caSApple OSS Distributions 
3359*c54f35caSApple OSS Distributions 	if (offset >= _length) {
3360*c54f35caSApple OSS Distributions 		return 0;
3361*c54f35caSApple OSS Distributions 	}
3362*c54f35caSApple OSS Distributions 
3363*c54f35caSApple OSS Distributions 	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3364*c54f35caSApple OSS Distributions 	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3365*c54f35caSApple OSS Distributions 	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3366*c54f35caSApple OSS Distributions 	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3367*c54f35caSApple OSS Distributions 
3368*c54f35caSApple OSS Distributions 	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3369*c54f35caSApple OSS Distributions 		unsigned rangesIndex = 0;
3370*c54f35caSApple OSS Distributions 		Ranges vec = _ranges;
3371*c54f35caSApple OSS Distributions 		mach_vm_address_t addr;
3372*c54f35caSApple OSS Distributions 
3373*c54f35caSApple OSS Distributions 		// Find starting address within the vector of ranges
3374*c54f35caSApple OSS Distributions 		for (;;) {
3375*c54f35caSApple OSS Distributions 			getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
3376*c54f35caSApple OSS Distributions 			if (offset < length) {
3377*c54f35caSApple OSS Distributions 				break;
3378*c54f35caSApple OSS Distributions 			}
3379*c54f35caSApple OSS Distributions 			offset -= length; // (make offset relative)
3380*c54f35caSApple OSS Distributions 			rangesIndex++;
3381*c54f35caSApple OSS Distributions 		}
3382*c54f35caSApple OSS Distributions 
3383*c54f35caSApple OSS Distributions 		// Now that we have the starting range,
3384*c54f35caSApple OSS Distributions 		// lets find the last contiguous range
3385*c54f35caSApple OSS Distributions 		addr   += offset;
3386*c54f35caSApple OSS Distributions 		length -= offset;
3387*c54f35caSApple OSS Distributions 
3388*c54f35caSApple OSS Distributions 		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3389*c54f35caSApple OSS Distributions 			mach_vm_address_t newAddr;
3390*c54f35caSApple OSS Distributions 			mach_vm_size_t    newLen;
3391*c54f35caSApple OSS Distributions 
3392*c54f35caSApple OSS Distributions 			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
3393*c54f35caSApple OSS Distributions 			if (addr + length != newAddr) {
3394*c54f35caSApple OSS Distributions 				break;
3395*c54f35caSApple OSS Distributions 			}
3396*c54f35caSApple OSS Distributions 			length += newLen;
3397*c54f35caSApple OSS Distributions 		}
3398*c54f35caSApple OSS Distributions 		if (addr) {
3399*c54f35caSApple OSS Distributions 			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3400*c54f35caSApple OSS Distributions 		}
3401*c54f35caSApple OSS Distributions 	} else {
3402*c54f35caSApple OSS Distributions 		IOMDDMAWalkSegmentState _state;
3403*c54f35caSApple OSS Distributions 		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3404*c54f35caSApple OSS Distributions 
3405*c54f35caSApple OSS Distributions 		state->fOffset = offset;
3406*c54f35caSApple OSS Distributions 		state->fLength = _length - offset;
3407*c54f35caSApple OSS Distributions 		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3408*c54f35caSApple OSS Distributions 
3409*c54f35caSApple OSS Distributions 		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3410*c54f35caSApple OSS Distributions 
3411*c54f35caSApple OSS Distributions 		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3412*c54f35caSApple OSS Distributions 			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3413*c54f35caSApple OSS Distributions 			    ret, this, state->fOffset,
3414*c54f35caSApple OSS Distributions 			    state->fIOVMAddr, state->fLength);
3415*c54f35caSApple OSS Distributions 		}
3416*c54f35caSApple OSS Distributions 		if (kIOReturnSuccess == ret) {
3417*c54f35caSApple OSS Distributions 			address = state->fIOVMAddr;
3418*c54f35caSApple OSS Distributions 			length  = state->fLength;
3419*c54f35caSApple OSS Distributions 		}
3420*c54f35caSApple OSS Distributions 
3421*c54f35caSApple OSS Distributions 		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3422*c54f35caSApple OSS Distributions 		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3423*c54f35caSApple OSS Distributions 
3424*c54f35caSApple OSS Distributions 		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3425*c54f35caSApple OSS Distributions 			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3426*c54f35caSApple OSS Distributions 				addr64_t    origAddr = address;
3427*c54f35caSApple OSS Distributions 				IOByteCount origLen  = length;
3428*c54f35caSApple OSS Distributions 
3429*c54f35caSApple OSS Distributions 				address = mapper->mapToPhysicalAddress(origAddr);
3430*c54f35caSApple OSS Distributions 				length = page_size - (address & (page_size - 1));
3431*c54f35caSApple OSS Distributions 				while ((length < origLen)
3432*c54f35caSApple OSS Distributions 				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3433*c54f35caSApple OSS Distributions 					length += page_size;
3434*c54f35caSApple OSS Distributions 				}
3435*c54f35caSApple OSS Distributions 				if (length > origLen) {
3436*c54f35caSApple OSS Distributions 					length = origLen;
3437*c54f35caSApple OSS Distributions 				}
3438*c54f35caSApple OSS Distributions 			}
3439*c54f35caSApple OSS Distributions 		}
3440*c54f35caSApple OSS Distributions 	}
3441*c54f35caSApple OSS Distributions 
3442*c54f35caSApple OSS Distributions 	if (!address) {
3443*c54f35caSApple OSS Distributions 		length = 0;
3444*c54f35caSApple OSS Distributions 	}
3445*c54f35caSApple OSS Distributions 
3446*c54f35caSApple OSS Distributions 	if (lengthOfSegment) {
3447*c54f35caSApple OSS Distributions 		*lengthOfSegment = length;
3448*c54f35caSApple OSS Distributions 	}
3449*c54f35caSApple OSS Distributions 
3450*c54f35caSApple OSS Distributions 	return address;
3451*c54f35caSApple OSS Distributions }
3452*c54f35caSApple OSS Distributions 
3453*c54f35caSApple OSS Distributions #ifndef __LP64__
3454*c54f35caSApple OSS Distributions #pragma clang diagnostic push
3455*c54f35caSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3456*c54f35caSApple OSS Distributions 
3457*c54f35caSApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3458*c54f35caSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3459*c54f35caSApple OSS Distributions {
3460*c54f35caSApple OSS Distributions 	addr64_t address = 0;
3461*c54f35caSApple OSS Distributions 
3462*c54f35caSApple OSS Distributions 	if (options & _kIOMemorySourceSegment) {
3463*c54f35caSApple OSS Distributions 		address = getSourceSegment(offset, lengthOfSegment);
3464*c54f35caSApple OSS Distributions 	} else if (options & kIOMemoryMapperNone) {
3465*c54f35caSApple OSS Distributions 		address = getPhysicalSegment64(offset, lengthOfSegment);
3466*c54f35caSApple OSS Distributions 	} else {
3467*c54f35caSApple OSS Distributions 		address = getPhysicalSegment(offset, lengthOfSegment);
3468*c54f35caSApple OSS Distributions 	}
3469*c54f35caSApple OSS Distributions 
3470*c54f35caSApple OSS Distributions 	return address;
3471*c54f35caSApple OSS Distributions }
3472*c54f35caSApple OSS Distributions #pragma clang diagnostic pop
3473*c54f35caSApple OSS Distributions 
3474*c54f35caSApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3475*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3476*c54f35caSApple OSS Distributions {
3477*c54f35caSApple OSS Distributions 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3478*c54f35caSApple OSS Distributions }
3479*c54f35caSApple OSS Distributions 
3480*c54f35caSApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3481*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3482*c54f35caSApple OSS Distributions {
3483*c54f35caSApple OSS Distributions 	addr64_t    address = 0;
3484*c54f35caSApple OSS Distributions 	IOByteCount length  = 0;
3485*c54f35caSApple OSS Distributions 
3486*c54f35caSApple OSS Distributions 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3487*c54f35caSApple OSS Distributions 
3488*c54f35caSApple OSS Distributions 	if (lengthOfSegment) {
3489*c54f35caSApple OSS Distributions 		length = *lengthOfSegment;
3490*c54f35caSApple OSS Distributions 	}
3491*c54f35caSApple OSS Distributions 
3492*c54f35caSApple OSS Distributions 	if ((address + length) > 0x100000000ULL) {
3493*c54f35caSApple OSS Distributions 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3494*c54f35caSApple OSS Distributions 		    address, (long) length, (getMetaClass())->getClassName());
3495*c54f35caSApple OSS Distributions 	}
3496*c54f35caSApple OSS Distributions 
3497*c54f35caSApple OSS Distributions 	return (IOPhysicalAddress) address;
3498*c54f35caSApple OSS Distributions }
3499*c54f35caSApple OSS Distributions 
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// Deprecated (non-LP64) accessor: return the true CPU-physical address
	// of the segment at 'offset'. The 32-bit getPhysicalSegment() may hand
	// back a system-mapper (IOVM) address, so translate it back page by
	// page when a system mapper is present.
	IOPhysicalAddress phys32;
	IOByteCount       length;
	addr64_t          phys64;
	IOMapper *        mapper = NULL;

	phys32 = getPhysicalSegment(offset, lengthOfSegment);
	if (!phys32) {
		return 0;
	}

	if (gIOSystemMapper) {
		mapper = gIOSystemMapper;
	}

	if (mapper) {
		IOByteCount origLen;

		// Translate the first page, then extend the segment for as long as
		// the mapper's translation stays physically contiguous.
		// NOTE(review): this leg dereferences lengthOfSegment without a
		// NULL check — callers reaching here must pass a valid pointer.
		phys64 = mapper->mapToPhysicalAddress(phys32);
		origLen = *lengthOfSegment;
		length = page_size - (phys64 & (page_size - 1));
		while ((length < origLen)
		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
			length += page_size;
		}
		// Never report more than the originally returned segment length.
		if (length > origLen) {
			length = origLen;
		}

		*lengthOfSegment = length;
	} else {
		// No system mapper: the 32-bit address is already physical.
		phys64 = (addr64_t) phys32;
	}

	return phys64;
}
3538*c54f35caSApple OSS Distributions 
3539*c54f35caSApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3540*c54f35caSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3541*c54f35caSApple OSS Distributions {
3542*c54f35caSApple OSS Distributions 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3543*c54f35caSApple OSS Distributions }
3544*c54f35caSApple OSS Distributions 
3545*c54f35caSApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3546*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3547*c54f35caSApple OSS Distributions {
3548*c54f35caSApple OSS Distributions 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3549*c54f35caSApple OSS Distributions }
3550*c54f35caSApple OSS Distributions 
3551*c54f35caSApple OSS Distributions #pragma clang diagnostic push
3552*c54f35caSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3553*c54f35caSApple OSS Distributions 
3554*c54f35caSApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3555*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3556*c54f35caSApple OSS Distributions     IOByteCount * lengthOfSegment)
3557*c54f35caSApple OSS Distributions {
3558*c54f35caSApple OSS Distributions 	if (_task == kernel_task) {
3559*c54f35caSApple OSS Distributions 		return (void *) getSourceSegment(offset, lengthOfSegment);
3560*c54f35caSApple OSS Distributions 	} else {
3561*c54f35caSApple OSS Distributions 		panic("IOGMD::getVirtualSegment deprecated");
3562*c54f35caSApple OSS Distributions 	}
3563*c54f35caSApple OSS Distributions 
3564*c54f35caSApple OSS Distributions 	return NULL;
3565*c54f35caSApple OSS Distributions }
3566*c54f35caSApple OSS Distributions #pragma clang diagnostic pop
3567*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
3568*c54f35caSApple OSS Distributions 
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// Generic (base-class) backend for IODMACommand operations. Subclasses
	// such as IOGeneralMemoryDescriptor override this with faster paths;
	// this version synthesizes everything from getPhysicalSegment().
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Split the incoming op word into modifier bits and the operation
	// selector. NOTE(review): 'params' is computed but unused in this base
	// implementation; it mirrors the structure of the subclass overrides.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		// Report coarse characteristics of this descriptor.
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		// Produce the physically contiguous segment starting at fOffset.
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// The base class cannot produce mapper (IOVM) addresses.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		// Coalesce following segments while they stay physically adjacent.
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		// Create a DMA mapping through the supplied mapper.
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): dmaUnmap's result ('err') is discarded and success
		// is reported unconditionally — confirm this is intentional.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3640*c54f35caSApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	// Change the purgeability of the memory backing this descriptor.
	// If a memory entry (_memRef) exists, defer to the base-class path
	// that operates on the entry; otherwise act directly on the owning
	// task's VM map via vm_map_purgable_control(). The previous state is
	// returned through 'oldState' when requested.
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	// Remote (other-kernel) memory cannot be manipulated from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable kernel buffers are not handled on this path.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate the IOKit purgeable constant into VM control/state.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Convert the VM state back into IOKit constants.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3704*c54f35caSApple OSS Distributions 
IOReturn
IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	// Base-class purgeability control: only descriptors backed by a memory
	// entry (_memRef) can be adjusted here; others report kIOReturnNotReady.
	IOReturn err = kIOReturnNotReady;

	// Honor the opt-in thread-safety flag around the _memRef access.
	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
3723*c54f35caSApple OSS Distributions 
3724*c54f35caSApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3725*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3726*c54f35caSApple OSS Distributions     int newLedgerTag,
3727*c54f35caSApple OSS Distributions     IOOptionBits newLedgerOptions )
3728*c54f35caSApple OSS Distributions {
3729*c54f35caSApple OSS Distributions 	IOReturn      err = kIOReturnSuccess;
3730*c54f35caSApple OSS Distributions 
3731*c54f35caSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3732*c54f35caSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3733*c54f35caSApple OSS Distributions 		return kIOReturnNotAttached;
3734*c54f35caSApple OSS Distributions 	}
3735*c54f35caSApple OSS Distributions 
3736*c54f35caSApple OSS Distributions 	if (iokit_iomd_setownership_enabled == FALSE) {
3737*c54f35caSApple OSS Distributions 		return kIOReturnUnsupported;
3738*c54f35caSApple OSS Distributions 	}
3739*c54f35caSApple OSS Distributions 
3740*c54f35caSApple OSS Distributions 	if (_memRef) {
3741*c54f35caSApple OSS Distributions 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3742*c54f35caSApple OSS Distributions 	} else {
3743*c54f35caSApple OSS Distributions 		err = kIOReturnUnsupported;
3744*c54f35caSApple OSS Distributions 	}
3745*c54f35caSApple OSS Distributions 
3746*c54f35caSApple OSS Distributions 	return err;
3747*c54f35caSApple OSS Distributions }
3748*c54f35caSApple OSS Distributions 
IOReturn
IOMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
	// Reassign which task's ledgers are charged for this memory. Handles
	// the memory-entry (_memRef) case directly and forwards to the sub- or
	// multi-memory-descriptor overloads when this object is one of those.
	IOReturn err = kIOReturnNotReady;

	// Remote (other-kernel) memory cannot be manipulated from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Global (boot-time) switch for the setOwnership feature.
	if (iokit_iomd_setownership_enabled == FALSE) {
		return kIOReturnUnsupported;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
	} else {
		// No memory entry: only the aggregate descriptor subclasses can
		// forward the request to their constituent descriptors.
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor   * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
3785*c54f35caSApple OSS Distributions 
3786*c54f35caSApple OSS Distributions 
uint64_t
IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
{
	// Compute the page-rounded number of bytes a DMA mapping of this
	// descriptor would occupy and, optionally via 'offset', the in-page
	// offset of the first byte.
	uint64_t length;

	if (_memRef) {
		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
	} else {
		IOByteCount       iterate, segLen;
		IOPhysicalAddress sourceAddr, sourceAlign;

		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		length = 0;
		iterate = 0;
		// Walk the source segments, accumulating each page-rounded span.
		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
			sourceAlign = (sourceAddr & page_mask);
			// The in-page alignment of the very first segment (iterate == 0)
			// is the mapping's starting offset.
			if (offset && !iterate) {
				*offset = sourceAlign;
			}
			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
			iterate += segLen;
		}
		// No segments at all: fall back to the raw descriptor length.
		if (!iterate) {
			length = getLength();
			if (offset) {
				*offset = 0;
			}
		}
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return length;
}
3824*c54f35caSApple OSS Distributions 
3825*c54f35caSApple OSS Distributions 
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
    IOByteCount * dirtyPageCount )
{
	// Query resident and dirty page counts for the memory behind this
	// descriptor. Works through the memory entry (_memRef) when present,
	// otherwise forwards to the sub-/multi-memory-descriptor overloads.
	IOReturn err = kIOReturnNotReady;

	// Remote (other-kernel) memory cannot be queried from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
	} else {
		// No memory entry: only aggregate descriptor subclasses can answer.
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor   * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
3857*c54f35caSApple OSS Distributions 
3858*c54f35caSApple OSS Distributions 
3859*c54f35caSApple OSS Distributions #if defined(__arm64__)
3860*c54f35caSApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3861*c54f35caSApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3862*c54f35caSApple OSS Distributions #else /* defined(__arm64__) */
3863*c54f35caSApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3864*c54f35caSApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3865*c54f35caSApple OSS Distributions #endif /* defined(__arm64__) */
3866*c54f35caSApple OSS Distributions 
3867*c54f35caSApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3868*c54f35caSApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3869*c54f35caSApple OSS Distributions {
3870*c54f35caSApple OSS Distributions 	ppnum_t page, end;
3871*c54f35caSApple OSS Distributions 
3872*c54f35caSApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3873*c54f35caSApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3874*c54f35caSApple OSS Distributions 	for (; page < end; page++) {
3875*c54f35caSApple OSS Distributions 		pmap_clear_noencrypt(page);
3876*c54f35caSApple OSS Distributions 	}
3877*c54f35caSApple OSS Distributions }
3878*c54f35caSApple OSS Distributions 
3879*c54f35caSApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3880*c54f35caSApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3881*c54f35caSApple OSS Distributions {
3882*c54f35caSApple OSS Distributions 	ppnum_t page, end;
3883*c54f35caSApple OSS Distributions 
3884*c54f35caSApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3885*c54f35caSApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3886*c54f35caSApple OSS Distributions 	for (; page < end; page++) {
3887*c54f35caSApple OSS Distributions 		pmap_set_noencrypt(page);
3888*c54f35caSApple OSS Distributions 	}
3889*c54f35caSApple OSS Distributions }
3890*c54f35caSApple OSS Distributions 
/*
 * Apply a maintenance operation (incoherent-IO cache flush/store, or
 * set/clear page encryption) over a byte range of this descriptor,
 * walking the range one physical segment at a time via
 * getPhysicalSegment(..., kIOMemoryMapperNone).
 *
 * Returns kIOReturnUnsupported for unrecognized options,
 * kIOReturnNotAttached for remote memory, kIOReturnUnderrun if a
 * physical segment lookup failed before the clipped length was
 * covered, and kIOReturnSuccess otherwise.
 */
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	/* Per-segment worker selected by 'options' below. */
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	/* arm64 worker variant: also receives the remaining byte count and
	 * reports back through 'res'. */
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	/* These operations cannot be applied to remote memory. */
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		/* Coherent-IO platforms: a single zero-length call suffices;
		 * no per-segment walk is needed.
		 * NOTE(review): 'res' is uninitialized here and used purely as
		 * an out parameter whose value is never read -- confirm the
		 * callee always writes it. */
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		/* See the flush case above: single call, early success return. */
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	/* No worker selected means the option was not recognized. */
#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	/* Clip the requested range to the descriptor's length. */
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		/* Clamp to what fits in the workers' 'unsigned int' arguments. */
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			/* A nonzero 'res' ends the walk; zeroing 'remaining' makes
			 * the final return below report success. */
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4005*c54f35caSApple OSS Distributions 
4006*c54f35caSApple OSS Distributions /*
4007*c54f35caSApple OSS Distributions  *
4008*c54f35caSApple OSS Distributions  */
4009*c54f35caSApple OSS Distributions 
/*
 * Per-architecture bounds of the "static" kernel virtual range:
 * addresses in [io_kernel_static_start, io_kernel_static_end) are
 * handled by io_get_kernel_static_upl() below instead of the normal
 * UPL machinery.
 */
#if defined(__i386__) || defined(__x86_64__)

extern vm_offset_t kc_highest_nonlinkedit_vmaddr;

/* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
 * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
 * kernel non-text data -- should we just add another range instead?
 */
#define io_kernel_static_start  vm_kernel_stext
#define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)

#elif defined(__arm64__)

extern vm_offset_t              static_memory_end;

/* NOTE(review): this inner #if is always true inside the enclosing
 * '#elif defined(__arm64__)' branch; the #else arm looks like a vestige
 * of earlier 32-bit ARM support -- confirm before simplifying. */
#if defined(__arm64__)
#define io_kernel_static_start vm_kext_base
#else /* defined(__arm64__) */
#define io_kernel_static_start vm_kernel_stext
#endif /* defined(__arm64__) */

#define io_kernel_static_end    static_memory_end

#else
#error io_kernel_static_end is undefined for this architecture
#endif
4036*c54f35caSApple OSS Distributions 
4037*c54f35caSApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4038*c54f35caSApple OSS Distributions io_get_kernel_static_upl(
4039*c54f35caSApple OSS Distributions 	vm_map_t                /* map */,
4040*c54f35caSApple OSS Distributions 	uintptr_t               offset,
4041*c54f35caSApple OSS Distributions 	upl_size_t              *upl_size,
4042*c54f35caSApple OSS Distributions 	unsigned int            *page_offset,
4043*c54f35caSApple OSS Distributions 	upl_t                   *upl,
4044*c54f35caSApple OSS Distributions 	upl_page_info_array_t   page_list,
4045*c54f35caSApple OSS Distributions 	unsigned int            *count,
4046*c54f35caSApple OSS Distributions 	ppnum_t                 *highest_page)
4047*c54f35caSApple OSS Distributions {
4048*c54f35caSApple OSS Distributions 	unsigned int pageCount, page;
4049*c54f35caSApple OSS Distributions 	ppnum_t phys;
4050*c54f35caSApple OSS Distributions 	ppnum_t highestPage = 0;
4051*c54f35caSApple OSS Distributions 
4052*c54f35caSApple OSS Distributions 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4053*c54f35caSApple OSS Distributions 	if (pageCount > *count) {
4054*c54f35caSApple OSS Distributions 		pageCount = *count;
4055*c54f35caSApple OSS Distributions 	}
4056*c54f35caSApple OSS Distributions 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4057*c54f35caSApple OSS Distributions 
4058*c54f35caSApple OSS Distributions 	*upl = NULL;
4059*c54f35caSApple OSS Distributions 	*page_offset = ((unsigned int) page_mask & offset);
4060*c54f35caSApple OSS Distributions 
4061*c54f35caSApple OSS Distributions 	for (page = 0; page < pageCount; page++) {
4062*c54f35caSApple OSS Distributions 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4063*c54f35caSApple OSS Distributions 		if (!phys) {
4064*c54f35caSApple OSS Distributions 			break;
4065*c54f35caSApple OSS Distributions 		}
4066*c54f35caSApple OSS Distributions 		page_list[page].phys_addr = phys;
4067*c54f35caSApple OSS Distributions 		page_list[page].free_when_done = 0;
4068*c54f35caSApple OSS Distributions 		page_list[page].absent    = 0;
4069*c54f35caSApple OSS Distributions 		page_list[page].dirty     = 0;
4070*c54f35caSApple OSS Distributions 		page_list[page].precious  = 0;
4071*c54f35caSApple OSS Distributions 		page_list[page].device    = 0;
4072*c54f35caSApple OSS Distributions 		if (phys > highestPage) {
4073*c54f35caSApple OSS Distributions 			highestPage = phys;
4074*c54f35caSApple OSS Distributions 		}
4075*c54f35caSApple OSS Distributions 	}
4076*c54f35caSApple OSS Distributions 
4077*c54f35caSApple OSS Distributions 	*highest_page = highestPage;
4078*c54f35caSApple OSS Distributions 
4079*c54f35caSApple OSS Distributions 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4080*c54f35caSApple OSS Distributions }
4081*c54f35caSApple OSS Distributions 
4082*c54f35caSApple OSS Distributions IOReturn
wireVirtual(IODirection forDirection)4083*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4084*c54f35caSApple OSS Distributions {
4085*c54f35caSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4086*c54f35caSApple OSS Distributions 	IOReturn error = kIOReturnSuccess;
4087*c54f35caSApple OSS Distributions 	ioGMDData *dataP;
4088*c54f35caSApple OSS Distributions 	upl_page_info_array_t pageInfo;
4089*c54f35caSApple OSS Distributions 	ppnum_t mapBase;
4090*c54f35caSApple OSS Distributions 	vm_tag_t tag = VM_KERN_MEMORY_NONE;
4091*c54f35caSApple OSS Distributions 	mach_vm_size_t numBytesWired = 0;
4092*c54f35caSApple OSS Distributions 
4093*c54f35caSApple OSS Distributions 	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4094*c54f35caSApple OSS Distributions 
4095*c54f35caSApple OSS Distributions 	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4096*c54f35caSApple OSS Distributions 		forDirection = (IODirection) (forDirection | getDirection());
4097*c54f35caSApple OSS Distributions 	}
4098*c54f35caSApple OSS Distributions 
4099*c54f35caSApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4100*c54f35caSApple OSS Distributions 	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4101*c54f35caSApple OSS Distributions 	switch (kIODirectionOutIn & forDirection) {
4102*c54f35caSApple OSS Distributions 	case kIODirectionOut:
4103*c54f35caSApple OSS Distributions 		// Pages do not need to be marked as dirty on commit
4104*c54f35caSApple OSS Distributions 		uplFlags = UPL_COPYOUT_FROM;
4105*c54f35caSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess;
4106*c54f35caSApple OSS Distributions 		break;
4107*c54f35caSApple OSS Distributions 
4108*c54f35caSApple OSS Distributions 	case kIODirectionIn:
4109*c54f35caSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapWriteAccess;
4110*c54f35caSApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4111*c54f35caSApple OSS Distributions 		break;
4112*c54f35caSApple OSS Distributions 
4113*c54f35caSApple OSS Distributions 	default:
4114*c54f35caSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4115*c54f35caSApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4116*c54f35caSApple OSS Distributions 		break;
4117*c54f35caSApple OSS Distributions 	}
4118*c54f35caSApple OSS Distributions 
4119*c54f35caSApple OSS Distributions 	if (_wireCount) {
4120*c54f35caSApple OSS Distributions 		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4121*c54f35caSApple OSS Distributions 			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4122*c54f35caSApple OSS Distributions 			    (size_t)VM_KERNEL_ADDRPERM(this));
4123*c54f35caSApple OSS Distributions 			error = kIOReturnNotWritable;
4124*c54f35caSApple OSS Distributions 		}
4125*c54f35caSApple OSS Distributions 	} else {
4126*c54f35caSApple OSS Distributions 		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4127*c54f35caSApple OSS Distributions 		IOMapper *mapper;
4128*c54f35caSApple OSS Distributions 
4129*c54f35caSApple OSS Distributions 		mapper = dataP->fMapper;
4130*c54f35caSApple OSS Distributions 		dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4131*c54f35caSApple OSS Distributions 
4132*c54f35caSApple OSS Distributions 		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4133*c54f35caSApple OSS Distributions 		tag = _kernelTag;
4134*c54f35caSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE == tag) {
4135*c54f35caSApple OSS Distributions 			tag = IOMemoryTag(kernel_map);
4136*c54f35caSApple OSS Distributions 		}
4137*c54f35caSApple OSS Distributions 
4138*c54f35caSApple OSS Distributions 		if (kIODirectionPrepareToPhys32 & forDirection) {
4139*c54f35caSApple OSS Distributions 			if (!mapper) {
4140*c54f35caSApple OSS Distributions 				uplFlags |= UPL_NEED_32BIT_ADDR;
4141*c54f35caSApple OSS Distributions 			}
4142*c54f35caSApple OSS Distributions 			if (dataP->fDMAMapNumAddressBits > 32) {
4143*c54f35caSApple OSS Distributions 				dataP->fDMAMapNumAddressBits = 32;
4144*c54f35caSApple OSS Distributions 			}
4145*c54f35caSApple OSS Distributions 		}
4146*c54f35caSApple OSS Distributions 		if (kIODirectionPrepareNoFault    & forDirection) {
4147*c54f35caSApple OSS Distributions 			uplFlags |= UPL_REQUEST_NO_FAULT;
4148*c54f35caSApple OSS Distributions 		}
4149*c54f35caSApple OSS Distributions 		if (kIODirectionPrepareNoZeroFill & forDirection) {
4150*c54f35caSApple OSS Distributions 			uplFlags |= UPL_NOZEROFILLIO;
4151*c54f35caSApple OSS Distributions 		}
4152*c54f35caSApple OSS Distributions 		if (kIODirectionPrepareNonCoherent & forDirection) {
4153*c54f35caSApple OSS Distributions 			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4154*c54f35caSApple OSS Distributions 		}
4155*c54f35caSApple OSS Distributions 
4156*c54f35caSApple OSS Distributions 		mapBase = 0;
4157*c54f35caSApple OSS Distributions 
4158*c54f35caSApple OSS Distributions 		// Note that appendBytes(NULL) zeros the data up to the desired length
4159*c54f35caSApple OSS Distributions 		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4160*c54f35caSApple OSS Distributions 		if (uplPageSize > ((unsigned int)uplPageSize)) {
4161*c54f35caSApple OSS Distributions 			error = kIOReturnNoMemory;
4162*c54f35caSApple OSS Distributions 			traceInterval.setEndArg2(error);
4163*c54f35caSApple OSS Distributions 			return error;
4164*c54f35caSApple OSS Distributions 		}
4165*c54f35caSApple OSS Distributions 		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4166*c54f35caSApple OSS Distributions 			error = kIOReturnNoMemory;
4167*c54f35caSApple OSS Distributions 			traceInterval.setEndArg2(error);
4168*c54f35caSApple OSS Distributions 			return error;
4169*c54f35caSApple OSS Distributions 		}
4170*c54f35caSApple OSS Distributions 		dataP = NULL;
4171*c54f35caSApple OSS Distributions 
4172*c54f35caSApple OSS Distributions 		// Find the appropriate vm_map for the given task
4173*c54f35caSApple OSS Distributions 		vm_map_t curMap;
4174*c54f35caSApple OSS Distributions 		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4175*c54f35caSApple OSS Distributions 			curMap = NULL;
4176*c54f35caSApple OSS Distributions 		} else {
4177*c54f35caSApple OSS Distributions 			curMap = get_task_map(_task);
4178*c54f35caSApple OSS Distributions 		}
4179*c54f35caSApple OSS Distributions 
4180*c54f35caSApple OSS Distributions 		// Iterate over the vector of virtual ranges
4181*c54f35caSApple OSS Distributions 		Ranges vec = _ranges;
4182*c54f35caSApple OSS Distributions 		unsigned int pageIndex  = 0;
4183*c54f35caSApple OSS Distributions 		IOByteCount mdOffset    = 0;
4184*c54f35caSApple OSS Distributions 		ppnum_t highestPage     = 0;
4185*c54f35caSApple OSS Distributions 		bool         byteAlignUPL;
4186*c54f35caSApple OSS Distributions 
4187*c54f35caSApple OSS Distributions 		IOMemoryEntry * memRefEntry = NULL;
4188*c54f35caSApple OSS Distributions 		if (_memRef) {
4189*c54f35caSApple OSS Distributions 			memRefEntry = &_memRef->entries[0];
4190*c54f35caSApple OSS Distributions 			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4191*c54f35caSApple OSS Distributions 		} else {
4192*c54f35caSApple OSS Distributions 			byteAlignUPL = true;
4193*c54f35caSApple OSS Distributions 		}
4194*c54f35caSApple OSS Distributions 
4195*c54f35caSApple OSS Distributions 		for (UInt range = 0; mdOffset < _length; range++) {
4196*c54f35caSApple OSS Distributions 			ioPLBlock iopl;
4197*c54f35caSApple OSS Distributions 			mach_vm_address_t startPage, startPageOffset;
4198*c54f35caSApple OSS Distributions 			mach_vm_size_t    numBytes;
4199*c54f35caSApple OSS Distributions 			ppnum_t highPage = 0;
4200*c54f35caSApple OSS Distributions 
4201*c54f35caSApple OSS Distributions 			if (_memRef) {
4202*c54f35caSApple OSS Distributions 				if (range >= _memRef->count) {
4203*c54f35caSApple OSS Distributions 					panic("memRefEntry");
4204*c54f35caSApple OSS Distributions 				}
4205*c54f35caSApple OSS Distributions 				memRefEntry = &_memRef->entries[range];
4206*c54f35caSApple OSS Distributions 				numBytes    = memRefEntry->size;
4207*c54f35caSApple OSS Distributions 				startPage   = -1ULL;
4208*c54f35caSApple OSS Distributions 				if (byteAlignUPL) {
4209*c54f35caSApple OSS Distributions 					startPageOffset = 0;
4210*c54f35caSApple OSS Distributions 				} else {
4211*c54f35caSApple OSS Distributions 					startPageOffset = (memRefEntry->start & PAGE_MASK);
4212*c54f35caSApple OSS Distributions 				}
4213*c54f35caSApple OSS Distributions 			} else {
4214*c54f35caSApple OSS Distributions 				// Get the startPage address and length of vec[range]
4215*c54f35caSApple OSS Distributions 				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
4216*c54f35caSApple OSS Distributions 				if (byteAlignUPL) {
4217*c54f35caSApple OSS Distributions 					startPageOffset = 0;
4218*c54f35caSApple OSS Distributions 				} else {
4219*c54f35caSApple OSS Distributions 					startPageOffset = startPage & PAGE_MASK;
4220*c54f35caSApple OSS Distributions 					startPage = trunc_page_64(startPage);
4221*c54f35caSApple OSS Distributions 				}
4222*c54f35caSApple OSS Distributions 			}
4223*c54f35caSApple OSS Distributions 			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4224*c54f35caSApple OSS Distributions 			numBytes += startPageOffset;
4225*c54f35caSApple OSS Distributions 
4226*c54f35caSApple OSS Distributions 			if (mapper) {
4227*c54f35caSApple OSS Distributions 				iopl.fMappedPage = mapBase + pageIndex;
4228*c54f35caSApple OSS Distributions 			} else {
4229*c54f35caSApple OSS Distributions 				iopl.fMappedPage = 0;
4230*c54f35caSApple OSS Distributions 			}
4231*c54f35caSApple OSS Distributions 
4232*c54f35caSApple OSS Distributions 			// Iterate over the current range, creating UPLs
4233*c54f35caSApple OSS Distributions 			while (numBytes) {
4234*c54f35caSApple OSS Distributions 				vm_address_t kernelStart = (vm_address_t) startPage;
4235*c54f35caSApple OSS Distributions 				vm_map_t theMap;
4236*c54f35caSApple OSS Distributions 				if (curMap) {
4237*c54f35caSApple OSS Distributions 					theMap = curMap;
4238*c54f35caSApple OSS Distributions 				} else if (_memRef) {
4239*c54f35caSApple OSS Distributions 					theMap = NULL;
4240*c54f35caSApple OSS Distributions 				} else {
4241*c54f35caSApple OSS Distributions 					assert(_task == kernel_task);
4242*c54f35caSApple OSS Distributions 					theMap = IOPageableMapForAddress(kernelStart);
4243*c54f35caSApple OSS Distributions 				}
4244*c54f35caSApple OSS Distributions 
4245*c54f35caSApple OSS Distributions 				// ioplFlags is an in/out parameter
4246*c54f35caSApple OSS Distributions 				upl_control_flags_t ioplFlags = uplFlags;
4247*c54f35caSApple OSS Distributions 				dataP = getDataP(_memoryEntries);
4248*c54f35caSApple OSS Distributions 				pageInfo = getPageList(dataP);
4249*c54f35caSApple OSS Distributions 				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4250*c54f35caSApple OSS Distributions 
4251*c54f35caSApple OSS Distributions 				mach_vm_size_t ioplPhysSize;
4252*c54f35caSApple OSS Distributions 				upl_size_t     ioplSize;
4253*c54f35caSApple OSS Distributions 				unsigned int   numPageInfo;
4254*c54f35caSApple OSS Distributions 
4255*c54f35caSApple OSS Distributions 				if (_memRef) {
4256*c54f35caSApple OSS Distributions 					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4257*c54f35caSApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4258*c54f35caSApple OSS Distributions 				} else {
4259*c54f35caSApple OSS Distributions 					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4260*c54f35caSApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4261*c54f35caSApple OSS Distributions 				}
4262*c54f35caSApple OSS Distributions 				if (error != KERN_SUCCESS) {
4263*c54f35caSApple OSS Distributions 					if (_memRef) {
4264*c54f35caSApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4265*c54f35caSApple OSS Distributions 					} else {
4266*c54f35caSApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4267*c54f35caSApple OSS Distributions 					}
4268*c54f35caSApple OSS Distributions 					printf("entry size error %d\n", error);
4269*c54f35caSApple OSS Distributions 					goto abortExit;
4270*c54f35caSApple OSS Distributions 				}
4271*c54f35caSApple OSS Distributions 				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4272*c54f35caSApple OSS Distributions 				numPageInfo = atop_32(ioplPhysSize);
4273*c54f35caSApple OSS Distributions 				if (byteAlignUPL) {
4274*c54f35caSApple OSS Distributions 					if (numBytes > ioplPhysSize) {
4275*c54f35caSApple OSS Distributions 						ioplSize = ((typeof(ioplSize))ioplPhysSize);
4276*c54f35caSApple OSS Distributions 					} else {
4277*c54f35caSApple OSS Distributions 						ioplSize = ((typeof(ioplSize))numBytes);
4278*c54f35caSApple OSS Distributions 					}
4279*c54f35caSApple OSS Distributions 				} else {
4280*c54f35caSApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ioplPhysSize);
4281*c54f35caSApple OSS Distributions 				}
4282*c54f35caSApple OSS Distributions 
4283*c54f35caSApple OSS Distributions 				if (_memRef) {
4284*c54f35caSApple OSS Distributions 					memory_object_offset_t entryOffset;
4285*c54f35caSApple OSS Distributions 
4286*c54f35caSApple OSS Distributions 					entryOffset = mdOffset;
4287*c54f35caSApple OSS Distributions 					if (byteAlignUPL) {
4288*c54f35caSApple OSS Distributions 						entryOffset = (entryOffset - memRefEntry->offset);
4289*c54f35caSApple OSS Distributions 					} else {
4290*c54f35caSApple OSS Distributions 						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4291*c54f35caSApple OSS Distributions 					}
4292*c54f35caSApple OSS Distributions 					if (ioplSize > (memRefEntry->size - entryOffset)) {
4293*c54f35caSApple OSS Distributions 						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4294*c54f35caSApple OSS Distributions 					}
4295*c54f35caSApple OSS Distributions 					error = memory_object_iopl_request(memRefEntry->entry,
4296*c54f35caSApple OSS Distributions 					    entryOffset,
4297*c54f35caSApple OSS Distributions 					    &ioplSize,
4298*c54f35caSApple OSS Distributions 					    &iopl.fIOPL,
4299*c54f35caSApple OSS Distributions 					    baseInfo,
4300*c54f35caSApple OSS Distributions 					    &numPageInfo,
4301*c54f35caSApple OSS Distributions 					    &ioplFlags,
4302*c54f35caSApple OSS Distributions 					    tag);
4303*c54f35caSApple OSS Distributions 				} else if ((theMap == kernel_map)
4304*c54f35caSApple OSS Distributions 				    && (kernelStart >= io_kernel_static_start)
4305*c54f35caSApple OSS Distributions 				    && (kernelStart < io_kernel_static_end)) {
4306*c54f35caSApple OSS Distributions 					error = io_get_kernel_static_upl(theMap,
4307*c54f35caSApple OSS Distributions 					    kernelStart,
4308*c54f35caSApple OSS Distributions 					    &ioplSize,
4309*c54f35caSApple OSS Distributions 					    &iopl.fPageOffset,
4310*c54f35caSApple OSS Distributions 					    &iopl.fIOPL,
4311*c54f35caSApple OSS Distributions 					    baseInfo,
4312*c54f35caSApple OSS Distributions 					    &numPageInfo,
4313*c54f35caSApple OSS Distributions 					    &highPage);
4314*c54f35caSApple OSS Distributions 				} else {
4315*c54f35caSApple OSS Distributions 					assert(theMap);
4316*c54f35caSApple OSS Distributions 					error = vm_map_create_upl(theMap,
4317*c54f35caSApple OSS Distributions 					    startPage,
4318*c54f35caSApple OSS Distributions 					    (upl_size_t*)&ioplSize,
4319*c54f35caSApple OSS Distributions 					    &iopl.fIOPL,
4320*c54f35caSApple OSS Distributions 					    baseInfo,
4321*c54f35caSApple OSS Distributions 					    &numPageInfo,
4322*c54f35caSApple OSS Distributions 					    &ioplFlags,
4323*c54f35caSApple OSS Distributions 					    tag);
4324*c54f35caSApple OSS Distributions 				}
4325*c54f35caSApple OSS Distributions 
4326*c54f35caSApple OSS Distributions 				if (error != KERN_SUCCESS) {
4327*c54f35caSApple OSS Distributions 					traceInterval.setEndArg2(error);
4328*c54f35caSApple OSS Distributions 					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4329*c54f35caSApple OSS Distributions 					goto abortExit;
4330*c54f35caSApple OSS Distributions 				}
4331*c54f35caSApple OSS Distributions 
4332*c54f35caSApple OSS Distributions 				assert(ioplSize);
4333*c54f35caSApple OSS Distributions 
4334*c54f35caSApple OSS Distributions 				if (iopl.fIOPL) {
4335*c54f35caSApple OSS Distributions 					highPage = upl_get_highest_page(iopl.fIOPL);
4336*c54f35caSApple OSS Distributions 				}
4337*c54f35caSApple OSS Distributions 				if (highPage > highestPage) {
4338*c54f35caSApple OSS Distributions 					highestPage = highPage;
4339*c54f35caSApple OSS Distributions 				}
4340*c54f35caSApple OSS Distributions 
4341*c54f35caSApple OSS Distributions 				if (baseInfo->device) {
4342*c54f35caSApple OSS Distributions 					numPageInfo = 1;
4343*c54f35caSApple OSS Distributions 					iopl.fFlags = kIOPLOnDevice;
4344*c54f35caSApple OSS Distributions 				} else {
4345*c54f35caSApple OSS Distributions 					iopl.fFlags = 0;
4346*c54f35caSApple OSS Distributions 				}
4347*c54f35caSApple OSS Distributions 
4348*c54f35caSApple OSS Distributions 				if (byteAlignUPL) {
4349*c54f35caSApple OSS Distributions 					if (iopl.fIOPL) {
4350*c54f35caSApple OSS Distributions 						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4351*c54f35caSApple OSS Distributions 						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4352*c54f35caSApple OSS Distributions 					}
4353*c54f35caSApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4354*c54f35caSApple OSS Distributions 						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4355*c54f35caSApple OSS Distributions 						startPage -= iopl.fPageOffset;
4356*c54f35caSApple OSS Distributions 					}
4357*c54f35caSApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4358*c54f35caSApple OSS Distributions 					numBytes += iopl.fPageOffset;
4359*c54f35caSApple OSS Distributions 				}
4360*c54f35caSApple OSS Distributions 
4361*c54f35caSApple OSS Distributions 				iopl.fIOMDOffset = mdOffset;
4362*c54f35caSApple OSS Distributions 				iopl.fPageInfo = pageIndex;
4363*c54f35caSApple OSS Distributions 
4364*c54f35caSApple OSS Distributions 				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4365*c54f35caSApple OSS Distributions 					// Clean up partial created and unsaved iopl
4366*c54f35caSApple OSS Distributions 					if (iopl.fIOPL) {
4367*c54f35caSApple OSS Distributions 						upl_abort(iopl.fIOPL, 0);
4368*c54f35caSApple OSS Distributions 						upl_deallocate(iopl.fIOPL);
4369*c54f35caSApple OSS Distributions 					}
4370*c54f35caSApple OSS Distributions 					error = kIOReturnNoMemory;
4371*c54f35caSApple OSS Distributions 					traceInterval.setEndArg2(error);
4372*c54f35caSApple OSS Distributions 					goto abortExit;
4373*c54f35caSApple OSS Distributions 				}
4374*c54f35caSApple OSS Distributions 				dataP = NULL;
4375*c54f35caSApple OSS Distributions 
4376*c54f35caSApple OSS Distributions 				// Check for a multiple iopl's in one virtual range
4377*c54f35caSApple OSS Distributions 				pageIndex += numPageInfo;
4378*c54f35caSApple OSS Distributions 				mdOffset -= iopl.fPageOffset;
4379*c54f35caSApple OSS Distributions 				numBytesWired += ioplSize;
4380*c54f35caSApple OSS Distributions 				if (ioplSize < numBytes) {
4381*c54f35caSApple OSS Distributions 					numBytes -= ioplSize;
4382*c54f35caSApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4383*c54f35caSApple OSS Distributions 						startPage += ioplSize;
4384*c54f35caSApple OSS Distributions 					}
4385*c54f35caSApple OSS Distributions 					mdOffset += ioplSize;
4386*c54f35caSApple OSS Distributions 					iopl.fPageOffset = 0;
4387*c54f35caSApple OSS Distributions 					if (mapper) {
4388*c54f35caSApple OSS Distributions 						iopl.fMappedPage = mapBase + pageIndex;
4389*c54f35caSApple OSS Distributions 					}
4390*c54f35caSApple OSS Distributions 				} else {
4391*c54f35caSApple OSS Distributions 					mdOffset += numBytes;
4392*c54f35caSApple OSS Distributions 					break;
4393*c54f35caSApple OSS Distributions 				}
4394*c54f35caSApple OSS Distributions 			}
4395*c54f35caSApple OSS Distributions 		}
4396*c54f35caSApple OSS Distributions 
4397*c54f35caSApple OSS Distributions 		_highestPage = highestPage;
4398*c54f35caSApple OSS Distributions 		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4399*c54f35caSApple OSS Distributions 
4400*c54f35caSApple OSS Distributions 		if (UPL_COPYOUT_FROM & uplFlags) {
4401*c54f35caSApple OSS Distributions 			_flags |= kIOMemoryPreparedReadOnly;
4402*c54f35caSApple OSS Distributions 		}
4403*c54f35caSApple OSS Distributions 		traceInterval.setEndCodes(numBytesWired, error);
4404*c54f35caSApple OSS Distributions 	}
4405*c54f35caSApple OSS Distributions 
4406*c54f35caSApple OSS Distributions #if IOTRACKING
4407*c54f35caSApple OSS Distributions 	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4408*c54f35caSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4409*c54f35caSApple OSS Distributions 		if (!dataP->fWireTracking.link.next) {
4410*c54f35caSApple OSS Distributions 			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4411*c54f35caSApple OSS Distributions 		}
4412*c54f35caSApple OSS Distributions 	}
4413*c54f35caSApple OSS Distributions #endif /* IOTRACKING */
4414*c54f35caSApple OSS Distributions 
4415*c54f35caSApple OSS Distributions 	return error;
4416*c54f35caSApple OSS Distributions 
4417*c54f35caSApple OSS Distributions abortExit:
4418*c54f35caSApple OSS Distributions 	{
4419*c54f35caSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4420*c54f35caSApple OSS Distributions 		UInt done = getNumIOPL(_memoryEntries, dataP);
4421*c54f35caSApple OSS Distributions 		ioPLBlock *ioplList = getIOPLList(dataP);
4422*c54f35caSApple OSS Distributions 
4423*c54f35caSApple OSS Distributions 		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4424*c54f35caSApple OSS Distributions 			if (ioplList[ioplIdx].fIOPL) {
4425*c54f35caSApple OSS Distributions 				upl_abort(ioplList[ioplIdx].fIOPL, 0);
4426*c54f35caSApple OSS Distributions 				upl_deallocate(ioplList[ioplIdx].fIOPL);
4427*c54f35caSApple OSS Distributions 			}
4428*c54f35caSApple OSS Distributions 		}
4429*c54f35caSApple OSS Distributions 		_memoryEntries->setLength(computeDataSize(0, 0));
4430*c54f35caSApple OSS Distributions 	}
4431*c54f35caSApple OSS Distributions 
4432*c54f35caSApple OSS Distributions 	if (error == KERN_FAILURE) {
4433*c54f35caSApple OSS Distributions 		error = kIOReturnCannotWire;
4434*c54f35caSApple OSS Distributions 	} else if (error == KERN_MEMORY_ERROR) {
4435*c54f35caSApple OSS Distributions 		error = kIOReturnNoResources;
4436*c54f35caSApple OSS Distributions 	}
4437*c54f35caSApple OSS Distributions 
4438*c54f35caSApple OSS Distributions 	return error;
4439*c54f35caSApple OSS Distributions }
4440*c54f35caSApple OSS Distributions 
4441*c54f35caSApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4442*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4443*c54f35caSApple OSS Distributions {
4444*c54f35caSApple OSS Distributions 	ioGMDData * dataP;
4445*c54f35caSApple OSS Distributions 
4446*c54f35caSApple OSS Distributions 	if (size > UINT_MAX) {
4447*c54f35caSApple OSS Distributions 		return false;
4448*c54f35caSApple OSS Distributions 	}
4449*c54f35caSApple OSS Distributions 	if (!_memoryEntries) {
4450*c54f35caSApple OSS Distributions 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4451*c54f35caSApple OSS Distributions 		if (!_memoryEntries) {
4452*c54f35caSApple OSS Distributions 			return false;
4453*c54f35caSApple OSS Distributions 		}
4454*c54f35caSApple OSS Distributions 	} else if (!_memoryEntries->initWithCapacity(size)) {
4455*c54f35caSApple OSS Distributions 		return false;
4456*c54f35caSApple OSS Distributions 	}
4457*c54f35caSApple OSS Distributions 
4458*c54f35caSApple OSS Distributions 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4459*c54f35caSApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4460*c54f35caSApple OSS Distributions 
4461*c54f35caSApple OSS Distributions 	if (mapper == kIOMapperWaitSystem) {
4462*c54f35caSApple OSS Distributions 		IOMapper::checkForSystemMapper();
4463*c54f35caSApple OSS Distributions 		mapper = IOMapper::gSystem;
4464*c54f35caSApple OSS Distributions 	}
4465*c54f35caSApple OSS Distributions 	dataP->fMapper               = mapper;
4466*c54f35caSApple OSS Distributions 	dataP->fPageCnt              = 0;
4467*c54f35caSApple OSS Distributions 	dataP->fMappedBase           = 0;
4468*c54f35caSApple OSS Distributions 	dataP->fDMAMapNumAddressBits = 64;
4469*c54f35caSApple OSS Distributions 	dataP->fDMAMapAlignment      = 0;
4470*c54f35caSApple OSS Distributions 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4471*c54f35caSApple OSS Distributions 	dataP->fCompletionError      = false;
4472*c54f35caSApple OSS Distributions 	dataP->fMappedBaseValid      = false;
4473*c54f35caSApple OSS Distributions 
4474*c54f35caSApple OSS Distributions 	return true;
4475*c54f35caSApple OSS Distributions }
4476*c54f35caSApple OSS Distributions 
4477*c54f35caSApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4478*c54f35caSApple OSS Distributions IOMemoryDescriptor::dmaMap(
4479*c54f35caSApple OSS Distributions 	IOMapper                    * mapper,
4480*c54f35caSApple OSS Distributions 	IOMemoryDescriptor          * memory,
4481*c54f35caSApple OSS Distributions 	IODMACommand                * command,
4482*c54f35caSApple OSS Distributions 	const IODMAMapSpecification * mapSpec,
4483*c54f35caSApple OSS Distributions 	uint64_t                      offset,
4484*c54f35caSApple OSS Distributions 	uint64_t                      length,
4485*c54f35caSApple OSS Distributions 	uint64_t                    * mapAddress,
4486*c54f35caSApple OSS Distributions 	uint64_t                    * mapLength)
4487*c54f35caSApple OSS Distributions {
4488*c54f35caSApple OSS Distributions 	IOReturn err;
4489*c54f35caSApple OSS Distributions 	uint32_t mapOptions;
4490*c54f35caSApple OSS Distributions 
4491*c54f35caSApple OSS Distributions 	mapOptions = 0;
4492*c54f35caSApple OSS Distributions 	mapOptions |= kIODMAMapReadAccess;
4493*c54f35caSApple OSS Distributions 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4494*c54f35caSApple OSS Distributions 		mapOptions |= kIODMAMapWriteAccess;
4495*c54f35caSApple OSS Distributions 	}
4496*c54f35caSApple OSS Distributions 
4497*c54f35caSApple OSS Distributions 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4498*c54f35caSApple OSS Distributions 	    mapSpec, command, NULL, mapAddress, mapLength);
4499*c54f35caSApple OSS Distributions 
4500*c54f35caSApple OSS Distributions 	if (kIOReturnSuccess == err) {
4501*c54f35caSApple OSS Distributions 		dmaMapRecord(mapper, command, *mapLength);
4502*c54f35caSApple OSS Distributions 	}
4503*c54f35caSApple OSS Distributions 
4504*c54f35caSApple OSS Distributions 	return err;
4505*c54f35caSApple OSS Distributions }
4506*c54f35caSApple OSS Distributions 
4507*c54f35caSApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4508*c54f35caSApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4509*c54f35caSApple OSS Distributions 	IOMapper                    * mapper,
4510*c54f35caSApple OSS Distributions 	IODMACommand                * command,
4511*c54f35caSApple OSS Distributions 	uint64_t                      mapLength)
4512*c54f35caSApple OSS Distributions {
4513*c54f35caSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4514*c54f35caSApple OSS Distributions 	kern_allocation_name_t alloc;
4515*c54f35caSApple OSS Distributions 	int16_t                prior;
4516*c54f35caSApple OSS Distributions 
4517*c54f35caSApple OSS Distributions 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4518*c54f35caSApple OSS Distributions 		kern_allocation_update_size(mapper->fAllocName, mapLength);
4519*c54f35caSApple OSS Distributions 	}
4520*c54f35caSApple OSS Distributions 
4521*c54f35caSApple OSS Distributions 	if (!command) {
4522*c54f35caSApple OSS Distributions 		return;
4523*c54f35caSApple OSS Distributions 	}
4524*c54f35caSApple OSS Distributions 	prior = OSAddAtomic16(1, &_dmaReferences);
4525*c54f35caSApple OSS Distributions 	if (!prior) {
4526*c54f35caSApple OSS Distributions 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4527*c54f35caSApple OSS Distributions 			_mapName  = alloc;
4528*c54f35caSApple OSS Distributions 			mapLength = _length;
4529*c54f35caSApple OSS Distributions 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4530*c54f35caSApple OSS Distributions 		} else {
4531*c54f35caSApple OSS Distributions 			_mapName = NULL;
4532*c54f35caSApple OSS Distributions 		}
4533*c54f35caSApple OSS Distributions 	}
4534*c54f35caSApple OSS Distributions }
4535*c54f35caSApple OSS Distributions 
IOReturn
IOMemoryDescriptor::dmaUnmap(
	IOMapper                    * mapper,
	IODMACommand                * command,
	uint64_t                      offset,
	uint64_t                      mapAddress,
	uint64_t                      mapLength)
{
	// Tear down a DMA mapping created by dmaMap(), unwinding the reference
	// count and allocation accounting that dmaMapRecord() established.
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
	IOReturn ret;
	kern_allocation_name_t alloc;
	kern_allocation_name_t mapName;
	int16_t prior;

	mapName = NULL;
	prior = 0;
	if (command) {
		mapName = _mapName;
		// prior holds the reference count before the decrement; a value of
		// 1 means this call releases the last outstanding DMA reference.
		if (_dmaReferences) {
			prior = OSAddAtomic16(-1, &_dmaReferences);
		} else {
			panic("_dmaReferences underflow");
		}
	}

	// Nothing was actually mapped; succeed without touching the mapper.
	// (The reference count above has still been decremented.)
	if (!mapLength) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

	// Reverse the accounting from dmaMapRecord(): the per-mapper size is
	// always debited; the per-tag subtotal only when the last reference
	// goes away and a map name was recorded.
	if ((alloc = mapper->fAllocName)) {
		kern_allocation_update_size(alloc, -mapLength);
		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
			// The subtotal was credited with _length, so debit the same.
			mapLength = _length;
			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
		}
	}

	traceInterval.setEndArg1(ret);
	return ret;
}
4579*c54f35caSApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn          err = kIOReturnSuccess;
	ioGMDData *       dataP;
	IOOptionBits      type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	// Host-only memory is never IOMMU-mapped; report success with a zero
	// address.
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Physical descriptors, or partial-range mappings, go through the
	// generic superclass path. Full-length virtual mappings with a wired
	// page list use the faster page-list path below.
	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t          mapOptions = 0;

		// NOTE: this local deliberately shadows the mapSpec parameter; the
		// spec is rebuilt from the constraints recorded at wire time.
		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// Whole pages, starting page-aligned: every page in the list is
		// fully used.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		// Propagate the read/write access recorded when the memory was
		// wired.
		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList      = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		// On success, record the mapping for allocation accounting.
		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4653*c54f35caSApple OSS Distributions 
4654*c54f35caSApple OSS Distributions /*
4655*c54f35caSApple OSS Distributions  * prepare
4656*c54f35caSApple OSS Distributions  *
4657*c54f35caSApple OSS Distributions  * Prepare the memory for an I/O transfer.  This involves paging in
4658*c54f35caSApple OSS Distributions  * the memory, if necessary, and wiring it down for the duration of
4659*c54f35caSApple OSS Distributions  * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't be
 * called for non-pageable memory.
4662*c54f35caSApple OSS Distributions  */
4663*c54f35caSApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
	IOReturn     error    = kIOReturnSuccess;
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical descriptors are inherently wired; nothing to prepare.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	// Serialize against concurrent prepare()/complete() when a lock exists.
	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}

	// Only virtual-style memory needs wiring; wireVirtual() pages it in
	// and wires it down.
	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
			error = kIOReturnNotReady;
			goto finish;
		}
		error = wireVirtual(forDirection);
	}

	if (kIOReturnSuccess == error) {
		// First successful prepare (wire count 0 -> 1) performs one-time
		// work: clear encryption if requested, emit trace segments.
		if (1 == ++_wireCount) {
			if (kIOMemoryClearEncrypt & _flags) {
				performOperation(kIOMemoryClearEncrypted, 0, _length);
			}

			ktraceEmitPhysicalSegments();
		}
	}

finish:

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}
	traceInterval.setEndArg1(error);

	return error;
}
4713*c54f35caSApple OSS Distributions 
4714*c54f35caSApple OSS Distributions /*
4715*c54f35caSApple OSS Distributions  * complete
4716*c54f35caSApple OSS Distributions  *
4717*c54f35caSApple OSS Distributions  * Complete processing of the memory after an I/O transfer finishes.
4718*c54f35caSApple OSS Distributions  * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
4721*c54f35caSApple OSS Distributions  */
4722*c54f35caSApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData  * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical descriptors were never wired by prepare(); nothing to undo.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	// Serialize against concurrent prepare()/complete() when a lock exists.
	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// complete() without a matching prepare() is a caller bug; bail
		// out rather than underflow the wire count.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		// Remember a failed transfer so the final unwire aborts rather
		// than commits the UPLs.
		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		// Undo the clear-encrypt done on the first prepare() before the
		// last wire reference drops.
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				// Still wired: just mark the iopl data valid.
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Last wire reference dropped: tear everything down.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				// Remove any system-mapper DMA mapping left from prepare.
				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Drop the iopl records; keep only the ioGMDData header.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4827*c54f35caSApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// NOTE(review): under the kIOMap64Bit protocol, *__address does not carry a
	// VM address on entry — it carries the IOMemoryMap object being populated.
	// The map's fOffset/fLength describe the requested window into this
	// descriptor; the resulting address is written back to mapping->fAddress.
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	// Even 32-bit kernels must come through the 64-bit mapping protocol.
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t  err;

	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t    range0Len = 0;

	// Reject any window that does not fit entirely inside the descriptor.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	// Remote (non-local) memory descriptors cannot be mapped here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
	}

	// mapping source == dest? (could be much better)
	// Fast path: a map-anywhere request into the very task the memory already
	// lives in, covering a single range from offset 0, needs no new VM
	// mapping at all — reuse the existing address and mark the map static.
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the backing memory reference (named entries) on first map.
	if (!_memRef) {
		IOOptionBits createOptions = 0;
		if (!(kIOMapReadOnly & options)) {
			createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
			// Warn when a writable mapping is created from a descriptor the
			// driver declared device-to-memory only (kIODirectionOut), unless
			// the mapping goes back to the descriptor's creator.
			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
			}
#endif
		}
		err = memoryReferenceCreate(createOptions, &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}

	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	// Redirect-to-unique path: wire the pages behind a blocking UPL, swap the
	// underlying vm_objects with the existing mapping, then repopulate.
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		do{
			upl_t               redirUPL2;
			upl_size_t          size;
			upl_control_flags_t flags;
			unsigned int        lock_count;

			// Only valid when we hold the sole reference to the memory entry.
			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// Fully release gIOMemoryLock (recording its recursion depth)
			// around upl_transpose, then reacquire to the same depth.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				// Transpose failure is tolerated; proceed as success.
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: materialize the mapping from the memory reference.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
#if IOTRACKING
		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
			// only dram maps in the default on development case
			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
		}
#endif /* IOTRACKING */
		if ((err == KERN_SUCCESS) && pager) {
			// Device memory: push physical pages into the device pager now.
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				// Roll back the VM mapping created above.
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				// Inherit the descriptor's buffer cache mode when the caller
				// did not request one explicitly.
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
4997*c54f35caSApple OSS Distributions 
#if IOTRACKING
/*
 * Report the task, address and length of the IOMemoryMap that embeds the
 * given tracking record, for the IOTracking diagnostics subsystem.
 * Returns kIOReturnNotReady if the map no longer targets its task's live map.
 */
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
// container_of-style offset computation via a NULL base pointer.
// NOTE(review): technically undefined behavior in ISO C++, but an
// established idiom here — confirm before changing to offsetof().
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	// Recover the enclosing IOMemoryMap from the embedded fTracking member.
	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	// Only report maps still attached to their owning task's current vm_map.
	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task    = map->fAddressTask;
	*address = map->fAddress;
	*size    = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5018*c54f35caSApple OSS Distributions 
5019*c54f35caSApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5020*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5021*c54f35caSApple OSS Distributions 	vm_map_t                addressMap,
5022*c54f35caSApple OSS Distributions 	IOVirtualAddress        __address,
5023*c54f35caSApple OSS Distributions 	IOByteCount             __length )
5024*c54f35caSApple OSS Distributions {
5025*c54f35caSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5026*c54f35caSApple OSS Distributions 	IOReturn ret;
5027*c54f35caSApple OSS Distributions 	ret = super::doUnmap(addressMap, __address, __length);
5028*c54f35caSApple OSS Distributions 	traceInterval.setEndArg1(ret);
5029*c54f35caSApple OSS Distributions 	return ret;
5030*c54f35caSApple OSS Distributions }
5031*c54f35caSApple OSS Distributions 
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
// IOMemoryMap derives directly from OSObject.
#define super OSObject

OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Reserved vtable padding slots for future binary-compatible expansion.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5047*c54f35caSApple OSS Distributions 
/* ex-inline function implementation */
/*
 * Physical address backing the first byte of the mapping — a convenience
 * wrapper around getPhysicalSegment() at offset 0 with no length out-param.
 */
IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
5054*c54f35caSApple OSS Distributions 
5055*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5056*c54f35caSApple OSS Distributions 
5057*c54f35caSApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5058*c54f35caSApple OSS Distributions IOMemoryMap::init(
5059*c54f35caSApple OSS Distributions 	task_t                  intoTask,
5060*c54f35caSApple OSS Distributions 	mach_vm_address_t       toAddress,
5061*c54f35caSApple OSS Distributions 	IOOptionBits            _options,
5062*c54f35caSApple OSS Distributions 	mach_vm_size_t          _offset,
5063*c54f35caSApple OSS Distributions 	mach_vm_size_t          _length )
5064*c54f35caSApple OSS Distributions {
5065*c54f35caSApple OSS Distributions 	if (!intoTask) {
5066*c54f35caSApple OSS Distributions 		return false;
5067*c54f35caSApple OSS Distributions 	}
5068*c54f35caSApple OSS Distributions 
5069*c54f35caSApple OSS Distributions 	if (!super::init()) {
5070*c54f35caSApple OSS Distributions 		return false;
5071*c54f35caSApple OSS Distributions 	}
5072*c54f35caSApple OSS Distributions 
5073*c54f35caSApple OSS Distributions 	fAddressMap  = get_task_map(intoTask);
5074*c54f35caSApple OSS Distributions 	if (!fAddressMap) {
5075*c54f35caSApple OSS Distributions 		return false;
5076*c54f35caSApple OSS Distributions 	}
5077*c54f35caSApple OSS Distributions 	vm_map_reference(fAddressMap);
5078*c54f35caSApple OSS Distributions 
5079*c54f35caSApple OSS Distributions 	fAddressTask = intoTask;
5080*c54f35caSApple OSS Distributions 	fOptions     = _options;
5081*c54f35caSApple OSS Distributions 	fLength      = _length;
5082*c54f35caSApple OSS Distributions 	fOffset      = _offset;
5083*c54f35caSApple OSS Distributions 	fAddress     = toAddress;
5084*c54f35caSApple OSS Distributions 
5085*c54f35caSApple OSS Distributions 	return true;
5086*c54f35caSApple OSS Distributions }
5087*c54f35caSApple OSS Distributions 
5088*c54f35caSApple OSS Distributions bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5089*c54f35caSApple OSS Distributions IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5090*c54f35caSApple OSS Distributions {
5091*c54f35caSApple OSS Distributions 	if (!_memory) {
5092*c54f35caSApple OSS Distributions 		return false;
5093*c54f35caSApple OSS Distributions 	}
5094*c54f35caSApple OSS Distributions 
5095*c54f35caSApple OSS Distributions 	if (!fSuperMap) {
5096*c54f35caSApple OSS Distributions 		if ((_offset + fLength) > _memory->getLength()) {
5097*c54f35caSApple OSS Distributions 			return false;
5098*c54f35caSApple OSS Distributions 		}
5099*c54f35caSApple OSS Distributions 		fOffset = _offset;
5100*c54f35caSApple OSS Distributions 	}
5101*c54f35caSApple OSS Distributions 
5102*c54f35caSApple OSS Distributions 
5103*c54f35caSApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5104*c54f35caSApple OSS Distributions 	if (fMemory) {
5105*c54f35caSApple OSS Distributions 		if (fMemory != _memory) {
5106*c54f35caSApple OSS Distributions 			fMemory->removeMapping(this);
5107*c54f35caSApple OSS Distributions 		}
5108*c54f35caSApple OSS Distributions 	}
5109*c54f35caSApple OSS Distributions 	fMemory = os::move(tempval);
5110*c54f35caSApple OSS Distributions 
5111*c54f35caSApple OSS Distributions 	return true;
5112*c54f35caSApple OSS Distributions }
5113*c54f35caSApple OSS Distributions 
/*
 * Base-class stub: IOMemoryDescriptor itself provides no mapping
 * implementation; concrete subclasses (e.g. IOGeneralMemoryDescriptor)
 * override doMap() to do the work.
 */
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	return kIOReturnUnsupported;
}
5124*c54f35caSApple OSS Distributions 
/*
 * Block an in-progress fault while the descriptor is redirected (e.g. for
 * sleep). Sleeps until another thread clears kIOMemoryRedirected and issues
 * the corresponding WAKEUP (see redirect()), then lets the fault proceed.
 * NOTE(review): SLEEP presumably waits under gIOMemoryLock — callers appear
 * to hold it; confirm before changing the loop structure.
 */
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Re-check after every wakeup: stay asleep until redirection ends.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5141*c54f35caSApple OSS Distributions 
/*
 * Push the descriptor's physical pages into a device pager so the given
 * [address, address+length) mapping is backed. Walks the descriptor's
 * physical segments from sourceOffset, populating the pager a chunk at a
 * time, and pre-faults kernel mappings so later access can occur at
 * interrupt level.
 *
 * NOTE(review): 'reserved' is dereferenced unconditionally here — callers
 * are expected to invoke this only after a device pager has been set up.
 */
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A contiguous pager is always populated from its start.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset  = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the start down to a page boundary, widening size/segLen to match.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Device pagers should not cover kernel-managed (DRAM) pages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Contiguous pagers take the whole segment at once; otherwise
		// populate page by page.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next physical segment; pageOffset only applies
		// to the very first iteration.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Leftover bytes mean the descriptor ran out of segments early.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5244*c54f35caSApple OSS Distributions 
/*
 * Tear down a mapping. Under the 64-bit protocol __address carries the
 * IOMemoryMap object (not a VM address) and __length must be 0; the real
 * map/address/length come from the mapping's fields.
 */
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	// A non-zero __length indicates a caller still using the legacy
	// (pre-kIOMap64Bit) calling convention — not supported.
	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	// Overwrite mappings never allocated VM space, so nothing to release.
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffers live in a pageable submap, not kernel_map proper.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5289*c54f35caSApple OSS Distributions 
// Redirect (or restore) all mappings of this descriptor.
// When doRedirect is true, existing mappings are torn down so that accesses
// fault; when false, the redirected state is cleared and waiters are woken.
// safeTask, when non-NULL, exempts mappings belonging to that task.
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn            err = kIOReturnSuccess;
	IOMemoryMap *       mapping = NULL;
	OSSharedPtr<OSIterator>        iter;

	LOCK;

	// Record the redirected state on the descriptor; other paths can wait
	// on this flag until redirection ends (see WAKEUP below).
	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t   pager;

			// Device pager, if one has been set up for this descriptor.
			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			// Redirect every mapping. When restoring (doRedirect == false)
			// with no specific task and a device pager present, repopulate
			// kernel mappings from the pager right away.
			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	// Wake any threads blocked on the redirected flag.
	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5344*c54f35caSApple OSS Distributions 
// Redirect a single mapping: unmap its pages (so accesses fault until
// restored), or adjust its cache mode for write-combined mappings.
// safeTask's own map, and kIOMapStatic mappings, are left untouched.
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// Submaps are covered by their parent mapping; the historical
		// forward to the super map is intentionally disabled.
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing to do for a mapping that was never established
			// or has already been torn down.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Unmap unless this mapping belongs to safeTask or is static.
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// Write-combined mappings are redirected by switching the
				// cache mode to inhibited rather than by unmapping.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// For physical-range descriptors mapped on behalf of safeTask, propagate
	// the state change to the descriptor when its flag disagrees.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5388*c54f35caSApple OSS Distributions 
// Tear down this mapping: undo the VM mapping (unless static or a submap),
// drop the address-map reference, and clear the mapped address.
IOReturn
IOMemoryMap::unmap( void )
{
	IOReturn    err;

	LOCK;

	// Only top-level, non-static, still-live mappings need a VM unmap;
	// doUnmap is handed this object (as an IOVirtualAddress) to identify it.
	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
	    && (0 == (kIOMapStatic & fOptions))) {
		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
	} else {
		err = kIOReturnSuccess;
	}

	// Release our reference on the task's vm_map in every case.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}

	fAddress = 0;

	UNLOCK;

	return err;
}
5414*c54f35caSApple OSS Distributions 
// Called when the task owning this mapping terminates. Fully unmaps if a
// user client requested it (see userClientUnmap), otherwise just clears
// tracking and drops the task/map references.
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		// unmap() removes tracking itself; do it here on the other path.
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	// The dying task's vm_map reference must be dropped even if we did not
	// unmap (unmap() already NULLed fAddressMap in the first branch).
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress     = 0;
	UNLOCK;
}
5436*c54f35caSApple OSS Distributions 
// Request that this mapping be fully unmapped when the owning task dies
// (consumed by taskDied() above).
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5443*c54f35caSApple OSS Distributions 
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptors _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these mappings
// are released we need to free ourselves.
// (taggedRelease with a "when" of 2 triggers free() once only the two
// structural references remain; the lock serializes against mapping teardown.)
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5455*c54f35caSApple OSS Distributions 
// Final teardown: unmap, detach from the owning descriptor, release the
// super map, and commit/free any redirection UPL before destruction.
void
IOMemoryMap::free()
{
	unmap();

	// Remove ourselves from the descriptor's _mappings set under the lock,
	// then drop our reference on the descriptor.
	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// A leftover redirection UPL (see IOMemoryMap::redirect) must be
	// committed and deallocated to unblock the pages it covers.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5479*c54f35caSApple OSS Distributions 
// Length, in bytes, of the mapped range.
IOByteCount
IOMemoryMap::getLength()
{
	return fLength;
}
5485*c54f35caSApple OSS Distributions 
// Legacy accessor for the mapped address. On !__LP64__ it warns when a
// 32-bit IOVirtualAddress cannot represent an address in a 64-bit map.
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// NOTE(review): the super map's result is discarded here and this
		// map's own fAddress is returned below — looks intentional for
		// binary compatibility, but worth confirming.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5501*c54f35caSApple OSS Distributions 
#ifndef __LP64__
// 64-bit-clean accessors; on LP64 these are provided elsewhere.
// Full-width mapped address.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

// Full-width mapped length.
mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5515*c54f35caSApple OSS Distributions 
5516*c54f35caSApple OSS Distributions 
5517*c54f35caSApple OSS Distributions task_t
getAddressTask()5518*c54f35caSApple OSS Distributions IOMemoryMap::getAddressTask()
5519*c54f35caSApple OSS Distributions {
5520*c54f35caSApple OSS Distributions 	if (fSuperMap) {
5521*c54f35caSApple OSS Distributions 		return fSuperMap->getAddressTask();
5522*c54f35caSApple OSS Distributions 	} else {
5523*c54f35caSApple OSS Distributions 		return fAddressTask;
5524*c54f35caSApple OSS Distributions 	}
5525*c54f35caSApple OSS Distributions }
5526*c54f35caSApple OSS Distributions 
// Options (kIOMap*) this mapping was created with.
IOOptionBits
IOMemoryMap::getMapOptions()
{
	return fOptions;
}
5532*c54f35caSApple OSS Distributions 
// Backing memory descriptor (borrowed pointer; no reference transferred).
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
5538*c54f35caSApple OSS Distributions 
// Try to satisfy newMapping by reusing this existing mapping. Returns this
// (retained) on an exact range match, newMapping (made a submap of this)
// for a compatible subrange, or NULL when the mappings cannot be shared.
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t              task      = newMapping->getAddressTask();
	mach_vm_address_t   toAddress = newMapping->fAddress;
	IOOptionBits        _options  = newMapping->fOptions;
	mach_vm_size_t      _offset   = newMapping->fOffset;
	mach_vm_size_t      _length   = newMapping->fLength;

	// Must target the same task address map as this mapping.
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// Read-only and guarded attributes must agree exactly.
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	// Cache mode must agree unless the request accepts the default.
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must coincide with this mapping's address.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// Requested range must lie entirely within this mapping's range.
	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		// Exact match: hand back this mapping with an extra retain.
		retain();
		newMapping = this;
	} else {
		// Subrange: turn newMapping into a submap referencing this one.
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset   = fOffset + _offset;
		newMapping->fAddress  = fAddress + _offset;
	}

	return newMapping;
}
5588*c54f35caSApple OSS Distributions 
5589*c54f35caSApple OSS Distributions IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5590*c54f35caSApple OSS Distributions IOMemoryMap::wireRange(
5591*c54f35caSApple OSS Distributions 	uint32_t                options,
5592*c54f35caSApple OSS Distributions 	mach_vm_size_t          offset,
5593*c54f35caSApple OSS Distributions 	mach_vm_size_t          length)
5594*c54f35caSApple OSS Distributions {
5595*c54f35caSApple OSS Distributions 	IOReturn kr;
5596*c54f35caSApple OSS Distributions 	mach_vm_address_t start = trunc_page_64(fAddress + offset);
5597*c54f35caSApple OSS Distributions 	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
5598*c54f35caSApple OSS Distributions 	vm_prot_t prot;
5599*c54f35caSApple OSS Distributions 
5600*c54f35caSApple OSS Distributions 	prot = (kIODirectionOutIn & options);
5601*c54f35caSApple OSS Distributions 	if (prot) {
5602*c54f35caSApple OSS Distributions 		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5603*c54f35caSApple OSS Distributions 	} else {
5604*c54f35caSApple OSS Distributions 		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5605*c54f35caSApple OSS Distributions 	}
5606*c54f35caSApple OSS Distributions 
5607*c54f35caSApple OSS Distributions 	return kr;
5608*c54f35caSApple OSS Distributions }
5609*c54f35caSApple OSS Distributions 
5610*c54f35caSApple OSS Distributions 
// Physical address of the segment at _offset within this mapping, with the
// segment length returned through _length. Forwards to the backing
// descriptor after biasing by this mapping's own fOffset.
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress   address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5630*c54f35caSApple OSS Distributions 
5631*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5632*c54f35caSApple OSS Distributions 
5633*c54f35caSApple OSS Distributions #undef super
5634*c54f35caSApple OSS Distributions #define super OSObject
5635*c54f35caSApple OSS Distributions 
5636*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5637*c54f35caSApple OSS Distributions 
5638*c54f35caSApple OSS Distributions void
initialize(void)5639*c54f35caSApple OSS Distributions IOMemoryDescriptor::initialize( void )
5640*c54f35caSApple OSS Distributions {
5641*c54f35caSApple OSS Distributions 	if (NULL == gIOMemoryLock) {
5642*c54f35caSApple OSS Distributions 		gIOMemoryLock = IORecursiveLockAlloc();
5643*c54f35caSApple OSS Distributions 	}
5644*c54f35caSApple OSS Distributions 
5645*c54f35caSApple OSS Distributions 	gIOLastPage = IOGetLastPageNumber();
5646*c54f35caSApple OSS Distributions }
5647*c54f35caSApple OSS Distributions 
// Descriptor teardown: drop the mappings set and release the reserved
// (expansion) storage before the superclass free.
void
IOMemoryDescriptor::free( void )
{
	if (_mappings) {
		_mappings.reset();
	}

	// cleanKernelReserved releases kernel-held state inside 'reserved'
	// before the structure itself is freed.
	if (reserved) {
		cleanKernelReserved(reserved);
		IOFreeType(reserved, IOMemoryDescriptorReserved);
		reserved = NULL;
	}
	super::free();
}
5662*c54f35caSApple OSS Distributions 
// Record a pre-existing (static) mapping of this descriptor at a known
// address: forwards to createMappingInTask with kIOMapStatic forced on.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t                  intoTask,
	IOVirtualAddress        mapAddress,
	IOOptionBits            options )
{
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}
5673*c54f35caSApple OSS Distributions 
// Map the whole descriptor into the kernel task at an address chosen by
// the VM system (kIOMapAnywhere forced on).
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits            options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}
5682*c54f35caSApple OSS Distributions 
#ifndef __LP64__
// Legacy 32-bit map entry point. Fixed-address requests into 64-bit tasks
// are rejected (a 32-bit IOVirtualAddress cannot express them); callers
// should use createMappingInTask instead.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5701*c54f35caSApple OSS Distributions 
// Create a mapping of [offset, offset+length) of this descriptor in
// intoTask. length == 0 means "the whole descriptor". Returns NULL on
// failure (init or makeMapping rejected the request).
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t                  intoTask,
	mach_vm_address_t       atAddress,
	IOOptionBits            options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	// With kIOMap64Bit set, makeMapping takes the IOMemoryMap object itself
	// through the address argument and returns the finished map.
	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
5744*c54f35caSApple OSS Distributions 
#ifndef __LP64__ // there is only a 64 bit version for LP64
// Binary-compatibility shim: widen the 32-bit offset and forward to the
// mach_vm_size_t variant below.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5754*c54f35caSApple OSS Distributions 
// Re-point this mapping at newBackingMemory (or block access while the
// backing is swapped). Access is blocked via an IOPL taken with
// UPL_BLOCK_ACCESS; remapping goes through makeMapping with kIOMapUnique |
// kIOMapReference so the same IOMemoryMap object is reused.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			// Remember whether the current backing is a physical-range
			// descriptor; those also get their pages unmapped below.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// Take a blocking IOPL on the current backing so accesses stall
			// until the UPL is committed (here only when the sole memRef
			// entry reference is ours).
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					// Disabled path: descriptor-level redirect of the
					// physical backing is intentionally compiled out.
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// makeMapping must return this same object for the
					// unique re-reference to have succeeded.
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Commit the blocking UPL to unblock accesses to the pages.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				// Disabled path (see above).
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5815*c54f35caSApple OSS Distributions 
5816*c54f35caSApple OSS Distributions IOMemoryMap *
makeMapping(IOMemoryDescriptor * owner,task_t __intoTask,IOVirtualAddress __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5817*c54f35caSApple OSS Distributions IOMemoryDescriptor::makeMapping(
5818*c54f35caSApple OSS Distributions 	IOMemoryDescriptor *    owner,
5819*c54f35caSApple OSS Distributions 	task_t                  __intoTask,
5820*c54f35caSApple OSS Distributions 	IOVirtualAddress        __address,
5821*c54f35caSApple OSS Distributions 	IOOptionBits            options,
5822*c54f35caSApple OSS Distributions 	IOByteCount             __offset,
5823*c54f35caSApple OSS Distributions 	IOByteCount             __length )
5824*c54f35caSApple OSS Distributions {
5825*c54f35caSApple OSS Distributions #ifndef __LP64__
5826*c54f35caSApple OSS Distributions 	if (!(kIOMap64Bit & options)) {
5827*c54f35caSApple OSS Distributions 		panic("IOMemoryDescriptor::makeMapping !64bit");
5828*c54f35caSApple OSS Distributions 	}
5829*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
5830*c54f35caSApple OSS Distributions 
5831*c54f35caSApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> mapDesc;
5832*c54f35caSApple OSS Distributions 	__block IOMemoryMap * result  = NULL;
5833*c54f35caSApple OSS Distributions 
5834*c54f35caSApple OSS Distributions 	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
5835*c54f35caSApple OSS Distributions 	mach_vm_size_t offset  = mapping->fOffset + __offset;
5836*c54f35caSApple OSS Distributions 	mach_vm_size_t length  = mapping->fLength;
5837*c54f35caSApple OSS Distributions 
5838*c54f35caSApple OSS Distributions 	mapping->fOffset = offset;
5839*c54f35caSApple OSS Distributions 
5840*c54f35caSApple OSS Distributions 	LOCK;
5841*c54f35caSApple OSS Distributions 
5842*c54f35caSApple OSS Distributions 	do{
5843*c54f35caSApple OSS Distributions 		if (kIOMapStatic & options) {
5844*c54f35caSApple OSS Distributions 			result = mapping;
5845*c54f35caSApple OSS Distributions 			addMapping(mapping);
5846*c54f35caSApple OSS Distributions 			mapping->setMemoryDescriptor(this, 0);
5847*c54f35caSApple OSS Distributions 			continue;
5848*c54f35caSApple OSS Distributions 		}
5849*c54f35caSApple OSS Distributions 
5850*c54f35caSApple OSS Distributions 		if (kIOMapUnique & options) {
5851*c54f35caSApple OSS Distributions 			addr64_t phys;
5852*c54f35caSApple OSS Distributions 			IOByteCount       physLen;
5853*c54f35caSApple OSS Distributions 
5854*c54f35caSApple OSS Distributions //	    if (owner != this)		continue;
5855*c54f35caSApple OSS Distributions 
5856*c54f35caSApple OSS Distributions 			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5857*c54f35caSApple OSS Distributions 			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5858*c54f35caSApple OSS Distributions 				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
5859*c54f35caSApple OSS Distributions 				if (!phys || (physLen < length)) {
5860*c54f35caSApple OSS Distributions 					continue;
5861*c54f35caSApple OSS Distributions 				}
5862*c54f35caSApple OSS Distributions 
5863*c54f35caSApple OSS Distributions 				mapDesc = IOMemoryDescriptor::withAddressRange(
5864*c54f35caSApple OSS Distributions 					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
5865*c54f35caSApple OSS Distributions 				if (!mapDesc) {
5866*c54f35caSApple OSS Distributions 					continue;
5867*c54f35caSApple OSS Distributions 				}
5868*c54f35caSApple OSS Distributions 				offset = 0;
5869*c54f35caSApple OSS Distributions 				mapping->fOffset = offset;
5870*c54f35caSApple OSS Distributions 			}
5871*c54f35caSApple OSS Distributions 		} else {
5872*c54f35caSApple OSS Distributions 			// look for a compatible existing mapping
5873*c54f35caSApple OSS Distributions 			if (_mappings) {
5874*c54f35caSApple OSS Distributions 				_mappings->iterateObjects(^(OSObject * object)
5875*c54f35caSApple OSS Distributions 				{
5876*c54f35caSApple OSS Distributions 					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
5877*c54f35caSApple OSS Distributions 					if ((result = lookMapping->copyCompatible(mapping))) {
5878*c54f35caSApple OSS Distributions 					        addMapping(result);
5879*c54f35caSApple OSS Distributions 					        result->setMemoryDescriptor(this, offset);
5880*c54f35caSApple OSS Distributions 					        return true;
5881*c54f35caSApple OSS Distributions 					}
5882*c54f35caSApple OSS Distributions 					return false;
5883*c54f35caSApple OSS Distributions 				});
5884*c54f35caSApple OSS Distributions 			}
5885*c54f35caSApple OSS Distributions 			if (result || (options & kIOMapReference)) {
5886*c54f35caSApple OSS Distributions 				if (result != mapping) {
5887*c54f35caSApple OSS Distributions 					mapping->release();
5888*c54f35caSApple OSS Distributions 					mapping = NULL;
5889*c54f35caSApple OSS Distributions 				}
5890*c54f35caSApple OSS Distributions 				continue;
5891*c54f35caSApple OSS Distributions 			}
5892*c54f35caSApple OSS Distributions 		}
5893*c54f35caSApple OSS Distributions 
5894*c54f35caSApple OSS Distributions 		if (!mapDesc) {
5895*c54f35caSApple OSS Distributions 			mapDesc.reset(this, OSRetain);
5896*c54f35caSApple OSS Distributions 		}
5897*c54f35caSApple OSS Distributions 		IOReturn
5898*c54f35caSApple OSS Distributions 		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
5899*c54f35caSApple OSS Distributions 		if (kIOReturnSuccess == kr) {
5900*c54f35caSApple OSS Distributions 			result = mapping;
5901*c54f35caSApple OSS Distributions 			mapDesc->addMapping(result);
5902*c54f35caSApple OSS Distributions 			result->setMemoryDescriptor(mapDesc.get(), offset);
5903*c54f35caSApple OSS Distributions 		} else {
5904*c54f35caSApple OSS Distributions 			mapping->release();
5905*c54f35caSApple OSS Distributions 			mapping = NULL;
5906*c54f35caSApple OSS Distributions 		}
5907*c54f35caSApple OSS Distributions 	}while (false);
5908*c54f35caSApple OSS Distributions 
5909*c54f35caSApple OSS Distributions 	UNLOCK;
5910*c54f35caSApple OSS Distributions 
5911*c54f35caSApple OSS Distributions 	return result;
5912*c54f35caSApple OSS Distributions }
5913*c54f35caSApple OSS Distributions 
5914*c54f35caSApple OSS Distributions void
addMapping(IOMemoryMap * mapping)5915*c54f35caSApple OSS Distributions IOMemoryDescriptor::addMapping(
5916*c54f35caSApple OSS Distributions 	IOMemoryMap * mapping )
5917*c54f35caSApple OSS Distributions {
5918*c54f35caSApple OSS Distributions 	if (mapping) {
5919*c54f35caSApple OSS Distributions 		if (NULL == _mappings) {
5920*c54f35caSApple OSS Distributions 			_mappings = OSSet::withCapacity(1);
5921*c54f35caSApple OSS Distributions 		}
5922*c54f35caSApple OSS Distributions 		if (_mappings) {
5923*c54f35caSApple OSS Distributions 			_mappings->setObject( mapping );
5924*c54f35caSApple OSS Distributions 		}
5925*c54f35caSApple OSS Distributions 	}
5926*c54f35caSApple OSS Distributions }
5927*c54f35caSApple OSS Distributions 
5928*c54f35caSApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)5929*c54f35caSApple OSS Distributions IOMemoryDescriptor::removeMapping(
5930*c54f35caSApple OSS Distributions 	IOMemoryMap * mapping )
5931*c54f35caSApple OSS Distributions {
5932*c54f35caSApple OSS Distributions 	if (_mappings) {
5933*c54f35caSApple OSS Distributions 		_mappings->removeObject( mapping);
5934*c54f35caSApple OSS Distributions 	}
5935*c54f35caSApple OSS Distributions }
5936*c54f35caSApple OSS Distributions 
5937*c54f35caSApple OSS Distributions void
setMapperOptions(uint16_t options)5938*c54f35caSApple OSS Distributions IOMemoryDescriptor::setMapperOptions( uint16_t options)
5939*c54f35caSApple OSS Distributions {
5940*c54f35caSApple OSS Distributions 	_iomapperOptions = options;
5941*c54f35caSApple OSS Distributions }
5942*c54f35caSApple OSS Distributions 
5943*c54f35caSApple OSS Distributions uint16_t
getMapperOptions(void)5944*c54f35caSApple OSS Distributions IOMemoryDescriptor::getMapperOptions( void )
5945*c54f35caSApple OSS Distributions {
5946*c54f35caSApple OSS Distributions 	return _iomapperOptions;
5947*c54f35caSApple OSS Distributions }
5948*c54f35caSApple OSS Distributions 
5949*c54f35caSApple OSS Distributions #ifndef __LP64__
5950*c54f35caSApple OSS Distributions // obsolete initializers
5951*c54f35caSApple OSS Distributions // - initWithOptions is the designated initializer
5952*c54f35caSApple OSS Distributions bool
initWithAddress(void * address,IOByteCount length,IODirection direction)5953*c54f35caSApple OSS Distributions IOMemoryDescriptor::initWithAddress(void *      address,
5954*c54f35caSApple OSS Distributions     IOByteCount   length,
5955*c54f35caSApple OSS Distributions     IODirection direction)
5956*c54f35caSApple OSS Distributions {
5957*c54f35caSApple OSS Distributions 	return false;
5958*c54f35caSApple OSS Distributions }
5959*c54f35caSApple OSS Distributions 
5960*c54f35caSApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)5961*c54f35caSApple OSS Distributions IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
5962*c54f35caSApple OSS Distributions     IOByteCount    length,
5963*c54f35caSApple OSS Distributions     IODirection  direction,
5964*c54f35caSApple OSS Distributions     task_t       task)
5965*c54f35caSApple OSS Distributions {
5966*c54f35caSApple OSS Distributions 	return false;
5967*c54f35caSApple OSS Distributions }
5968*c54f35caSApple OSS Distributions 
5969*c54f35caSApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)5970*c54f35caSApple OSS Distributions IOMemoryDescriptor::initWithPhysicalAddress(
5971*c54f35caSApple OSS Distributions 	IOPhysicalAddress      address,
5972*c54f35caSApple OSS Distributions 	IOByteCount            length,
5973*c54f35caSApple OSS Distributions 	IODirection            direction )
5974*c54f35caSApple OSS Distributions {
5975*c54f35caSApple OSS Distributions 	return false;
5976*c54f35caSApple OSS Distributions }
5977*c54f35caSApple OSS Distributions 
5978*c54f35caSApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)5979*c54f35caSApple OSS Distributions IOMemoryDescriptor::initWithRanges(
5980*c54f35caSApple OSS Distributions 	IOVirtualRange * ranges,
5981*c54f35caSApple OSS Distributions 	UInt32           withCount,
5982*c54f35caSApple OSS Distributions 	IODirection      direction,
5983*c54f35caSApple OSS Distributions 	task_t           task,
5984*c54f35caSApple OSS Distributions 	bool             asReference)
5985*c54f35caSApple OSS Distributions {
5986*c54f35caSApple OSS Distributions 	return false;
5987*c54f35caSApple OSS Distributions }
5988*c54f35caSApple OSS Distributions 
5989*c54f35caSApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)5990*c54f35caSApple OSS Distributions IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
5991*c54f35caSApple OSS Distributions     UInt32           withCount,
5992*c54f35caSApple OSS Distributions     IODirection      direction,
5993*c54f35caSApple OSS Distributions     bool             asReference)
5994*c54f35caSApple OSS Distributions {
5995*c54f35caSApple OSS Distributions 	return false;
5996*c54f35caSApple OSS Distributions }
5997*c54f35caSApple OSS Distributions 
5998*c54f35caSApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)5999*c54f35caSApple OSS Distributions IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
6000*c54f35caSApple OSS Distributions     IOByteCount * lengthOfSegment)
6001*c54f35caSApple OSS Distributions {
6002*c54f35caSApple OSS Distributions 	return NULL;
6003*c54f35caSApple OSS Distributions }
6004*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
6005*c54f35caSApple OSS Distributions 
6006*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6007*c54f35caSApple OSS Distributions 
6008*c54f35caSApple OSS Distributions bool
serialize(OSSerialize * s) const6009*c54f35caSApple OSS Distributions IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
6010*c54f35caSApple OSS Distributions {
6011*c54f35caSApple OSS Distributions 	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
6012*c54f35caSApple OSS Distributions 	OSSharedPtr<OSObject>           values[2] = {NULL};
6013*c54f35caSApple OSS Distributions 	OSSharedPtr<OSArray>            array;
6014*c54f35caSApple OSS Distributions 
6015*c54f35caSApple OSS Distributions 	struct SerData {
6016*c54f35caSApple OSS Distributions 		user_addr_t address;
6017*c54f35caSApple OSS Distributions 		user_size_t length;
6018*c54f35caSApple OSS Distributions 	};
6019*c54f35caSApple OSS Distributions 
6020*c54f35caSApple OSS Distributions 	unsigned int index;
6021*c54f35caSApple OSS Distributions 
6022*c54f35caSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
6023*c54f35caSApple OSS Distributions 
6024*c54f35caSApple OSS Distributions 	if (s == NULL) {
6025*c54f35caSApple OSS Distributions 		return false;
6026*c54f35caSApple OSS Distributions 	}
6027*c54f35caSApple OSS Distributions 
6028*c54f35caSApple OSS Distributions 	array = OSArray::withCapacity(4);
6029*c54f35caSApple OSS Distributions 	if (!array) {
6030*c54f35caSApple OSS Distributions 		return false;
6031*c54f35caSApple OSS Distributions 	}
6032*c54f35caSApple OSS Distributions 
6033*c54f35caSApple OSS Distributions 	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6034*c54f35caSApple OSS Distributions 	if (!vcopy) {
6035*c54f35caSApple OSS Distributions 		return false;
6036*c54f35caSApple OSS Distributions 	}
6037*c54f35caSApple OSS Distributions 
6038*c54f35caSApple OSS Distributions 	keys[0] = OSSymbol::withCString("address");
6039*c54f35caSApple OSS Distributions 	keys[1] = OSSymbol::withCString("length");
6040*c54f35caSApple OSS Distributions 
6041*c54f35caSApple OSS Distributions 	// Copy the volatile data so we don't have to allocate memory
6042*c54f35caSApple OSS Distributions 	// while the lock is held.
6043*c54f35caSApple OSS Distributions 	LOCK;
6044*c54f35caSApple OSS Distributions 	if (vcopy.size() == _rangesCount) {
6045*c54f35caSApple OSS Distributions 		Ranges vec = _ranges;
6046*c54f35caSApple OSS Distributions 		for (index = 0; index < vcopy.size(); index++) {
6047*c54f35caSApple OSS Distributions 			mach_vm_address_t addr; mach_vm_size_t len;
6048*c54f35caSApple OSS Distributions 			getAddrLenForInd(addr, len, type, vec, index, _task);
6049*c54f35caSApple OSS Distributions 			vcopy[index].address = addr;
6050*c54f35caSApple OSS Distributions 			vcopy[index].length  = len;
6051*c54f35caSApple OSS Distributions 		}
6052*c54f35caSApple OSS Distributions 	} else {
6053*c54f35caSApple OSS Distributions 		// The descriptor changed out from under us.  Give up.
6054*c54f35caSApple OSS Distributions 		UNLOCK;
6055*c54f35caSApple OSS Distributions 		return false;
6056*c54f35caSApple OSS Distributions 	}
6057*c54f35caSApple OSS Distributions 	UNLOCK;
6058*c54f35caSApple OSS Distributions 
6059*c54f35caSApple OSS Distributions 	for (index = 0; index < vcopy.size(); index++) {
6060*c54f35caSApple OSS Distributions 		user_addr_t addr = vcopy[index].address;
6061*c54f35caSApple OSS Distributions 		IOByteCount len = (IOByteCount) vcopy[index].length;
6062*c54f35caSApple OSS Distributions 		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6063*c54f35caSApple OSS Distributions 		if (values[0] == NULL) {
6064*c54f35caSApple OSS Distributions 			return false;
6065*c54f35caSApple OSS Distributions 		}
6066*c54f35caSApple OSS Distributions 		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6067*c54f35caSApple OSS Distributions 		if (values[1] == NULL) {
6068*c54f35caSApple OSS Distributions 			return false;
6069*c54f35caSApple OSS Distributions 		}
6070*c54f35caSApple OSS Distributions 		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6071*c54f35caSApple OSS Distributions 		if (dict == NULL) {
6072*c54f35caSApple OSS Distributions 			return false;
6073*c54f35caSApple OSS Distributions 		}
6074*c54f35caSApple OSS Distributions 		array->setObject(dict.get());
6075*c54f35caSApple OSS Distributions 		dict.reset();
6076*c54f35caSApple OSS Distributions 		values[0].reset();
6077*c54f35caSApple OSS Distributions 		values[1].reset();
6078*c54f35caSApple OSS Distributions 	}
6079*c54f35caSApple OSS Distributions 
6080*c54f35caSApple OSS Distributions 	return array->serialize(s);
6081*c54f35caSApple OSS Distributions }
6082*c54f35caSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6083*c54f35caSApple OSS Distributions 
6084*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
6085*c54f35caSApple OSS Distributions #ifdef __LP64__
6086*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
6087*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
6088*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
6089*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
6090*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
6091*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
6092*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
6093*c54f35caSApple OSS Distributions #else /* !__LP64__ */
6094*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
6095*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
6096*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
6097*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
6098*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
6099*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
6100*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
6101*c54f35caSApple OSS Distributions #endif /* !__LP64__ */
6102*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
6103*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
6104*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
6105*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
6106*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
6107*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
6108*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
6109*c54f35caSApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6110*c54f35caSApple OSS Distributions 
6111*c54f35caSApple OSS Distributions /* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
6112*c54f35caSApple OSS Distributions KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
6113*c54f35caSApple OSS Distributions     struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6114*c54f35caSApple OSS Distributions 
6115*c54f35caSApple OSS Distributions /* ex-inline function implementation */
6116*c54f35caSApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()6117*c54f35caSApple OSS Distributions IOMemoryDescriptor::getPhysicalAddress()
6118*c54f35caSApple OSS Distributions {
6119*c54f35caSApple OSS Distributions 	return getPhysicalSegment( 0, NULL );
6120*c54f35caSApple OSS Distributions }
6121*c54f35caSApple OSS Distributions 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData,OSObject)6122*c54f35caSApple OSS Distributions OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6123*c54f35caSApple OSS Distributions 
6124*c54f35caSApple OSS Distributions OSPtr<_IOMemoryDescriptorMixedData>
6125*c54f35caSApple OSS Distributions _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6126*c54f35caSApple OSS Distributions {
6127*c54f35caSApple OSS Distributions 	OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6128*c54f35caSApple OSS Distributions 	if (me && !me->initWithCapacity(capacity)) {
6129*c54f35caSApple OSS Distributions 		return nullptr;
6130*c54f35caSApple OSS Distributions 	}
6131*c54f35caSApple OSS Distributions 	return me;
6132*c54f35caSApple OSS Distributions }
6133*c54f35caSApple OSS Distributions 
6134*c54f35caSApple OSS Distributions bool
initWithCapacity(size_t capacity)6135*c54f35caSApple OSS Distributions _IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
6136*c54f35caSApple OSS Distributions {
6137*c54f35caSApple OSS Distributions 	if (_data && (!capacity || (_capacity < capacity))) {
6138*c54f35caSApple OSS Distributions 		freeMemory();
6139*c54f35caSApple OSS Distributions 	}
6140*c54f35caSApple OSS Distributions 
6141*c54f35caSApple OSS Distributions 	if (!OSObject::init()) {
6142*c54f35caSApple OSS Distributions 		return false;
6143*c54f35caSApple OSS Distributions 	}
6144*c54f35caSApple OSS Distributions 
6145*c54f35caSApple OSS Distributions 	if (!_data && capacity) {
6146*c54f35caSApple OSS Distributions 		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
6147*c54f35caSApple OSS Distributions 		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
6148*c54f35caSApple OSS Distributions 		if (!_data) {
6149*c54f35caSApple OSS Distributions 			return false;
6150*c54f35caSApple OSS Distributions 		}
6151*c54f35caSApple OSS Distributions 		_capacity = capacity;
6152*c54f35caSApple OSS Distributions 	}
6153*c54f35caSApple OSS Distributions 
6154*c54f35caSApple OSS Distributions 	_length = 0;
6155*c54f35caSApple OSS Distributions 
6156*c54f35caSApple OSS Distributions 	return true;
6157*c54f35caSApple OSS Distributions }
6158*c54f35caSApple OSS Distributions 
6159*c54f35caSApple OSS Distributions void
free()6160*c54f35caSApple OSS Distributions _IOMemoryDescriptorMixedData::free()
6161*c54f35caSApple OSS Distributions {
6162*c54f35caSApple OSS Distributions 	freeMemory();
6163*c54f35caSApple OSS Distributions 	OSObject::free();
6164*c54f35caSApple OSS Distributions }
6165*c54f35caSApple OSS Distributions 
6166*c54f35caSApple OSS Distributions void
freeMemory()6167*c54f35caSApple OSS Distributions _IOMemoryDescriptorMixedData::freeMemory()
6168*c54f35caSApple OSS Distributions {
6169*c54f35caSApple OSS Distributions 	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
6170*c54f35caSApple OSS Distributions 	_data = nullptr;
6171*c54f35caSApple OSS Distributions 	_capacity = _length = 0;
6172*c54f35caSApple OSS Distributions }
6173*c54f35caSApple OSS Distributions 
6174*c54f35caSApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6175*c54f35caSApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6176*c54f35caSApple OSS Distributions {
6177*c54f35caSApple OSS Distributions 	const auto oldLength = getLength();
6178*c54f35caSApple OSS Distributions 	size_t newLength;
6179*c54f35caSApple OSS Distributions 	if (os_add_overflow(oldLength, length, &newLength)) {
6180*c54f35caSApple OSS Distributions 		return false;
6181*c54f35caSApple OSS Distributions 	}
6182*c54f35caSApple OSS Distributions 
6183*c54f35caSApple OSS Distributions 	if (!setLength(newLength)) {
6184*c54f35caSApple OSS Distributions 		return false;
6185*c54f35caSApple OSS Distributions 	}
6186*c54f35caSApple OSS Distributions 
6187*c54f35caSApple OSS Distributions 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6188*c54f35caSApple OSS Distributions 	if (bytes) {
6189*c54f35caSApple OSS Distributions 		bcopy(bytes, dest, length);
6190*c54f35caSApple OSS Distributions 	}
6191*c54f35caSApple OSS Distributions 
6192*c54f35caSApple OSS Distributions 	return true;
6193*c54f35caSApple OSS Distributions }
6194*c54f35caSApple OSS Distributions 
6195*c54f35caSApple OSS Distributions bool
setLength(size_t length)6196*c54f35caSApple OSS Distributions _IOMemoryDescriptorMixedData::setLength(size_t length)
6197*c54f35caSApple OSS Distributions {
6198*c54f35caSApple OSS Distributions 	if (!_data || (length > _capacity)) {
6199*c54f35caSApple OSS Distributions 		void *newData;
6200*c54f35caSApple OSS Distributions 
6201*c54f35caSApple OSS Distributions 		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
6202*c54f35caSApple OSS Distributions 		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
6203*c54f35caSApple OSS Distributions 		    NULL);
6204*c54f35caSApple OSS Distributions 		if (!newData) {
6205*c54f35caSApple OSS Distributions 			return false;
6206*c54f35caSApple OSS Distributions 		}
6207*c54f35caSApple OSS Distributions 
6208*c54f35caSApple OSS Distributions 		_data = newData;
6209*c54f35caSApple OSS Distributions 		_capacity = length;
6210*c54f35caSApple OSS Distributions 	}
6211*c54f35caSApple OSS Distributions 
6212*c54f35caSApple OSS Distributions 	_length = length;
6213*c54f35caSApple OSS Distributions 	return true;
6214*c54f35caSApple OSS Distributions }
6215*c54f35caSApple OSS Distributions 
6216*c54f35caSApple OSS Distributions const void *
getBytes() const6217*c54f35caSApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6218*c54f35caSApple OSS Distributions {
6219*c54f35caSApple OSS Distributions 	return _length ? _data : nullptr;
6220*c54f35caSApple OSS Distributions }
6221*c54f35caSApple OSS Distributions 
6222*c54f35caSApple OSS Distributions size_t
getLength() const6223*c54f35caSApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6224*c54f35caSApple OSS Distributions {
6225*c54f35caSApple OSS Distributions 	return _data ? _length : 0;
6226*c54f35caSApple OSS Distributions }
6227