xref: /xnu-11417.121.6/iokit/Kernel/IOMemoryDescriptor.cpp (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1*a1e26a70SApple OSS Distributions /*
2*a1e26a70SApple OSS Distributions  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*a1e26a70SApple OSS Distributions  *
4*a1e26a70SApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*a1e26a70SApple OSS Distributions  *
6*a1e26a70SApple OSS Distributions  * This file contains Original Code and/or Modifications of Original Code
7*a1e26a70SApple OSS Distributions  * as defined in and that are subject to the Apple Public Source License
8*a1e26a70SApple OSS Distributions  * Version 2.0 (the 'License'). You may not use this file except in
9*a1e26a70SApple OSS Distributions  * compliance with the License. The rights granted to you under the License
10*a1e26a70SApple OSS Distributions  * may not be used to create, or enable the creation or redistribution of,
11*a1e26a70SApple OSS Distributions  * unlawful or unlicensed copies of an Apple operating system, or to
12*a1e26a70SApple OSS Distributions  * circumvent, violate, or enable the circumvention or violation of, any
13*a1e26a70SApple OSS Distributions  * terms of an Apple operating system software license agreement.
14*a1e26a70SApple OSS Distributions  *
15*a1e26a70SApple OSS Distributions  * Please obtain a copy of the License at
16*a1e26a70SApple OSS Distributions  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*a1e26a70SApple OSS Distributions  *
18*a1e26a70SApple OSS Distributions  * The Original Code and all software distributed under the License are
19*a1e26a70SApple OSS Distributions  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*a1e26a70SApple OSS Distributions  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*a1e26a70SApple OSS Distributions  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*a1e26a70SApple OSS Distributions  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*a1e26a70SApple OSS Distributions  * Please see the License for the specific language governing rights and
24*a1e26a70SApple OSS Distributions  * limitations under the License.
25*a1e26a70SApple OSS Distributions  *
26*a1e26a70SApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*a1e26a70SApple OSS Distributions  */
28*a1e26a70SApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*a1e26a70SApple OSS Distributions 
30*a1e26a70SApple OSS Distributions #include <sys/cdefs.h>
31*a1e26a70SApple OSS Distributions 
32*a1e26a70SApple OSS Distributions #include <IOKit/assert.h>
33*a1e26a70SApple OSS Distributions #include <IOKit/system.h>
34*a1e26a70SApple OSS Distributions #include <IOKit/IOLib.h>
35*a1e26a70SApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*a1e26a70SApple OSS Distributions #include <IOKit/IOMapper.h>
37*a1e26a70SApple OSS Distributions #include <IOKit/IODMACommand.h>
38*a1e26a70SApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*a1e26a70SApple OSS Distributions 
40*a1e26a70SApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*a1e26a70SApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*a1e26a70SApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*a1e26a70SApple OSS Distributions 
44*a1e26a70SApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*a1e26a70SApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*a1e26a70SApple OSS Distributions #include <libkern/OSDebug.h>
47*a1e26a70SApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*a1e26a70SApple OSS Distributions 
49*a1e26a70SApple OSS Distributions #include "IOKitKernelInternal.h"
50*a1e26a70SApple OSS Distributions 
51*a1e26a70SApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*a1e26a70SApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*a1e26a70SApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*a1e26a70SApple OSS Distributions #include <libkern/c++/OSArray.h>
55*a1e26a70SApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*a1e26a70SApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*a1e26a70SApple OSS Distributions #include <os/overflow.h>
58*a1e26a70SApple OSS Distributions #include <os/cpp_util.h>
59*a1e26a70SApple OSS Distributions #include <os/base_private.h>
60*a1e26a70SApple OSS Distributions 
61*a1e26a70SApple OSS Distributions #include <sys/uio.h>
62*a1e26a70SApple OSS Distributions 
63*a1e26a70SApple OSS Distributions __BEGIN_DECLS
64*a1e26a70SApple OSS Distributions #include <vm/pmap.h>
65*a1e26a70SApple OSS Distributions #include <vm/vm_pageout_xnu.h>
66*a1e26a70SApple OSS Distributions #include <mach/memory_object_types.h>
67*a1e26a70SApple OSS Distributions #include <device/device_port.h>
68*a1e26a70SApple OSS Distributions 
69*a1e26a70SApple OSS Distributions #include <mach/vm_prot.h>
70*a1e26a70SApple OSS Distributions #include <mach/mach_vm.h>
71*a1e26a70SApple OSS Distributions #include <mach/memory_entry.h>
72*a1e26a70SApple OSS Distributions #include <mach/mach_host.h>
73*a1e26a70SApple OSS Distributions #include <vm/vm_fault_xnu.h>
74*a1e26a70SApple OSS Distributions #include <vm/vm_protos.h>
75*a1e26a70SApple OSS Distributions #include <vm/vm_memory_entry.h>
76*a1e26a70SApple OSS Distributions #include <vm/vm_kern_xnu.h>
77*a1e26a70SApple OSS Distributions #include <vm/vm_iokit.h>
78*a1e26a70SApple OSS Distributions #include <vm/vm_map_xnu.h>
79*a1e26a70SApple OSS Distributions #include <kern/thread.h>
80*a1e26a70SApple OSS Distributions 
81*a1e26a70SApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
82*a1e26a70SApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
83*a1e26a70SApple OSS Distributions 
84*a1e26a70SApple OSS Distributions __END_DECLS
85*a1e26a70SApple OSS Distributions 
// Sentinel passed where an IOMapper * is expected, meaning "block until the
// system mapper is available" rather than pointing at a real mapper object.
#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;

// Highest physical page number — presumably initialized during IOKit start;
// set elsewhere (not in this chunk). TODO confirm against IOLibInit.
ppnum_t           gIOLastPage;

enum {
	// Guard size used for larger mappings — NOTE(review): exact use not
	// visible in this chunk; confirm at the mapping sites.
	kIOMapGuardSizeLarge = 65536
};
95*a1e26a70SApple OSS Distributions 
96*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
97*a1e26a70SApple OSS Distributions 
98*a1e26a70SApple OSS Distributions OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
99*a1e26a70SApple OSS Distributions 
100*a1e26a70SApple OSS Distributions #define super IOMemoryDescriptor
101*a1e26a70SApple OSS Distributions 
102*a1e26a70SApple OSS Distributions OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
103*a1e26a70SApple OSS Distributions     IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
104*a1e26a70SApple OSS Distributions 
105*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106*a1e26a70SApple OSS Distributions 
// Recursive lock guarding global memory-descriptor state (e.g. the
// device-pager back-reference read by device_data_action below).
static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
// SLEEP/WAKEUP are keyed on 'this', so they are only usable from within
// member functions of the descriptor being waited on.
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug logging; compiled out by default (flip the #if to 1 to enable).
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
120*a1e26a70SApple OSS Distributions 
121*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
122*a1e26a70SApple OSS Distributions 
123*a1e26a70SApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
124*a1e26a70SApple OSS Distributions // Function
125*a1e26a70SApple OSS Distributions 
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,    // pages live on a device, no host page list — TODO confirm at use sites
	kIOPLExternUPL = 0x00000002,    // UPL was supplied externally rather than created here — TODO confirm
};

// Initialization data used when constructing a descriptor from a
// persistent memory reference (fMemRef) taken on another descriptor.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference               * fMemRef;
};

// Describes one UPL (universal page list) segment of a descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags
};
144*a1e26a70SApple OSS Distributions 
enum { kMaxWireTags = 6 };

// Per-descriptor internal state for IOGeneralMemoryDescriptor: mapper and
// DMA-mapping bookkeeping, followed by two variable-length tails — the
// fPageCnt-entry page list, then an implicit array of ioPLBlock located
// immediately after it (see the getIOPLList() macro below).
struct ioGMDData {
	IOMapper *  fMapper;
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;
#endif /* IOTRACKING */
	unsigned int      fPageCnt;           // number of entries in fPageList
	uint8_t           fDMAMapNumAddressBits;
	unsigned char     fCompletionError:1;
	unsigned char     fMappedBaseValid:1;
	unsigned char     _resv:4;
	unsigned char     fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
172*a1e26a70SApple OSS Distributions 
173*a1e26a70SApple OSS Distributions #pragma GCC visibility push(hidden)
174*a1e26a70SApple OSS Distributions 
// Growable byte buffer used to hold the mixed-layout ioGMDData blob
// (declaration only; method bodies are elsewhere in the file).
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Factory: returns an instance whose buffer can hold 'capacity' bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append 'length' bytes from 'bytes' — presumably grows the buffer as
	// needed; growth policy not visible here, confirm in the implementation.
	bool appendBytes(const void * bytes, size_t length);
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();

	void *  _data = nullptr;      // backing storage
	size_t  _length = 0;          // bytes currently in use
	size_t  _capacity = 0;        // bytes allocated
};
197*a1e26a70SApple OSS Distributions 
198*a1e26a70SApple OSS Distributions #pragma GCC visibility pop
199*a1e26a70SApple OSS Distributions 
// Accessors for the ioGMDData layout: the ioPLBlock array lives directly
// after the fPageCnt-entry page list inside the mixed-data buffer.
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
// Number of ioPLBlocks = remaining bytes after the page list / block size.
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
// Total bytes needed for an ioGMDData with p pages and u ioPLBlocks.
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
209*a1e26a70SApple OSS Distributions 
210*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
211*a1e26a70SApple OSS Distributions 
212*a1e26a70SApple OSS Distributions extern "C" {
213*a1e26a70SApple OSS Distributions kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)214*a1e26a70SApple OSS Distributions device_data_action(
215*a1e26a70SApple OSS Distributions 	uintptr_t               device_handle,
216*a1e26a70SApple OSS Distributions 	ipc_port_t              device_pager,
217*a1e26a70SApple OSS Distributions 	vm_prot_t               protection,
218*a1e26a70SApple OSS Distributions 	vm_object_offset_t      offset,
219*a1e26a70SApple OSS Distributions 	vm_size_t               size)
220*a1e26a70SApple OSS Distributions {
221*a1e26a70SApple OSS Distributions 	kern_return_t        kr;
222*a1e26a70SApple OSS Distributions 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
223*a1e26a70SApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> memDesc;
224*a1e26a70SApple OSS Distributions 
225*a1e26a70SApple OSS Distributions 	LOCK;
226*a1e26a70SApple OSS Distributions 	if (ref->dp.memory) {
227*a1e26a70SApple OSS Distributions 		memDesc.reset(ref->dp.memory, OSRetain);
228*a1e26a70SApple OSS Distributions 		kr = memDesc->handleFault(device_pager, offset, size);
229*a1e26a70SApple OSS Distributions 		memDesc.reset();
230*a1e26a70SApple OSS Distributions 	} else {
231*a1e26a70SApple OSS Distributions 		kr = KERN_ABORTED;
232*a1e26a70SApple OSS Distributions 	}
233*a1e26a70SApple OSS Distributions 	UNLOCK;
234*a1e26a70SApple OSS Distributions 
235*a1e26a70SApple OSS Distributions 	return kr;
236*a1e26a70SApple OSS Distributions }
237*a1e26a70SApple OSS Distributions 
238*a1e26a70SApple OSS Distributions kern_return_t
device_close(uintptr_t device_handle)239*a1e26a70SApple OSS Distributions device_close(
240*a1e26a70SApple OSS Distributions 	uintptr_t     device_handle)
241*a1e26a70SApple OSS Distributions {
242*a1e26a70SApple OSS Distributions 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
243*a1e26a70SApple OSS Distributions 
244*a1e26a70SApple OSS Distributions 	IOFreeType( ref, IOMemoryDescriptorReserved );
245*a1e26a70SApple OSS Distributions 
246*a1e26a70SApple OSS Distributions 	return kIOReturnSuccess;
247*a1e26a70SApple OSS Distributions }
248*a1e26a70SApple OSS Distributions };      // end extern "C"
249*a1e26a70SApple OSS Distributions 
250*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251*a1e26a70SApple OSS Distributions 
252*a1e26a70SApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
253*a1e26a70SApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
254*a1e26a70SApple OSS Distributions // checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(
	mach_vm_address_t                &addr,
	mach_vm_size_t                   &len, // Output variables
	UInt32                            type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32                            ind,
	task_t                            task __unused)
{
	// Extract range 'ind' from whichever representation the Ranges union
	// 'r' holds, selected by 'type'. Results go to the addr/len references.
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// uio-backed ranges: ask the uio for its ind'th iovec.
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	// On ILP32 the 64-bit variants use the wider IOAddressRange layout;
	// on LP64 they share the IOVirtualRange layout and fall to the else.
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len  = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len  = cur.length;
	}
#if CONFIG_PROB_GZALLOC
	// Kernel addresses may be PGZ-guarded aliases; translate back to the
	// canonical address before use.
	if (task == kernel_task) {
		addr = pgz_decode(addr, len);
	}
#endif /* CONFIG_PROB_GZALLOC */
}
290*a1e26a70SApple OSS Distributions 
291*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
292*a1e26a70SApple OSS Distributions 
293*a1e26a70SApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)294*a1e26a70SApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
295*a1e26a70SApple OSS Distributions {
296*a1e26a70SApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
297*a1e26a70SApple OSS Distributions 
298*a1e26a70SApple OSS Distributions 	*control = VM_PURGABLE_SET_STATE;
299*a1e26a70SApple OSS Distributions 
300*a1e26a70SApple OSS Distributions 	enum { kIOMemoryPurgeableControlMask = 15 };
301*a1e26a70SApple OSS Distributions 
302*a1e26a70SApple OSS Distributions 	switch (kIOMemoryPurgeableControlMask & newState) {
303*a1e26a70SApple OSS Distributions 	case kIOMemoryPurgeableKeepCurrent:
304*a1e26a70SApple OSS Distributions 		*control = VM_PURGABLE_GET_STATE;
305*a1e26a70SApple OSS Distributions 		break;
306*a1e26a70SApple OSS Distributions 
307*a1e26a70SApple OSS Distributions 	case kIOMemoryPurgeableNonVolatile:
308*a1e26a70SApple OSS Distributions 		*state = VM_PURGABLE_NONVOLATILE;
309*a1e26a70SApple OSS Distributions 		break;
310*a1e26a70SApple OSS Distributions 	case kIOMemoryPurgeableVolatile:
311*a1e26a70SApple OSS Distributions 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
312*a1e26a70SApple OSS Distributions 		break;
313*a1e26a70SApple OSS Distributions 	case kIOMemoryPurgeableEmpty:
314*a1e26a70SApple OSS Distributions 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
315*a1e26a70SApple OSS Distributions 		break;
316*a1e26a70SApple OSS Distributions 	default:
317*a1e26a70SApple OSS Distributions 		err = kIOReturnBadArgument;
318*a1e26a70SApple OSS Distributions 		break;
319*a1e26a70SApple OSS Distributions 	}
320*a1e26a70SApple OSS Distributions 
321*a1e26a70SApple OSS Distributions 	if (*control == VM_PURGABLE_SET_STATE) {
322*a1e26a70SApple OSS Distributions 		// let VM know this call is from the kernel and is allowed to alter
323*a1e26a70SApple OSS Distributions 		// the volatility of the memory entry even if it was created with
324*a1e26a70SApple OSS Distributions 		// MAP_MEM_PURGABLE_KERNEL_ONLY
325*a1e26a70SApple OSS Distributions 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
326*a1e26a70SApple OSS Distributions 	}
327*a1e26a70SApple OSS Distributions 
328*a1e26a70SApple OSS Distributions 	return err;
329*a1e26a70SApple OSS Distributions }
330*a1e26a70SApple OSS Distributions 
331*a1e26a70SApple OSS Distributions static IOReturn
purgeableStateBits(int * state)332*a1e26a70SApple OSS Distributions purgeableStateBits(int * state)
333*a1e26a70SApple OSS Distributions {
334*a1e26a70SApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
335*a1e26a70SApple OSS Distributions 
336*a1e26a70SApple OSS Distributions 	switch (VM_PURGABLE_STATE_MASK & *state) {
337*a1e26a70SApple OSS Distributions 	case VM_PURGABLE_NONVOLATILE:
338*a1e26a70SApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
339*a1e26a70SApple OSS Distributions 		break;
340*a1e26a70SApple OSS Distributions 	case VM_PURGABLE_VOLATILE:
341*a1e26a70SApple OSS Distributions 		*state = kIOMemoryPurgeableVolatile;
342*a1e26a70SApple OSS Distributions 		break;
343*a1e26a70SApple OSS Distributions 	case VM_PURGABLE_EMPTY:
344*a1e26a70SApple OSS Distributions 		*state = kIOMemoryPurgeableEmpty;
345*a1e26a70SApple OSS Distributions 		break;
346*a1e26a70SApple OSS Distributions 	default:
347*a1e26a70SApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
348*a1e26a70SApple OSS Distributions 		err = kIOReturnNotReady;
349*a1e26a70SApple OSS Distributions 		break;
350*a1e26a70SApple OSS Distributions 	}
351*a1e26a70SApple OSS Distributions 	return err;
352*a1e26a70SApple OSS Distributions }
353*a1e26a70SApple OSS Distributions 
// One row of the cache-mode translation table below.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* pager/cache attribute
	unsigned int object_type;   // MAP_MEM_* memory-entry object type
} iokit_memtype_entry;

// Indexed by IOKit cache mode (kIO*Cache); gives the matching VM WIMG
// bits and memory-entry type for each mode.
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
371*a1e26a70SApple OSS Distributions 
372*a1e26a70SApple OSS Distributions static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)373*a1e26a70SApple OSS Distributions vmProtForCacheMode(IOOptionBits cacheMode)
374*a1e26a70SApple OSS Distributions {
375*a1e26a70SApple OSS Distributions 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
376*a1e26a70SApple OSS Distributions 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
377*a1e26a70SApple OSS Distributions 		cacheMode = kIODefaultCache;
378*a1e26a70SApple OSS Distributions 	}
379*a1e26a70SApple OSS Distributions 	vm_prot_t prot = 0;
380*a1e26a70SApple OSS Distributions 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
381*a1e26a70SApple OSS Distributions 	return prot;
382*a1e26a70SApple OSS Distributions }
383*a1e26a70SApple OSS Distributions 
384*a1e26a70SApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)385*a1e26a70SApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
386*a1e26a70SApple OSS Distributions {
387*a1e26a70SApple OSS Distributions 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
388*a1e26a70SApple OSS Distributions 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
389*a1e26a70SApple OSS Distributions 		cacheMode = kIODefaultCache;
390*a1e26a70SApple OSS Distributions 	}
391*a1e26a70SApple OSS Distributions 	if (cacheMode == kIODefaultCache) {
392*a1e26a70SApple OSS Distributions 		return -1U;
393*a1e26a70SApple OSS Distributions 	}
394*a1e26a70SApple OSS Distributions 	return iomd_mem_types[cacheMode].wimg;
395*a1e26a70SApple OSS Distributions }
396*a1e26a70SApple OSS Distributions 
397*a1e26a70SApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)398*a1e26a70SApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
399*a1e26a70SApple OSS Distributions {
400*a1e26a70SApple OSS Distributions 	pagerFlags &= VM_WIMG_MASK;
401*a1e26a70SApple OSS Distributions 	IOOptionBits cacheMode = kIODefaultCache;
402*a1e26a70SApple OSS Distributions 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
403*a1e26a70SApple OSS Distributions 		if (iomd_mem_types[i].wimg == pagerFlags) {
404*a1e26a70SApple OSS Distributions 			cacheMode = i;
405*a1e26a70SApple OSS Distributions 			break;
406*a1e26a70SApple OSS Distributions 		}
407*a1e26a70SApple OSS Distributions 	}
408*a1e26a70SApple OSS Distributions 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
409*a1e26a70SApple OSS Distributions }
410*a1e26a70SApple OSS Distributions 
411*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
412*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413*a1e26a70SApple OSS Distributions 
// One named memory entry (a send right to a VM memory entry) covering a
// sub-range of the referenced memory.
struct IOMemoryEntry {
	ipc_port_t entry;
	int64_t    offset;
	uint64_t   size;
	uint64_t   start;
};

// Refcounted collection of memory entries backing a descriptor. It is
// allocated with a variable-length 'entries' tail sized by 'capacity'
// (see memoryReferenceAlloc/memoryReferenceFree).
struct IOMemoryReference {
	volatile SInt32             refCount;
	vm_prot_t                   prot;
	uint32_t                    capacity;   // entries allocated
	uint32_t                    count;      // entries in use
	struct IOMemoryReference  * mapRef;     // optional chained reference, freed recursively
	IOMemoryEntry               entries[0];
};

// Option bits for memory-reference creation.
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Count of live IOMemoryReference objects (accounting/debug).
SInt32 gIOMemoryReferenceCount;
437*a1e26a70SApple OSS Distributions 
438*a1e26a70SApple OSS Distributions IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)439*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
440*a1e26a70SApple OSS Distributions {
441*a1e26a70SApple OSS Distributions 	IOMemoryReference * ref;
442*a1e26a70SApple OSS Distributions 	size_t              oldCapacity;
443*a1e26a70SApple OSS Distributions 
444*a1e26a70SApple OSS Distributions 	if (realloc) {
445*a1e26a70SApple OSS Distributions 		oldCapacity = realloc->capacity;
446*a1e26a70SApple OSS Distributions 	} else {
447*a1e26a70SApple OSS Distributions 		oldCapacity = 0;
448*a1e26a70SApple OSS Distributions 	}
449*a1e26a70SApple OSS Distributions 
450*a1e26a70SApple OSS Distributions 	// Use the kalloc API instead of manually handling the reallocation
451*a1e26a70SApple OSS Distributions 	ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
452*a1e26a70SApple OSS Distributions 	    oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
453*a1e26a70SApple OSS Distributions 	if (ref) {
454*a1e26a70SApple OSS Distributions 		if (oldCapacity == 0) {
455*a1e26a70SApple OSS Distributions 			ref->refCount = 1;
456*a1e26a70SApple OSS Distributions 			OSIncrementAtomic(&gIOMemoryReferenceCount);
457*a1e26a70SApple OSS Distributions 		}
458*a1e26a70SApple OSS Distributions 		ref->capacity = capacity;
459*a1e26a70SApple OSS Distributions 	}
460*a1e26a70SApple OSS Distributions 	return ref;
461*a1e26a70SApple OSS Distributions }
462*a1e26a70SApple OSS Distributions 
463*a1e26a70SApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)464*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
465*a1e26a70SApple OSS Distributions {
466*a1e26a70SApple OSS Distributions 	IOMemoryEntry * entries;
467*a1e26a70SApple OSS Distributions 
468*a1e26a70SApple OSS Distributions 	if (ref->mapRef) {
469*a1e26a70SApple OSS Distributions 		memoryReferenceFree(ref->mapRef);
470*a1e26a70SApple OSS Distributions 		ref->mapRef = NULL;
471*a1e26a70SApple OSS Distributions 	}
472*a1e26a70SApple OSS Distributions 
473*a1e26a70SApple OSS Distributions 	entries = ref->entries + ref->count;
474*a1e26a70SApple OSS Distributions 	while (entries > &ref->entries[0]) {
475*a1e26a70SApple OSS Distributions 		entries--;
476*a1e26a70SApple OSS Distributions 		ipc_port_release_send(entries->entry);
477*a1e26a70SApple OSS Distributions 	}
478*a1e26a70SApple OSS Distributions 	kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
479*a1e26a70SApple OSS Distributions 
480*a1e26a70SApple OSS Distributions 	OSDecrementAtomic(&gIOMemoryReferenceCount);
481*a1e26a70SApple OSS Distributions }
482*a1e26a70SApple OSS Distributions 
483*a1e26a70SApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)484*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
485*a1e26a70SApple OSS Distributions {
486*a1e26a70SApple OSS Distributions 	if (1 == OSDecrementAtomic(&ref->refCount)) {
487*a1e26a70SApple OSS Distributions 		memoryReferenceFree(ref);
488*a1e26a70SApple OSS Distributions 	}
489*a1e26a70SApple OSS Distributions }
490*a1e26a70SApple OSS Distributions 
491*a1e26a70SApple OSS Distributions 
/*
 * memoryReferenceCreate
 *
 * Build an IOMemoryReference for this descriptor: a reference-counted
 * holder of Mach named entries (ipc_port_t send rights) covering the
 * descriptor's memory, used later to map that memory.  For task-backed
 * (virtual) descriptors one entry is created per coalesced virtual range
 * via mach_make_memory_entry_internal(); for physical / UPL descriptors
 * (_task == NULL) a single entry backed by a device pager is created.
 *
 * options:   kIOMemoryReferenceWrite / kIOMemoryReferenceCOW /
 *            kIOMemoryReferenceReuse — see their uses below.
 * reference: out parameter; receives the new (or reused) reference on
 *            success, NULL on failure.
 * Returns KERN_SUCCESS or a kern/IOReturn error code.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	// Initial size of the entry array, and the increment used to grow it.
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t        err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;            // cursor into ref->entries
	IOMemoryEntry *      cloneEntries = NULL; // cursor into _memRef->entries when reusing
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
	int                  misaligned_start = 0, misaligned_end = 0;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	// Prime (nextAddr, nextLen) with the first range to be covered.
	offset = 0;
	rangeIdx = 0;
	remain = _length;
	if (_task) {
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain  = nextLen;
		}
	} else {
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	// Writable unless the descriptor is direction-out only.
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	// MAP_MEM_* request bits are carried in the same word as the vm_prot_t.
	if (kIOMemoryReferenceCOW   & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	// Try to reuse the named entries of an existing _memRef; each new entry
	// is compared against the clone below, and reuse is abandoned on any
	// mismatch.
	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//    + "network" ledger tag
					//    + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//    + no ledger tag
					//    + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			// map == NULL on the creation path (MAP_MEM_NAMED_CREATE set above)
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		// One outer iteration per run of contiguous ranges; 'remain'
		// counts the bytes still to be covered.
		while (remain) {
			srcAddr  = nextAddr;
			srcLen   = nextLen;
			nextAddr = 0;
			nextLen  = 0;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			// With MAP_MEM_USE_DATA_ADDR the exact addresses are used;
			// otherwise the run is widened to whole-page boundaries.
			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr   = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr   = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// Cover [entryAddr, endAddr) with one or more named entries;
			// mach_make_memory_entry_internal() may cover less than asked
			// (actualSize), so loop until the run is exhausted.
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						// more entries than the clone source has: stop reusing
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				mach_vm_offset_t entryAddrForVm = entryAddr;
				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddrForVm, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddrForVm, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				// NOTE(review): overmap_start/end appear to flag entries the
				// VM adjusted beyond the requested bounds — see
				// memory_entry_check_for_adjustment for the exact semantics.
				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					if (misaligned_start || misaligned_end) {
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				// Grow the entry array when full; existing entries are
				// preserved by the krealloc_type-based allocator.
				// NOTE(review): the result is not NULL-checked here —
				// presumably the Z_WAITOK allocation cannot fail; confirm.
				if (count >= ref->capacity) {
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry  = entry;
				entries->size   = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				// Keep reusing only while each new entry exactly matches
				// the corresponding entry of _memRef.
				if (MAP_MEM_NAMED_REUSE & prot) {
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		// A device pager backs the single named entry covering all of the
		// descriptor's pages.
		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr  = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry  = entry;
				entries->size   = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot  = prot;

	// For copy-on-write descriptors, chain a second reference created with
	// kIOMemoryReferenceCOW (memoryReferenceMap recurses into ref->mapRef).
	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		// Every entry matched the existing _memRef: discard the freshly
		// built reference and hand back the shared one instead.
		if (MAP_MEM_NAMED_REUSE & prot) {
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
813*a1e26a70SApple OSS Distributions 
814*a1e26a70SApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)815*a1e26a70SApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
816*a1e26a70SApple OSS Distributions {
817*a1e26a70SApple OSS Distributions 	switch (kIOMapGuardedMask & options) {
818*a1e26a70SApple OSS Distributions 	default:
819*a1e26a70SApple OSS Distributions 	case kIOMapGuardedSmall:
820*a1e26a70SApple OSS Distributions 		return vm_map_page_size(map);
821*a1e26a70SApple OSS Distributions 	case kIOMapGuardedLarge:
822*a1e26a70SApple OSS Distributions 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
823*a1e26a70SApple OSS Distributions 		return kIOMapGuardSizeLarge;
824*a1e26a70SApple OSS Distributions 	}
825*a1e26a70SApple OSS Distributions 	;
826*a1e26a70SApple OSS Distributions }
827*a1e26a70SApple OSS Distributions 
828*a1e26a70SApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)829*a1e26a70SApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
830*a1e26a70SApple OSS Distributions     vm_map_offset_t addr, mach_vm_size_t size)
831*a1e26a70SApple OSS Distributions {
832*a1e26a70SApple OSS Distributions 	kern_return_t   kr;
833*a1e26a70SApple OSS Distributions 	vm_map_offset_t actualAddr;
834*a1e26a70SApple OSS Distributions 	mach_vm_size_t  actualSize;
835*a1e26a70SApple OSS Distributions 
836*a1e26a70SApple OSS Distributions 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
837*a1e26a70SApple OSS Distributions 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
838*a1e26a70SApple OSS Distributions 
839*a1e26a70SApple OSS Distributions 	if (kIOMapGuardedMask & options) {
840*a1e26a70SApple OSS Distributions 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
841*a1e26a70SApple OSS Distributions 		actualAddr -= guardSize;
842*a1e26a70SApple OSS Distributions 		actualSize += 2 * guardSize;
843*a1e26a70SApple OSS Distributions 	}
844*a1e26a70SApple OSS Distributions 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
845*a1e26a70SApple OSS Distributions 
846*a1e26a70SApple OSS Distributions 	return kr;
847*a1e26a70SApple OSS Distributions }
848*a1e26a70SApple OSS Distributions 
/*
 * IOMemoryDescriptorMapAlloc
 *
 * Allocate virtual address space in 'map' for the mapping described by
 * '_ref' (an IOMemoryDescriptorMapAllocRef).  When kIOMapGuarded* options
 * are set, the allocation is padded with a guard region on each side and
 * those guards are protected VM_PROT_NONE.  On success ref->mapped holds
 * the usable start address (past the leading guard) and ref->map the
 * target map.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn                        err;
	vm_map_offset_t                 addr;
	mach_vm_size_t                  size;
	mach_vm_size_t                  guardSize;
	vm_map_kernel_flags_t           vmk_flags;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		// Guarded mappings require the VM to pick the address.
		if (!(kIOMapAnywhere & ref->options)) {
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		size += 2 * guardSize;
	}
	if (kIOMapAnywhere & ref->options) {
		vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
	} else {
		vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
	}
	vmk_flags.vm_tag = ref->tag;

	/*
	 * Mapping memory into the kernel_map using IOMDs use the data range.
	 * Memory being mapped should not contain kernel pointers.
	 */
	if (map == kernel_map) {
		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
	}

	err = mach_vm_map_kernel(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    vmk_flags,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,                       /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// Make the first and last guardSize bytes inaccessible;
			// the caller's usable range begins after the leading guard.
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			ref->mapped += guardSize;
		}
	}

	return err;
}
915*a1e26a70SApple OSS Distributions 
916*a1e26a70SApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)917*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
918*a1e26a70SApple OSS Distributions 	IOMemoryReference * ref,
919*a1e26a70SApple OSS Distributions 	vm_map_t            map,
920*a1e26a70SApple OSS Distributions 	mach_vm_size_t      inoffset,
921*a1e26a70SApple OSS Distributions 	mach_vm_size_t      size,
922*a1e26a70SApple OSS Distributions 	IOOptionBits        options,
923*a1e26a70SApple OSS Distributions 	mach_vm_address_t * inaddr)
924*a1e26a70SApple OSS Distributions {
925*a1e26a70SApple OSS Distributions 	IOReturn        err;
926*a1e26a70SApple OSS Distributions 	int64_t         offset = inoffset;
927*a1e26a70SApple OSS Distributions 	uint32_t        rangeIdx, entryIdx;
928*a1e26a70SApple OSS Distributions 	vm_map_offset_t addr, mapAddr;
929*a1e26a70SApple OSS Distributions 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
930*a1e26a70SApple OSS Distributions 
931*a1e26a70SApple OSS Distributions 	mach_vm_address_t nextAddr;
932*a1e26a70SApple OSS Distributions 	mach_vm_size_t    nextLen;
933*a1e26a70SApple OSS Distributions 	IOByteCount       physLen;
934*a1e26a70SApple OSS Distributions 	IOMemoryEntry   * entry;
935*a1e26a70SApple OSS Distributions 	vm_prot_t         prot, memEntryCacheMode;
936*a1e26a70SApple OSS Distributions 	IOOptionBits      type;
937*a1e26a70SApple OSS Distributions 	IOOptionBits      cacheMode;
938*a1e26a70SApple OSS Distributions 	vm_tag_t          tag;
939*a1e26a70SApple OSS Distributions 	// for the kIOMapPrefault option.
940*a1e26a70SApple OSS Distributions 	upl_page_info_t * pageList = NULL;
941*a1e26a70SApple OSS Distributions 	UInt              currentPageIndex = 0;
942*a1e26a70SApple OSS Distributions 	bool              didAlloc;
943*a1e26a70SApple OSS Distributions 
944*a1e26a70SApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
945*a1e26a70SApple OSS Distributions 
946*a1e26a70SApple OSS Distributions 	if (ref->mapRef) {
947*a1e26a70SApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
948*a1e26a70SApple OSS Distributions 		return err;
949*a1e26a70SApple OSS Distributions 	}
950*a1e26a70SApple OSS Distributions 
951*a1e26a70SApple OSS Distributions 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
952*a1e26a70SApple OSS Distributions 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
953*a1e26a70SApple OSS Distributions 		return err;
954*a1e26a70SApple OSS Distributions 	}
955*a1e26a70SApple OSS Distributions 
956*a1e26a70SApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
957*a1e26a70SApple OSS Distributions 
958*a1e26a70SApple OSS Distributions 	prot = VM_PROT_READ;
959*a1e26a70SApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
960*a1e26a70SApple OSS Distributions 		prot |= VM_PROT_WRITE;
961*a1e26a70SApple OSS Distributions 	}
962*a1e26a70SApple OSS Distributions 	prot &= ref->prot;
963*a1e26a70SApple OSS Distributions 
964*a1e26a70SApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
965*a1e26a70SApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
966*a1e26a70SApple OSS Distributions 		// VM system requires write access to update named entry cache mode
967*a1e26a70SApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
968*a1e26a70SApple OSS Distributions 	}
969*a1e26a70SApple OSS Distributions 
970*a1e26a70SApple OSS Distributions 	tag = (typeof(tag))getVMTag(map);
971*a1e26a70SApple OSS Distributions 
972*a1e26a70SApple OSS Distributions 	if (_task) {
973*a1e26a70SApple OSS Distributions 		// Find first range for offset
974*a1e26a70SApple OSS Distributions 		if (!_rangesCount) {
975*a1e26a70SApple OSS Distributions 			return kIOReturnBadArgument;
976*a1e26a70SApple OSS Distributions 		}
977*a1e26a70SApple OSS Distributions 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
978*a1e26a70SApple OSS Distributions 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
979*a1e26a70SApple OSS Distributions 			if (remain < nextLen) {
980*a1e26a70SApple OSS Distributions 				break;
981*a1e26a70SApple OSS Distributions 			}
982*a1e26a70SApple OSS Distributions 			remain -= nextLen;
983*a1e26a70SApple OSS Distributions 		}
984*a1e26a70SApple OSS Distributions 	} else {
985*a1e26a70SApple OSS Distributions 		rangeIdx = 0;
986*a1e26a70SApple OSS Distributions 		remain   = 0;
987*a1e26a70SApple OSS Distributions 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
988*a1e26a70SApple OSS Distributions 		nextLen  = size;
989*a1e26a70SApple OSS Distributions 	}
990*a1e26a70SApple OSS Distributions 
991*a1e26a70SApple OSS Distributions 	assert(remain < nextLen);
992*a1e26a70SApple OSS Distributions 	if (remain >= nextLen) {
993*a1e26a70SApple OSS Distributions 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
994*a1e26a70SApple OSS Distributions 		return kIOReturnBadArgument;
995*a1e26a70SApple OSS Distributions 	}
996*a1e26a70SApple OSS Distributions 
997*a1e26a70SApple OSS Distributions 	nextAddr  += remain;
998*a1e26a70SApple OSS Distributions 	nextLen   -= remain;
999*a1e26a70SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1000*a1e26a70SApple OSS Distributions 	pageOffset = (vm_map_page_mask(map) & nextAddr);
1001*a1e26a70SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
1002*a1e26a70SApple OSS Distributions 	pageOffset = (page_mask & nextAddr);
1003*a1e26a70SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
1004*a1e26a70SApple OSS Distributions 	addr       = 0;
1005*a1e26a70SApple OSS Distributions 	didAlloc   = false;
1006*a1e26a70SApple OSS Distributions 
1007*a1e26a70SApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
1008*a1e26a70SApple OSS Distributions 		addr = *inaddr;
1009*a1e26a70SApple OSS Distributions 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
1010*a1e26a70SApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1011*a1e26a70SApple OSS Distributions 		}
1012*a1e26a70SApple OSS Distributions 		addr -= pageOffset;
1013*a1e26a70SApple OSS Distributions 	}
1014*a1e26a70SApple OSS Distributions 
1015*a1e26a70SApple OSS Distributions 	// find first entry for offset
1016*a1e26a70SApple OSS Distributions 	for (entryIdx = 0;
1017*a1e26a70SApple OSS Distributions 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1018*a1e26a70SApple OSS Distributions 	    entryIdx++) {
1019*a1e26a70SApple OSS Distributions 	}
1020*a1e26a70SApple OSS Distributions 	entryIdx--;
1021*a1e26a70SApple OSS Distributions 	entry = &ref->entries[entryIdx];
1022*a1e26a70SApple OSS Distributions 
1023*a1e26a70SApple OSS Distributions 	// allocate VM
1024*a1e26a70SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1025*a1e26a70SApple OSS Distributions 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1026*a1e26a70SApple OSS Distributions #else
1027*a1e26a70SApple OSS Distributions 	size = round_page_64(size + pageOffset);
1028*a1e26a70SApple OSS Distributions #endif
1029*a1e26a70SApple OSS Distributions 	if (kIOMapOverwrite & options) {
1030*a1e26a70SApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1031*a1e26a70SApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1032*a1e26a70SApple OSS Distributions 		}
1033*a1e26a70SApple OSS Distributions 		err = KERN_SUCCESS;
1034*a1e26a70SApple OSS Distributions 	} else {
1035*a1e26a70SApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1036*a1e26a70SApple OSS Distributions 		ref.map     = map;
1037*a1e26a70SApple OSS Distributions 		ref.tag     = tag;
1038*a1e26a70SApple OSS Distributions 		ref.options = options;
1039*a1e26a70SApple OSS Distributions 		ref.size    = size;
1040*a1e26a70SApple OSS Distributions 		ref.prot    = prot;
1041*a1e26a70SApple OSS Distributions 		if (options & kIOMapAnywhere) {
1042*a1e26a70SApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1043*a1e26a70SApple OSS Distributions 			ref.mapped = 0;
1044*a1e26a70SApple OSS Distributions 		} else {
1045*a1e26a70SApple OSS Distributions 			ref.mapped = addr;
1046*a1e26a70SApple OSS Distributions 		}
1047*a1e26a70SApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1048*a1e26a70SApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1049*a1e26a70SApple OSS Distributions 		} else {
1050*a1e26a70SApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1051*a1e26a70SApple OSS Distributions 		}
1052*a1e26a70SApple OSS Distributions 		if (KERN_SUCCESS == err) {
1053*a1e26a70SApple OSS Distributions 			addr     = ref.mapped;
1054*a1e26a70SApple OSS Distributions 			map      = ref.map;
1055*a1e26a70SApple OSS Distributions 			didAlloc = true;
1056*a1e26a70SApple OSS Distributions 		}
1057*a1e26a70SApple OSS Distributions 	}
1058*a1e26a70SApple OSS Distributions 
1059*a1e26a70SApple OSS Distributions 	/*
1060*a1e26a70SApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1061*a1e26a70SApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1062*a1e26a70SApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1063*a1e26a70SApple OSS Distributions 	 * operations.
1064*a1e26a70SApple OSS Distributions 	 */
1065*a1e26a70SApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1066*a1e26a70SApple OSS Distributions 		options &= ~kIOMapPrefault;
1067*a1e26a70SApple OSS Distributions 	}
1068*a1e26a70SApple OSS Distributions 
1069*a1e26a70SApple OSS Distributions 	/*
1070*a1e26a70SApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1071*a1e26a70SApple OSS Distributions 	 * memory type, and the underlying data.
1072*a1e26a70SApple OSS Distributions 	 */
1073*a1e26a70SApple OSS Distributions 	if (options & kIOMapPrefault) {
1074*a1e26a70SApple OSS Distributions 		/*
1075*a1e26a70SApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1076*a1e26a70SApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1077*a1e26a70SApple OSS Distributions 		 */
1078*a1e26a70SApple OSS Distributions 		assert(_wireCount != 0);
1079*a1e26a70SApple OSS Distributions 		assert(_memoryEntries != NULL);
1080*a1e26a70SApple OSS Distributions 		if ((_wireCount == 0) ||
1081*a1e26a70SApple OSS Distributions 		    (_memoryEntries == NULL)) {
1082*a1e26a70SApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1083*a1e26a70SApple OSS Distributions 			return kIOReturnBadArgument;
1084*a1e26a70SApple OSS Distributions 		}
1085*a1e26a70SApple OSS Distributions 
1086*a1e26a70SApple OSS Distributions 		// Get the page list.
1087*a1e26a70SApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1088*a1e26a70SApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1089*a1e26a70SApple OSS Distributions 		pageList = getPageList(dataP);
1090*a1e26a70SApple OSS Distributions 
1091*a1e26a70SApple OSS Distributions 		// Get the number of IOPLs.
1092*a1e26a70SApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1093*a1e26a70SApple OSS Distributions 
1094*a1e26a70SApple OSS Distributions 		/*
1095*a1e26a70SApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1096*a1e26a70SApple OSS Distributions 		 * the offset. The research will go past it, so we'll need to go back to the
1097*a1e26a70SApple OSS Distributions 		 * right range at the end.
1098*a1e26a70SApple OSS Distributions 		 */
1099*a1e26a70SApple OSS Distributions 		UInt ioplIndex = 0;
1100*a1e26a70SApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1101*a1e26a70SApple OSS Distributions 			ioplIndex++;
1102*a1e26a70SApple OSS Distributions 		}
1103*a1e26a70SApple OSS Distributions 		ioplIndex--;
1104*a1e26a70SApple OSS Distributions 
1105*a1e26a70SApple OSS Distributions 		// Retrieve the IOPL info block.
1106*a1e26a70SApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1107*a1e26a70SApple OSS Distributions 
1108*a1e26a70SApple OSS Distributions 		/*
1109*a1e26a70SApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1110*a1e26a70SApple OSS Distributions 		 * array.
1111*a1e26a70SApple OSS Distributions 		 */
1112*a1e26a70SApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1113*a1e26a70SApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1114*a1e26a70SApple OSS Distributions 		} else {
1115*a1e26a70SApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1116*a1e26a70SApple OSS Distributions 		}
1117*a1e26a70SApple OSS Distributions 
1118*a1e26a70SApple OSS Distributions 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1119*a1e26a70SApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1120*a1e26a70SApple OSS Distributions 
1121*a1e26a70SApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1122*a1e26a70SApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1123*a1e26a70SApple OSS Distributions 	}
1124*a1e26a70SApple OSS Distributions 
1125*a1e26a70SApple OSS Distributions 	// enter mappings
1126*a1e26a70SApple OSS Distributions 	remain  = size;
1127*a1e26a70SApple OSS Distributions 	mapAddr = addr;
1128*a1e26a70SApple OSS Distributions 	addr    += pageOffset;
1129*a1e26a70SApple OSS Distributions 
1130*a1e26a70SApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1131*a1e26a70SApple OSS Distributions 		entryOffset = offset - entry->offset;
1132*a1e26a70SApple OSS Distributions 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1133*a1e26a70SApple OSS Distributions 			err = kIOReturnNotAligned;
1134*a1e26a70SApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1135*a1e26a70SApple OSS Distributions 			break;
1136*a1e26a70SApple OSS Distributions 		}
1137*a1e26a70SApple OSS Distributions 
1138*a1e26a70SApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1139*a1e26a70SApple OSS Distributions 			vm_size_t unused = 0;
1140*a1e26a70SApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1141*a1e26a70SApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1142*a1e26a70SApple OSS Distributions 			assert(KERN_SUCCESS == err);
1143*a1e26a70SApple OSS Distributions 		}
1144*a1e26a70SApple OSS Distributions 
1145*a1e26a70SApple OSS Distributions 		entryOffset -= pageOffset;
1146*a1e26a70SApple OSS Distributions 		if (entryOffset >= entry->size) {
1147*a1e26a70SApple OSS Distributions 			panic("entryOffset");
1148*a1e26a70SApple OSS Distributions 		}
1149*a1e26a70SApple OSS Distributions 		chunk = entry->size - entryOffset;
1150*a1e26a70SApple OSS Distributions 		if (chunk) {
1151*a1e26a70SApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags = {
1152*a1e26a70SApple OSS Distributions 				.vmf_fixed = true,
1153*a1e26a70SApple OSS Distributions 				.vmf_overwrite = true,
1154*a1e26a70SApple OSS Distributions 				.vm_tag = tag,
1155*a1e26a70SApple OSS Distributions 				.vmkf_iokit_acct = true,
1156*a1e26a70SApple OSS Distributions 			};
1157*a1e26a70SApple OSS Distributions 
1158*a1e26a70SApple OSS Distributions 			if (chunk > remain) {
1159*a1e26a70SApple OSS Distributions 				chunk = remain;
1160*a1e26a70SApple OSS Distributions 			}
1161*a1e26a70SApple OSS Distributions 			if (options & kIOMapPrefault) {
1162*a1e26a70SApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1163*a1e26a70SApple OSS Distributions 
1164*a1e26a70SApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1165*a1e26a70SApple OSS Distributions 				    &mapAddr,
1166*a1e26a70SApple OSS Distributions 				    chunk, 0 /* mask */,
1167*a1e26a70SApple OSS Distributions 				    vmk_flags,
1168*a1e26a70SApple OSS Distributions 				    entry->entry,
1169*a1e26a70SApple OSS Distributions 				    entryOffset,
1170*a1e26a70SApple OSS Distributions 				    prot,                        // cur
1171*a1e26a70SApple OSS Distributions 				    prot,                        // max
1172*a1e26a70SApple OSS Distributions 				    &pageList[currentPageIndex],
1173*a1e26a70SApple OSS Distributions 				    nb_pages);
1174*a1e26a70SApple OSS Distributions 
1175*a1e26a70SApple OSS Distributions 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1176*a1e26a70SApple OSS Distributions 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1177*a1e26a70SApple OSS Distributions 				}
1178*a1e26a70SApple OSS Distributions 				// Compute the next index in the page list.
1179*a1e26a70SApple OSS Distributions 				currentPageIndex += nb_pages;
1180*a1e26a70SApple OSS Distributions 				assert(currentPageIndex <= _pages);
1181*a1e26a70SApple OSS Distributions 			} else {
1182*a1e26a70SApple OSS Distributions 				err = mach_vm_map_kernel(map,
1183*a1e26a70SApple OSS Distributions 				    &mapAddr,
1184*a1e26a70SApple OSS Distributions 				    chunk, 0 /* mask */,
1185*a1e26a70SApple OSS Distributions 				    vmk_flags,
1186*a1e26a70SApple OSS Distributions 				    entry->entry,
1187*a1e26a70SApple OSS Distributions 				    entryOffset,
1188*a1e26a70SApple OSS Distributions 				    false,               // copy
1189*a1e26a70SApple OSS Distributions 				    prot,               // cur
1190*a1e26a70SApple OSS Distributions 				    prot,               // max
1191*a1e26a70SApple OSS Distributions 				    VM_INHERIT_NONE);
1192*a1e26a70SApple OSS Distributions 			}
1193*a1e26a70SApple OSS Distributions 			if (KERN_SUCCESS != err) {
1194*a1e26a70SApple OSS Distributions 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1195*a1e26a70SApple OSS Distributions 				break;
1196*a1e26a70SApple OSS Distributions 			}
1197*a1e26a70SApple OSS Distributions 			remain -= chunk;
1198*a1e26a70SApple OSS Distributions 			if (!remain) {
1199*a1e26a70SApple OSS Distributions 				break;
1200*a1e26a70SApple OSS Distributions 			}
1201*a1e26a70SApple OSS Distributions 			mapAddr  += chunk;
1202*a1e26a70SApple OSS Distributions 			offset   += chunk - pageOffset;
1203*a1e26a70SApple OSS Distributions 		}
1204*a1e26a70SApple OSS Distributions 		pageOffset = 0;
1205*a1e26a70SApple OSS Distributions 		entry++;
1206*a1e26a70SApple OSS Distributions 		entryIdx++;
1207*a1e26a70SApple OSS Distributions 		if (entryIdx >= ref->count) {
1208*a1e26a70SApple OSS Distributions 			err = kIOReturnOverrun;
1209*a1e26a70SApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1210*a1e26a70SApple OSS Distributions 			break;
1211*a1e26a70SApple OSS Distributions 		}
1212*a1e26a70SApple OSS Distributions 	}
1213*a1e26a70SApple OSS Distributions 
1214*a1e26a70SApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1215*a1e26a70SApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1216*a1e26a70SApple OSS Distributions 		addr = 0;
1217*a1e26a70SApple OSS Distributions 	}
1218*a1e26a70SApple OSS Distributions 	*inaddr = addr;
1219*a1e26a70SApple OSS Distributions 
1220*a1e26a70SApple OSS Distributions 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1221*a1e26a70SApple OSS Distributions 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1222*a1e26a70SApple OSS Distributions 	}
1223*a1e26a70SApple OSS Distributions 	return err;
1224*a1e26a70SApple OSS Distributions }
1225*a1e26a70SApple OSS Distributions 
1226*a1e26a70SApple OSS Distributions #define LOGUNALIGN 0
1227*a1e26a70SApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1228*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1229*a1e26a70SApple OSS Distributions 	IOMemoryReference * ref,
1230*a1e26a70SApple OSS Distributions 	vm_map_t            map,
1231*a1e26a70SApple OSS Distributions 	mach_vm_size_t      inoffset,
1232*a1e26a70SApple OSS Distributions 	mach_vm_size_t      size,
1233*a1e26a70SApple OSS Distributions 	IOOptionBits        options,
1234*a1e26a70SApple OSS Distributions 	mach_vm_address_t * inaddr)
1235*a1e26a70SApple OSS Distributions {
1236*a1e26a70SApple OSS Distributions 	IOReturn            err;
1237*a1e26a70SApple OSS Distributions 	int64_t             offset = inoffset;
1238*a1e26a70SApple OSS Distributions 	uint32_t            entryIdx, firstEntryIdx;
1239*a1e26a70SApple OSS Distributions 	vm_map_offset_t     addr, mapAddr, mapAddrOut;
1240*a1e26a70SApple OSS Distributions 	vm_map_offset_t     entryOffset, remain, chunk;
1241*a1e26a70SApple OSS Distributions 
1242*a1e26a70SApple OSS Distributions 	IOMemoryEntry    * entry;
1243*a1e26a70SApple OSS Distributions 	vm_prot_t          prot, memEntryCacheMode;
1244*a1e26a70SApple OSS Distributions 	IOOptionBits       type;
1245*a1e26a70SApple OSS Distributions 	IOOptionBits       cacheMode;
1246*a1e26a70SApple OSS Distributions 	vm_tag_t           tag;
1247*a1e26a70SApple OSS Distributions 	// for the kIOMapPrefault option.
1248*a1e26a70SApple OSS Distributions 	upl_page_info_t  * pageList = NULL;
1249*a1e26a70SApple OSS Distributions 	UInt               currentPageIndex = 0;
1250*a1e26a70SApple OSS Distributions 	bool               didAlloc;
1251*a1e26a70SApple OSS Distributions 
1252*a1e26a70SApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1253*a1e26a70SApple OSS Distributions 
1254*a1e26a70SApple OSS Distributions 	if (ref->mapRef) {
1255*a1e26a70SApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1256*a1e26a70SApple OSS Distributions 		return err;
1257*a1e26a70SApple OSS Distributions 	}
1258*a1e26a70SApple OSS Distributions 
1259*a1e26a70SApple OSS Distributions #if LOGUNALIGN
1260*a1e26a70SApple OSS Distributions 	printf("MAP offset %qx, %qx\n", inoffset, size);
1261*a1e26a70SApple OSS Distributions #endif
1262*a1e26a70SApple OSS Distributions 
1263*a1e26a70SApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
1264*a1e26a70SApple OSS Distributions 
1265*a1e26a70SApple OSS Distributions 	prot = VM_PROT_READ;
1266*a1e26a70SApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
1267*a1e26a70SApple OSS Distributions 		prot |= VM_PROT_WRITE;
1268*a1e26a70SApple OSS Distributions 	}
1269*a1e26a70SApple OSS Distributions 	prot &= ref->prot;
1270*a1e26a70SApple OSS Distributions 
1271*a1e26a70SApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1272*a1e26a70SApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
1273*a1e26a70SApple OSS Distributions 		// VM system requires write access to update named entry cache mode
1274*a1e26a70SApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1275*a1e26a70SApple OSS Distributions 	}
1276*a1e26a70SApple OSS Distributions 
1277*a1e26a70SApple OSS Distributions 	tag = (vm_tag_t) getVMTag(map);
1278*a1e26a70SApple OSS Distributions 
1279*a1e26a70SApple OSS Distributions 	addr       = 0;
1280*a1e26a70SApple OSS Distributions 	didAlloc   = false;
1281*a1e26a70SApple OSS Distributions 
1282*a1e26a70SApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
1283*a1e26a70SApple OSS Distributions 		addr = *inaddr;
1284*a1e26a70SApple OSS Distributions 	}
1285*a1e26a70SApple OSS Distributions 
1286*a1e26a70SApple OSS Distributions 	// find first entry for offset
1287*a1e26a70SApple OSS Distributions 	for (firstEntryIdx = 0;
1288*a1e26a70SApple OSS Distributions 	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1289*a1e26a70SApple OSS Distributions 	    firstEntryIdx++) {
1290*a1e26a70SApple OSS Distributions 	}
1291*a1e26a70SApple OSS Distributions 	firstEntryIdx--;
1292*a1e26a70SApple OSS Distributions 
1293*a1e26a70SApple OSS Distributions 	// calculate required VM space
1294*a1e26a70SApple OSS Distributions 
1295*a1e26a70SApple OSS Distributions 	entryIdx = firstEntryIdx;
1296*a1e26a70SApple OSS Distributions 	entry = &ref->entries[entryIdx];
1297*a1e26a70SApple OSS Distributions 
1298*a1e26a70SApple OSS Distributions 	remain  = size;
1299*a1e26a70SApple OSS Distributions 	int64_t iteroffset = offset;
1300*a1e26a70SApple OSS Distributions 	uint64_t mapSize = 0;
1301*a1e26a70SApple OSS Distributions 	while (remain) {
1302*a1e26a70SApple OSS Distributions 		entryOffset = iteroffset - entry->offset;
1303*a1e26a70SApple OSS Distributions 		if (entryOffset >= entry->size) {
1304*a1e26a70SApple OSS Distributions 			panic("entryOffset");
1305*a1e26a70SApple OSS Distributions 		}
1306*a1e26a70SApple OSS Distributions 
1307*a1e26a70SApple OSS Distributions #if LOGUNALIGN
1308*a1e26a70SApple OSS Distributions 		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1309*a1e26a70SApple OSS Distributions 		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1310*a1e26a70SApple OSS Distributions #endif
1311*a1e26a70SApple OSS Distributions 
1312*a1e26a70SApple OSS Distributions 		chunk = entry->size - entryOffset;
1313*a1e26a70SApple OSS Distributions 		if (chunk) {
1314*a1e26a70SApple OSS Distributions 			if (chunk > remain) {
1315*a1e26a70SApple OSS Distributions 				chunk = remain;
1316*a1e26a70SApple OSS Distributions 			}
1317*a1e26a70SApple OSS Distributions 			mach_vm_size_t entrySize;
1318*a1e26a70SApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1319*a1e26a70SApple OSS Distributions 			assert(KERN_SUCCESS == err);
1320*a1e26a70SApple OSS Distributions 			mapSize += entrySize;
1321*a1e26a70SApple OSS Distributions 
1322*a1e26a70SApple OSS Distributions 			remain -= chunk;
1323*a1e26a70SApple OSS Distributions 			if (!remain) {
1324*a1e26a70SApple OSS Distributions 				break;
1325*a1e26a70SApple OSS Distributions 			}
1326*a1e26a70SApple OSS Distributions 			iteroffset   += chunk; // - pageOffset;
1327*a1e26a70SApple OSS Distributions 		}
1328*a1e26a70SApple OSS Distributions 		entry++;
1329*a1e26a70SApple OSS Distributions 		entryIdx++;
1330*a1e26a70SApple OSS Distributions 		if (entryIdx >= ref->count) {
1331*a1e26a70SApple OSS Distributions 			panic("overrun");
1332*a1e26a70SApple OSS Distributions 			err = kIOReturnOverrun;
1333*a1e26a70SApple OSS Distributions 			break;
1334*a1e26a70SApple OSS Distributions 		}
1335*a1e26a70SApple OSS Distributions 	}
1336*a1e26a70SApple OSS Distributions 
1337*a1e26a70SApple OSS Distributions 	if (kIOMapOverwrite & options) {
1338*a1e26a70SApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339*a1e26a70SApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1340*a1e26a70SApple OSS Distributions 		}
1341*a1e26a70SApple OSS Distributions 		err = KERN_SUCCESS;
1342*a1e26a70SApple OSS Distributions 	} else {
1343*a1e26a70SApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1344*a1e26a70SApple OSS Distributions 		ref.map     = map;
1345*a1e26a70SApple OSS Distributions 		ref.tag     = tag;
1346*a1e26a70SApple OSS Distributions 		ref.options = options;
1347*a1e26a70SApple OSS Distributions 		ref.size    = mapSize;
1348*a1e26a70SApple OSS Distributions 		ref.prot    = prot;
1349*a1e26a70SApple OSS Distributions 		if (options & kIOMapAnywhere) {
1350*a1e26a70SApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1351*a1e26a70SApple OSS Distributions 			ref.mapped = 0;
1352*a1e26a70SApple OSS Distributions 		} else {
1353*a1e26a70SApple OSS Distributions 			ref.mapped = addr;
1354*a1e26a70SApple OSS Distributions 		}
1355*a1e26a70SApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1356*a1e26a70SApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1357*a1e26a70SApple OSS Distributions 		} else {
1358*a1e26a70SApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1359*a1e26a70SApple OSS Distributions 		}
1360*a1e26a70SApple OSS Distributions 
1361*a1e26a70SApple OSS Distributions 		if (KERN_SUCCESS == err) {
1362*a1e26a70SApple OSS Distributions 			addr     = ref.mapped;
1363*a1e26a70SApple OSS Distributions 			map      = ref.map;
1364*a1e26a70SApple OSS Distributions 			didAlloc = true;
1365*a1e26a70SApple OSS Distributions 		}
1366*a1e26a70SApple OSS Distributions #if LOGUNALIGN
1367*a1e26a70SApple OSS Distributions 		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1368*a1e26a70SApple OSS Distributions #endif
1369*a1e26a70SApple OSS Distributions 	}
1370*a1e26a70SApple OSS Distributions 
1371*a1e26a70SApple OSS Distributions 	/*
1372*a1e26a70SApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1373*a1e26a70SApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1374*a1e26a70SApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1375*a1e26a70SApple OSS Distributions 	 * operations.
1376*a1e26a70SApple OSS Distributions 	 */
1377*a1e26a70SApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1378*a1e26a70SApple OSS Distributions 		options &= ~kIOMapPrefault;
1379*a1e26a70SApple OSS Distributions 	}
1380*a1e26a70SApple OSS Distributions 
1381*a1e26a70SApple OSS Distributions 	/*
1382*a1e26a70SApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1383*a1e26a70SApple OSS Distributions 	 * memory type, and the underlying data.
1384*a1e26a70SApple OSS Distributions 	 */
1385*a1e26a70SApple OSS Distributions 	if (options & kIOMapPrefault) {
1386*a1e26a70SApple OSS Distributions 		/*
1387*a1e26a70SApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1388*a1e26a70SApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1389*a1e26a70SApple OSS Distributions 		 */
1390*a1e26a70SApple OSS Distributions 		assert(_wireCount != 0);
1391*a1e26a70SApple OSS Distributions 		assert(_memoryEntries != NULL);
1392*a1e26a70SApple OSS Distributions 		if ((_wireCount == 0) ||
1393*a1e26a70SApple OSS Distributions 		    (_memoryEntries == NULL)) {
1394*a1e26a70SApple OSS Distributions 			return kIOReturnBadArgument;
1395*a1e26a70SApple OSS Distributions 		}
1396*a1e26a70SApple OSS Distributions 
1397*a1e26a70SApple OSS Distributions 		// Get the page list.
1398*a1e26a70SApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1399*a1e26a70SApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1400*a1e26a70SApple OSS Distributions 		pageList = getPageList(dataP);
1401*a1e26a70SApple OSS Distributions 
1402*a1e26a70SApple OSS Distributions 		// Get the number of IOPLs.
1403*a1e26a70SApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1404*a1e26a70SApple OSS Distributions 
1405*a1e26a70SApple OSS Distributions 		/*
1406*a1e26a70SApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1407*a1e26a70SApple OSS Distributions 		 * the offset. The research will go past it, so we'll need to go back to the
1408*a1e26a70SApple OSS Distributions 		 * right range at the end.
1409*a1e26a70SApple OSS Distributions 		 */
1410*a1e26a70SApple OSS Distributions 		UInt ioplIndex = 0;
1411*a1e26a70SApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1412*a1e26a70SApple OSS Distributions 			ioplIndex++;
1413*a1e26a70SApple OSS Distributions 		}
1414*a1e26a70SApple OSS Distributions 		ioplIndex--;
1415*a1e26a70SApple OSS Distributions 
1416*a1e26a70SApple OSS Distributions 		// Retrieve the IOPL info block.
1417*a1e26a70SApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1418*a1e26a70SApple OSS Distributions 
1419*a1e26a70SApple OSS Distributions 		/*
1420*a1e26a70SApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1421*a1e26a70SApple OSS Distributions 		 * array.
1422*a1e26a70SApple OSS Distributions 		 */
1423*a1e26a70SApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1424*a1e26a70SApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1425*a1e26a70SApple OSS Distributions 		} else {
1426*a1e26a70SApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1427*a1e26a70SApple OSS Distributions 		}
1428*a1e26a70SApple OSS Distributions 
1429*a1e26a70SApple OSS Distributions 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1430*a1e26a70SApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1431*a1e26a70SApple OSS Distributions 
1432*a1e26a70SApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1433*a1e26a70SApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1434*a1e26a70SApple OSS Distributions 	}
1435*a1e26a70SApple OSS Distributions 
1436*a1e26a70SApple OSS Distributions 	// enter mappings
1437*a1e26a70SApple OSS Distributions 	remain   = size;
1438*a1e26a70SApple OSS Distributions 	mapAddr  = addr;
1439*a1e26a70SApple OSS Distributions 	entryIdx = firstEntryIdx;
1440*a1e26a70SApple OSS Distributions 	entry = &ref->entries[entryIdx];
1441*a1e26a70SApple OSS Distributions 
1442*a1e26a70SApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1443*a1e26a70SApple OSS Distributions #if LOGUNALIGN
1444*a1e26a70SApple OSS Distributions 		printf("offset %qx, %qx\n", offset, entry->offset);
1445*a1e26a70SApple OSS Distributions #endif
1446*a1e26a70SApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1447*a1e26a70SApple OSS Distributions 			vm_size_t unused = 0;
1448*a1e26a70SApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1449*a1e26a70SApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1450*a1e26a70SApple OSS Distributions 			assert(KERN_SUCCESS == err);
1451*a1e26a70SApple OSS Distributions 		}
1452*a1e26a70SApple OSS Distributions 		entryOffset = offset - entry->offset;
1453*a1e26a70SApple OSS Distributions 		if (entryOffset >= entry->size) {
1454*a1e26a70SApple OSS Distributions 			panic("entryOffset");
1455*a1e26a70SApple OSS Distributions 		}
1456*a1e26a70SApple OSS Distributions 		chunk = entry->size - entryOffset;
1457*a1e26a70SApple OSS Distributions #if LOGUNALIGN
1458*a1e26a70SApple OSS Distributions 		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1459*a1e26a70SApple OSS Distributions #endif
1460*a1e26a70SApple OSS Distributions 		if (chunk) {
1461*a1e26a70SApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags = {
1462*a1e26a70SApple OSS Distributions 				.vmf_fixed = true,
1463*a1e26a70SApple OSS Distributions 				.vmf_overwrite = true,
1464*a1e26a70SApple OSS Distributions 				.vmf_return_data_addr = true,
1465*a1e26a70SApple OSS Distributions 				.vm_tag = tag,
1466*a1e26a70SApple OSS Distributions 				.vmkf_iokit_acct = true,
1467*a1e26a70SApple OSS Distributions 			};
1468*a1e26a70SApple OSS Distributions 
1469*a1e26a70SApple OSS Distributions 			if (chunk > remain) {
1470*a1e26a70SApple OSS Distributions 				chunk = remain;
1471*a1e26a70SApple OSS Distributions 			}
1472*a1e26a70SApple OSS Distributions 			mapAddrOut = mapAddr;
1473*a1e26a70SApple OSS Distributions 			if (options & kIOMapPrefault) {
1474*a1e26a70SApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1475*a1e26a70SApple OSS Distributions 
1476*a1e26a70SApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1477*a1e26a70SApple OSS Distributions 				    &mapAddrOut,
1478*a1e26a70SApple OSS Distributions 				    chunk, 0 /* mask */,
1479*a1e26a70SApple OSS Distributions 				    vmk_flags,
1480*a1e26a70SApple OSS Distributions 				    entry->entry,
1481*a1e26a70SApple OSS Distributions 				    entryOffset,
1482*a1e26a70SApple OSS Distributions 				    prot,                        // cur
1483*a1e26a70SApple OSS Distributions 				    prot,                        // max
1484*a1e26a70SApple OSS Distributions 				    &pageList[currentPageIndex],
1485*a1e26a70SApple OSS Distributions 				    nb_pages);
1486*a1e26a70SApple OSS Distributions 
1487*a1e26a70SApple OSS Distributions 				// Compute the next index in the page list.
1488*a1e26a70SApple OSS Distributions 				currentPageIndex += nb_pages;
1489*a1e26a70SApple OSS Distributions 				assert(currentPageIndex <= _pages);
1490*a1e26a70SApple OSS Distributions 			} else {
1491*a1e26a70SApple OSS Distributions #if LOGUNALIGN
1492*a1e26a70SApple OSS Distributions 				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1493*a1e26a70SApple OSS Distributions #endif
1494*a1e26a70SApple OSS Distributions 				err = mach_vm_map_kernel(map,
1495*a1e26a70SApple OSS Distributions 				    &mapAddrOut,
1496*a1e26a70SApple OSS Distributions 				    chunk, 0 /* mask */,
1497*a1e26a70SApple OSS Distributions 				    vmk_flags,
1498*a1e26a70SApple OSS Distributions 				    entry->entry,
1499*a1e26a70SApple OSS Distributions 				    entryOffset,
1500*a1e26a70SApple OSS Distributions 				    false,               // copy
1501*a1e26a70SApple OSS Distributions 				    prot,               // cur
1502*a1e26a70SApple OSS Distributions 				    prot,               // max
1503*a1e26a70SApple OSS Distributions 				    VM_INHERIT_NONE);
1504*a1e26a70SApple OSS Distributions 			}
1505*a1e26a70SApple OSS Distributions 			if (KERN_SUCCESS != err) {
1506*a1e26a70SApple OSS Distributions 				panic("map enter err %x", err);
1507*a1e26a70SApple OSS Distributions 				break;
1508*a1e26a70SApple OSS Distributions 			}
1509*a1e26a70SApple OSS Distributions #if LOGUNALIGN
1510*a1e26a70SApple OSS Distributions 			printf("mapAddr o %qx\n", mapAddrOut);
1511*a1e26a70SApple OSS Distributions #endif
1512*a1e26a70SApple OSS Distributions 			if (entryIdx == firstEntryIdx) {
1513*a1e26a70SApple OSS Distributions 				addr = mapAddrOut;
1514*a1e26a70SApple OSS Distributions 			}
1515*a1e26a70SApple OSS Distributions 			remain -= chunk;
1516*a1e26a70SApple OSS Distributions 			if (!remain) {
1517*a1e26a70SApple OSS Distributions 				break;
1518*a1e26a70SApple OSS Distributions 			}
1519*a1e26a70SApple OSS Distributions 			mach_vm_size_t entrySize;
1520*a1e26a70SApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1521*a1e26a70SApple OSS Distributions 			assert(KERN_SUCCESS == err);
1522*a1e26a70SApple OSS Distributions 			mapAddr += entrySize;
1523*a1e26a70SApple OSS Distributions 			offset  += chunk;
1524*a1e26a70SApple OSS Distributions 		}
1525*a1e26a70SApple OSS Distributions 
1526*a1e26a70SApple OSS Distributions 		entry++;
1527*a1e26a70SApple OSS Distributions 		entryIdx++;
1528*a1e26a70SApple OSS Distributions 		if (entryIdx >= ref->count) {
1529*a1e26a70SApple OSS Distributions 			err = kIOReturnOverrun;
1530*a1e26a70SApple OSS Distributions 			break;
1531*a1e26a70SApple OSS Distributions 		}
1532*a1e26a70SApple OSS Distributions 	}
1533*a1e26a70SApple OSS Distributions 
1534*a1e26a70SApple OSS Distributions 	if (KERN_SUCCESS != err) {
1535*a1e26a70SApple OSS Distributions 		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1536*a1e26a70SApple OSS Distributions 	}
1537*a1e26a70SApple OSS Distributions 
1538*a1e26a70SApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1539*a1e26a70SApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1540*a1e26a70SApple OSS Distributions 		addr = 0;
1541*a1e26a70SApple OSS Distributions 	}
1542*a1e26a70SApple OSS Distributions 	*inaddr = addr;
1543*a1e26a70SApple OSS Distributions 
1544*a1e26a70SApple OSS Distributions 	return err;
1545*a1e26a70SApple OSS Distributions }
1546*a1e26a70SApple OSS Distributions 
1547*a1e26a70SApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1548*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1549*a1e26a70SApple OSS Distributions 	IOMemoryReference * ref,
1550*a1e26a70SApple OSS Distributions 	uint64_t          * offset)
1551*a1e26a70SApple OSS Distributions {
1552*a1e26a70SApple OSS Distributions 	kern_return_t kr;
1553*a1e26a70SApple OSS Distributions 	vm_object_offset_t data_offset = 0;
1554*a1e26a70SApple OSS Distributions 	uint64_t total;
1555*a1e26a70SApple OSS Distributions 	uint32_t idx;
1556*a1e26a70SApple OSS Distributions 
1557*a1e26a70SApple OSS Distributions 	assert(ref->count);
1558*a1e26a70SApple OSS Distributions 	if (offset) {
1559*a1e26a70SApple OSS Distributions 		*offset = (uint64_t) data_offset;
1560*a1e26a70SApple OSS Distributions 	}
1561*a1e26a70SApple OSS Distributions 	total = 0;
1562*a1e26a70SApple OSS Distributions 	for (idx = 0; idx < ref->count; idx++) {
1563*a1e26a70SApple OSS Distributions 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1564*a1e26a70SApple OSS Distributions 		    &data_offset);
1565*a1e26a70SApple OSS Distributions 		if (KERN_SUCCESS != kr) {
1566*a1e26a70SApple OSS Distributions 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1567*a1e26a70SApple OSS Distributions 		} else if (0 != data_offset) {
1568*a1e26a70SApple OSS Distributions 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1569*a1e26a70SApple OSS Distributions 		}
1570*a1e26a70SApple OSS Distributions 		if (offset && !idx) {
1571*a1e26a70SApple OSS Distributions 			*offset = (uint64_t) data_offset;
1572*a1e26a70SApple OSS Distributions 		}
1573*a1e26a70SApple OSS Distributions 		total += round_page(data_offset + ref->entries[idx].size);
1574*a1e26a70SApple OSS Distributions 	}
1575*a1e26a70SApple OSS Distributions 
1576*a1e26a70SApple OSS Distributions 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1577*a1e26a70SApple OSS Distributions 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1578*a1e26a70SApple OSS Distributions 
1579*a1e26a70SApple OSS Distributions 	return total;
1580*a1e26a70SApple OSS Distributions }
1581*a1e26a70SApple OSS Distributions 
1582*a1e26a70SApple OSS Distributions 
1583*a1e26a70SApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1584*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1585*a1e26a70SApple OSS Distributions 	IOMemoryReference * ref,
1586*a1e26a70SApple OSS Distributions 	IOByteCount       * residentPageCount,
1587*a1e26a70SApple OSS Distributions 	IOByteCount       * dirtyPageCount)
1588*a1e26a70SApple OSS Distributions {
1589*a1e26a70SApple OSS Distributions 	IOReturn        err;
1590*a1e26a70SApple OSS Distributions 	IOMemoryEntry * entries;
1591*a1e26a70SApple OSS Distributions 	unsigned int resident, dirty;
1592*a1e26a70SApple OSS Distributions 	unsigned int totalResident, totalDirty;
1593*a1e26a70SApple OSS Distributions 
1594*a1e26a70SApple OSS Distributions 	totalResident = totalDirty = 0;
1595*a1e26a70SApple OSS Distributions 	err = kIOReturnSuccess;
1596*a1e26a70SApple OSS Distributions 	entries = ref->entries + ref->count;
1597*a1e26a70SApple OSS Distributions 	while (entries > &ref->entries[0]) {
1598*a1e26a70SApple OSS Distributions 		entries--;
1599*a1e26a70SApple OSS Distributions 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1600*a1e26a70SApple OSS Distributions 		if (KERN_SUCCESS != err) {
1601*a1e26a70SApple OSS Distributions 			break;
1602*a1e26a70SApple OSS Distributions 		}
1603*a1e26a70SApple OSS Distributions 		totalResident += resident;
1604*a1e26a70SApple OSS Distributions 		totalDirty    += dirty;
1605*a1e26a70SApple OSS Distributions 	}
1606*a1e26a70SApple OSS Distributions 
1607*a1e26a70SApple OSS Distributions 	if (residentPageCount) {
1608*a1e26a70SApple OSS Distributions 		*residentPageCount = totalResident;
1609*a1e26a70SApple OSS Distributions 	}
1610*a1e26a70SApple OSS Distributions 	if (dirtyPageCount) {
1611*a1e26a70SApple OSS Distributions 		*dirtyPageCount    = totalDirty;
1612*a1e26a70SApple OSS Distributions 	}
1613*a1e26a70SApple OSS Distributions 	return err;
1614*a1e26a70SApple OSS Distributions }
1615*a1e26a70SApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits        newState,
	IOOptionBits      * oldState)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	vm_purgable_t   control;
	int             totalState, state;

	// Apply 'newState' to every entry (last to first) and aggregate the
	// entries' previous states into a single value for the caller. Start
	// from non-volatile and let "stronger" states (empty, then volatile)
	// take precedence as entries report them.
	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;

		// Translate the requested IOKit state into VM control/state values.
		err = purgeableControlBits(newState, &control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// Apply to this entry; 'state' is updated in place — presumably to
		// the entry's prior purgeable state (TODO: confirm against the VM
		// implementation).
		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// Map the VM state back into kIOMemoryPurgeable* terms.
		err = purgeableStateBits(&state);
		if (KERN_SUCCESS != err) {
			break;
		}

		// Merge this entry's state into the aggregate: once the aggregate
		// is empty or volatile it can no longer weaken back to non-volatile.
		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}

	// Report the aggregated previous state even if the walk stopped early.
	if (oldState) {
		*oldState = totalState;
	}
	return err;
}
1664*a1e26a70SApple OSS Distributions 
1665*a1e26a70SApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1666*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667*a1e26a70SApple OSS Distributions 	IOMemoryReference * ref,
1668*a1e26a70SApple OSS Distributions 	task_t              newOwner,
1669*a1e26a70SApple OSS Distributions 	int                 newLedgerTag,
1670*a1e26a70SApple OSS Distributions 	IOOptionBits        newLedgerOptions)
1671*a1e26a70SApple OSS Distributions {
1672*a1e26a70SApple OSS Distributions 	IOReturn        err, totalErr;
1673*a1e26a70SApple OSS Distributions 	IOMemoryEntry * entries;
1674*a1e26a70SApple OSS Distributions 
1675*a1e26a70SApple OSS Distributions 	totalErr = kIOReturnSuccess;
1676*a1e26a70SApple OSS Distributions 	entries = ref->entries + ref->count;
1677*a1e26a70SApple OSS Distributions 	while (entries > &ref->entries[0]) {
1678*a1e26a70SApple OSS Distributions 		entries--;
1679*a1e26a70SApple OSS Distributions 
1680*a1e26a70SApple OSS Distributions 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1681*a1e26a70SApple OSS Distributions 		if (KERN_SUCCESS != err) {
1682*a1e26a70SApple OSS Distributions 			totalErr = err;
1683*a1e26a70SApple OSS Distributions 		}
1684*a1e26a70SApple OSS Distributions 	}
1685*a1e26a70SApple OSS Distributions 
1686*a1e26a70SApple OSS Distributions 	return totalErr;
1687*a1e26a70SApple OSS Distributions }
1688*a1e26a70SApple OSS Distributions 
1689*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690*a1e26a70SApple OSS Distributions 
1691*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1692*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withAddress(void *      address,
1693*a1e26a70SApple OSS Distributions     IOByteCount   length,
1694*a1e26a70SApple OSS Distributions     IODirection direction)
1695*a1e26a70SApple OSS Distributions {
1696*a1e26a70SApple OSS Distributions 	return IOMemoryDescriptor::
1697*a1e26a70SApple OSS Distributions 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1698*a1e26a70SApple OSS Distributions }
1699*a1e26a70SApple OSS Distributions 
1700*a1e26a70SApple OSS Distributions #ifndef __LP64__
1701*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1702*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703*a1e26a70SApple OSS Distributions     IOByteCount  length,
1704*a1e26a70SApple OSS Distributions     IODirection  direction,
1705*a1e26a70SApple OSS Distributions     task_t       task)
1706*a1e26a70SApple OSS Distributions {
1707*a1e26a70SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708*a1e26a70SApple OSS Distributions 	if (that) {
1709*a1e26a70SApple OSS Distributions 		if (that->initWithAddress(address, length, direction, task)) {
1710*a1e26a70SApple OSS Distributions 			return os::move(that);
1711*a1e26a70SApple OSS Distributions 		}
1712*a1e26a70SApple OSS Distributions 	}
1713*a1e26a70SApple OSS Distributions 	return nullptr;
1714*a1e26a70SApple OSS Distributions }
1715*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
1716*a1e26a70SApple OSS Distributions 
1717*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1718*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1719*a1e26a70SApple OSS Distributions 	IOPhysicalAddress       address,
1720*a1e26a70SApple OSS Distributions 	IOByteCount             length,
1721*a1e26a70SApple OSS Distributions 	IODirection             direction )
1722*a1e26a70SApple OSS Distributions {
1723*a1e26a70SApple OSS Distributions 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1724*a1e26a70SApple OSS Distributions }
1725*a1e26a70SApple OSS Distributions 
1726*a1e26a70SApple OSS Distributions #ifndef __LP64__
1727*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1728*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729*a1e26a70SApple OSS Distributions     UInt32           withCount,
1730*a1e26a70SApple OSS Distributions     IODirection      direction,
1731*a1e26a70SApple OSS Distributions     task_t           task,
1732*a1e26a70SApple OSS Distributions     bool             asReference)
1733*a1e26a70SApple OSS Distributions {
1734*a1e26a70SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735*a1e26a70SApple OSS Distributions 	if (that) {
1736*a1e26a70SApple OSS Distributions 		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737*a1e26a70SApple OSS Distributions 			return os::move(that);
1738*a1e26a70SApple OSS Distributions 		}
1739*a1e26a70SApple OSS Distributions 	}
1740*a1e26a70SApple OSS Distributions 	return nullptr;
1741*a1e26a70SApple OSS Distributions }
1742*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
1743*a1e26a70SApple OSS Distributions 
1744*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1745*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746*a1e26a70SApple OSS Distributions     mach_vm_size_t length,
1747*a1e26a70SApple OSS Distributions     IOOptionBits   options,
1748*a1e26a70SApple OSS Distributions     task_t         task)
1749*a1e26a70SApple OSS Distributions {
1750*a1e26a70SApple OSS Distributions 	IOAddressRange range = { address, length };
1751*a1e26a70SApple OSS Distributions 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1752*a1e26a70SApple OSS Distributions }
1753*a1e26a70SApple OSS Distributions 
1754*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1755*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1756*a1e26a70SApple OSS Distributions     UInt32           rangeCount,
1757*a1e26a70SApple OSS Distributions     IOOptionBits     options,
1758*a1e26a70SApple OSS Distributions     task_t           task)
1759*a1e26a70SApple OSS Distributions {
1760*a1e26a70SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761*a1e26a70SApple OSS Distributions 	if (that) {
1762*a1e26a70SApple OSS Distributions 		if (task) {
1763*a1e26a70SApple OSS Distributions 			options |= kIOMemoryTypeVirtual64;
1764*a1e26a70SApple OSS Distributions 		} else {
1765*a1e26a70SApple OSS Distributions 			options |= kIOMemoryTypePhysical64;
1766*a1e26a70SApple OSS Distributions 		}
1767*a1e26a70SApple OSS Distributions 
1768*a1e26a70SApple OSS Distributions 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1769*a1e26a70SApple OSS Distributions 			return os::move(that);
1770*a1e26a70SApple OSS Distributions 		}
1771*a1e26a70SApple OSS Distributions 	}
1772*a1e26a70SApple OSS Distributions 
1773*a1e26a70SApple OSS Distributions 	return nullptr;
1774*a1e26a70SApple OSS Distributions }
1775*a1e26a70SApple OSS Distributions 
1776*a1e26a70SApple OSS Distributions 
1777*a1e26a70SApple OSS Distributions /*
1778*a1e26a70SApple OSS Distributions  * withOptions:
1779*a1e26a70SApple OSS Distributions  *
1780*a1e26a70SApple OSS Distributions  * Create a new IOMemoryDescriptor. The buffer is made up of several
1781*a1e26a70SApple OSS Distributions  * virtual address ranges, from a given task.
1782*a1e26a70SApple OSS Distributions  *
1783*a1e26a70SApple OSS Distributions  * Passing the ranges as a reference will avoid an extra allocation.
1784*a1e26a70SApple OSS Distributions  */
1785*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1786*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withOptions(void *          buffers,
1787*a1e26a70SApple OSS Distributions     UInt32          count,
1788*a1e26a70SApple OSS Distributions     UInt32          offset,
1789*a1e26a70SApple OSS Distributions     task_t          task,
1790*a1e26a70SApple OSS Distributions     IOOptionBits    opts,
1791*a1e26a70SApple OSS Distributions     IOMapper *      mapper)
1792*a1e26a70SApple OSS Distributions {
1793*a1e26a70SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794*a1e26a70SApple OSS Distributions 
1795*a1e26a70SApple OSS Distributions 	if (self
1796*a1e26a70SApple OSS Distributions 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1797*a1e26a70SApple OSS Distributions 		return nullptr;
1798*a1e26a70SApple OSS Distributions 	}
1799*a1e26a70SApple OSS Distributions 
1800*a1e26a70SApple OSS Distributions 	return os::move(self);
1801*a1e26a70SApple OSS Distributions }
1802*a1e26a70SApple OSS Distributions 
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	// Base-class stub: a bare IOMemoryDescriptor cannot be initialized from
	// raw options; subclasses (e.g. IOGeneralMemoryDescriptor) provide the
	// real implementation, so this always reports failure.
	return false;
}
1813*a1e26a70SApple OSS Distributions 
1814*a1e26a70SApple OSS Distributions #ifndef __LP64__
1815*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1816*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817*a1e26a70SApple OSS Distributions     UInt32          withCount,
1818*a1e26a70SApple OSS Distributions     IODirection     direction,
1819*a1e26a70SApple OSS Distributions     bool            asReference)
1820*a1e26a70SApple OSS Distributions {
1821*a1e26a70SApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822*a1e26a70SApple OSS Distributions 	if (that) {
1823*a1e26a70SApple OSS Distributions 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824*a1e26a70SApple OSS Distributions 			return os::move(that);
1825*a1e26a70SApple OSS Distributions 		}
1826*a1e26a70SApple OSS Distributions 	}
1827*a1e26a70SApple OSS Distributions 	return nullptr;
1828*a1e26a70SApple OSS Distributions }
1829*a1e26a70SApple OSS Distributions 
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
    IOByteCount             offset,
    IOByteCount             length,
    IODirection             direction)
{
	// Legacy (32-bit) entry point: sub-range views are implemented by
	// IOSubMemoryDescriptor, so forward directly.
	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
}
1838*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
1839*a1e26a70SApple OSS Distributions 
1840*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1841*a1e26a70SApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842*a1e26a70SApple OSS Distributions {
1843*a1e26a70SApple OSS Distributions 	IOGeneralMemoryDescriptor *origGenMD =
1844*a1e26a70SApple OSS Distributions 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845*a1e26a70SApple OSS Distributions 
1846*a1e26a70SApple OSS Distributions 	if (origGenMD) {
1847*a1e26a70SApple OSS Distributions 		return IOGeneralMemoryDescriptor::
1848*a1e26a70SApple OSS Distributions 		       withPersistentMemoryDescriptor(origGenMD);
1849*a1e26a70SApple OSS Distributions 	} else {
1850*a1e26a70SApple OSS Distributions 		return nullptr;
1851*a1e26a70SApple OSS Distributions 	}
1852*a1e26a70SApple OSS Distributions }
1853*a1e26a70SApple OSS Distributions 
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	// Create (or reuse) a memory reference covering the original
	// descriptor's memory.
	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// If the original's own reference was reused, the original descriptor
	// already represents these pages: hand it back retained and drop the
	// extra reference taken above.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	// Otherwise build a fresh descriptor backed by the new reference.
	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	// NOTE(review): if the allocation or the init below fails, memRef is not
	// visibly released on this path — confirm that
	// initWithOptions(kIOMemoryTypePersistentMD) assumes ownership even on
	// failure.
	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1879*a1e26a70SApple OSS Distributions 
1880*a1e26a70SApple OSS Distributions #ifndef __LP64__
1881*a1e26a70SApple OSS Distributions bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1882*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1883*a1e26a70SApple OSS Distributions     IOByteCount   withLength,
1884*a1e26a70SApple OSS Distributions     IODirection withDirection)
1885*a1e26a70SApple OSS Distributions {
1886*a1e26a70SApple OSS Distributions 	_singleRange.v.address = (vm_offset_t) address;
1887*a1e26a70SApple OSS Distributions 	_singleRange.v.length  = withLength;
1888*a1e26a70SApple OSS Distributions 
1889*a1e26a70SApple OSS Distributions 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1890*a1e26a70SApple OSS Distributions }
1891*a1e26a70SApple OSS Distributions 
1892*a1e26a70SApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1893*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1894*a1e26a70SApple OSS Distributions     IOByteCount    withLength,
1895*a1e26a70SApple OSS Distributions     IODirection  withDirection,
1896*a1e26a70SApple OSS Distributions     task_t       withTask)
1897*a1e26a70SApple OSS Distributions {
1898*a1e26a70SApple OSS Distributions 	_singleRange.v.address = address;
1899*a1e26a70SApple OSS Distributions 	_singleRange.v.length  = withLength;
1900*a1e26a70SApple OSS Distributions 
1901*a1e26a70SApple OSS Distributions 	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1902*a1e26a70SApple OSS Distributions }
1903*a1e26a70SApple OSS Distributions 
1904*a1e26a70SApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1905*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1906*a1e26a70SApple OSS Distributions 	IOPhysicalAddress      address,
1907*a1e26a70SApple OSS Distributions 	IOByteCount            withLength,
1908*a1e26a70SApple OSS Distributions 	IODirection            withDirection )
1909*a1e26a70SApple OSS Distributions {
1910*a1e26a70SApple OSS Distributions 	_singleRange.p.address = address;
1911*a1e26a70SApple OSS Distributions 	_singleRange.p.length  = withLength;
1912*a1e26a70SApple OSS Distributions 
1913*a1e26a70SApple OSS Distributions 	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1914*a1e26a70SApple OSS Distributions }
1915*a1e26a70SApple OSS Distributions 
1916*a1e26a70SApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1917*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918*a1e26a70SApple OSS Distributions 	IOPhysicalRange * ranges,
1919*a1e26a70SApple OSS Distributions 	UInt32            count,
1920*a1e26a70SApple OSS Distributions 	IODirection       direction,
1921*a1e26a70SApple OSS Distributions 	bool              reference)
1922*a1e26a70SApple OSS Distributions {
1923*a1e26a70SApple OSS Distributions 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924*a1e26a70SApple OSS Distributions 
1925*a1e26a70SApple OSS Distributions 	if (reference) {
1926*a1e26a70SApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1927*a1e26a70SApple OSS Distributions 	}
1928*a1e26a70SApple OSS Distributions 
1929*a1e26a70SApple OSS Distributions 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930*a1e26a70SApple OSS Distributions }
1931*a1e26a70SApple OSS Distributions 
1932*a1e26a70SApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1933*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1934*a1e26a70SApple OSS Distributions 	IOVirtualRange * ranges,
1935*a1e26a70SApple OSS Distributions 	UInt32           count,
1936*a1e26a70SApple OSS Distributions 	IODirection      direction,
1937*a1e26a70SApple OSS Distributions 	task_t           task,
1938*a1e26a70SApple OSS Distributions 	bool             reference)
1939*a1e26a70SApple OSS Distributions {
1940*a1e26a70SApple OSS Distributions 	IOOptionBits mdOpts = direction;
1941*a1e26a70SApple OSS Distributions 
1942*a1e26a70SApple OSS Distributions 	if (reference) {
1943*a1e26a70SApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1944*a1e26a70SApple OSS Distributions 	}
1945*a1e26a70SApple OSS Distributions 
1946*a1e26a70SApple OSS Distributions 	if (task) {
1947*a1e26a70SApple OSS Distributions 		mdOpts |= kIOMemoryTypeVirtual;
1948*a1e26a70SApple OSS Distributions 
1949*a1e26a70SApple OSS Distributions 		// Auto-prepare if this is a kernel memory descriptor as very few
1950*a1e26a70SApple OSS Distributions 		// clients bother to prepare() kernel memory.
1951*a1e26a70SApple OSS Distributions 		// But it was not enforced so what are you going to do?
1952*a1e26a70SApple OSS Distributions 		if (task == kernel_task) {
1953*a1e26a70SApple OSS Distributions 			mdOpts |= kIOMemoryAutoPrepare;
1954*a1e26a70SApple OSS Distributions 		}
1955*a1e26a70SApple OSS Distributions 	} else {
1956*a1e26a70SApple OSS Distributions 		mdOpts |= kIOMemoryTypePhysical;
1957*a1e26a70SApple OSS Distributions 	}
1958*a1e26a70SApple OSS Distributions 
1959*a1e26a70SApple OSS Distributions 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960*a1e26a70SApple OSS Distributions }
1961*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
1962*a1e26a70SApple OSS Distributions 
1963*a1e26a70SApple OSS Distributions /*
1964*a1e26a70SApple OSS Distributions  * initWithOptions:
1965*a1e26a70SApple OSS Distributions  *
1966*a1e26a70SApple OSS Distributions  *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1967*a1e26a70SApple OSS Distributions  * from a given task, several physical ranges, an UPL from the ubc
1968*a1e26a70SApple OSS Distributions  * system or a uio (may be 64bit) from the BSD subsystem.
1969*a1e26a70SApple OSS Distributions  *
1970*a1e26a70SApple OSS Distributions  * Passing the ranges as a reference will avoid an extra allocation.
1971*a1e26a70SApple OSS Distributions  *
1972*a1e26a70SApple OSS Distributions  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973*a1e26a70SApple OSS Distributions  * existing instance -- note this behavior is not commonly supported in other
1974*a1e26a70SApple OSS Distributions  * I/O Kit classes, although it is supported here.
1975*a1e26a70SApple OSS Distributions  */
1976*a1e26a70SApple OSS Distributions 
1977*a1e26a70SApple OSS Distributions bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1978*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
1979*a1e26a70SApple OSS Distributions     UInt32       count,
1980*a1e26a70SApple OSS Distributions     UInt32       offset,
1981*a1e26a70SApple OSS Distributions     task_t       task,
1982*a1e26a70SApple OSS Distributions     IOOptionBits options,
1983*a1e26a70SApple OSS Distributions     IOMapper *   mapper)
1984*a1e26a70SApple OSS Distributions {
1985*a1e26a70SApple OSS Distributions 	IOOptionBits type = options & kIOMemoryTypeMask;
1986*a1e26a70SApple OSS Distributions 
1987*a1e26a70SApple OSS Distributions #ifndef __LP64__
1988*a1e26a70SApple OSS Distributions 	if (task
1989*a1e26a70SApple OSS Distributions 	    && (kIOMemoryTypeVirtual == type)
1990*a1e26a70SApple OSS Distributions 	    && vm_map_is_64bit(get_task_map(task))
1991*a1e26a70SApple OSS Distributions 	    && ((IOVirtualRange *) buffers)->address) {
1992*a1e26a70SApple OSS Distributions 		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1993*a1e26a70SApple OSS Distributions 		return false;
1994*a1e26a70SApple OSS Distributions 	}
1995*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
1996*a1e26a70SApple OSS Distributions 
1997*a1e26a70SApple OSS Distributions 	// Grab the original MD's configuation data to initialse the
1998*a1e26a70SApple OSS Distributions 	// arguments to this function.
1999*a1e26a70SApple OSS Distributions 	if (kIOMemoryTypePersistentMD == type) {
2000*a1e26a70SApple OSS Distributions 		IOMDPersistentInitData *initData = (typeof(initData))buffers;
2001*a1e26a70SApple OSS Distributions 		const IOGeneralMemoryDescriptor *orig = initData->fMD;
2002*a1e26a70SApple OSS Distributions 		ioGMDData *dataP = getDataP(orig->_memoryEntries);
2003*a1e26a70SApple OSS Distributions 
2004*a1e26a70SApple OSS Distributions 		// Only accept persistent memory descriptors with valid dataP data.
2005*a1e26a70SApple OSS Distributions 		assert(orig->_rangesCount == 1);
2006*a1e26a70SApple OSS Distributions 		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
2007*a1e26a70SApple OSS Distributions 			return false;
2008*a1e26a70SApple OSS Distributions 		}
2009*a1e26a70SApple OSS Distributions 
2010*a1e26a70SApple OSS Distributions 		_memRef = initData->fMemRef; // Grab the new named entry
2011*a1e26a70SApple OSS Distributions 		options = orig->_flags & ~kIOMemoryAsReference;
2012*a1e26a70SApple OSS Distributions 		type = options & kIOMemoryTypeMask;
2013*a1e26a70SApple OSS Distributions 		buffers = orig->_ranges.v;
2014*a1e26a70SApple OSS Distributions 		count = orig->_rangesCount;
2015*a1e26a70SApple OSS Distributions 
2016*a1e26a70SApple OSS Distributions 		// Now grab the original task and whatever mapper was previously used
2017*a1e26a70SApple OSS Distributions 		task = orig->_task;
2018*a1e26a70SApple OSS Distributions 		mapper = dataP->fMapper;
2019*a1e26a70SApple OSS Distributions 
2020*a1e26a70SApple OSS Distributions 		// We are ready to go through the original initialisation now
2021*a1e26a70SApple OSS Distributions 	}
2022*a1e26a70SApple OSS Distributions 
2023*a1e26a70SApple OSS Distributions 	switch (type) {
2024*a1e26a70SApple OSS Distributions 	case kIOMemoryTypeUIO:
2025*a1e26a70SApple OSS Distributions 	case kIOMemoryTypeVirtual:
2026*a1e26a70SApple OSS Distributions #ifndef __LP64__
2027*a1e26a70SApple OSS Distributions 	case kIOMemoryTypeVirtual64:
2028*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2029*a1e26a70SApple OSS Distributions 		assert(task);
2030*a1e26a70SApple OSS Distributions 		if (!task) {
2031*a1e26a70SApple OSS Distributions 			return false;
2032*a1e26a70SApple OSS Distributions 		}
2033*a1e26a70SApple OSS Distributions 		break;
2034*a1e26a70SApple OSS Distributions 
2035*a1e26a70SApple OSS Distributions 	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
2036*a1e26a70SApple OSS Distributions #ifndef __LP64__
2037*a1e26a70SApple OSS Distributions 	case kIOMemoryTypePhysical64:
2038*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2039*a1e26a70SApple OSS Distributions 	case kIOMemoryTypeUPL:
2040*a1e26a70SApple OSS Distributions 		assert(!task);
2041*a1e26a70SApple OSS Distributions 		break;
2042*a1e26a70SApple OSS Distributions 	default:
2043*a1e26a70SApple OSS Distributions 		return false; /* bad argument */
2044*a1e26a70SApple OSS Distributions 	}
2045*a1e26a70SApple OSS Distributions 
2046*a1e26a70SApple OSS Distributions 	assert(buffers);
2047*a1e26a70SApple OSS Distributions 	assert(count);
2048*a1e26a70SApple OSS Distributions 
2049*a1e26a70SApple OSS Distributions 	/*
2050*a1e26a70SApple OSS Distributions 	 * We can check the _initialized  instance variable before having ever set
2051*a1e26a70SApple OSS Distributions 	 * it to an initial value because I/O Kit guarantees that all our instance
2052*a1e26a70SApple OSS Distributions 	 * variables are zeroed on an object's allocation.
2053*a1e26a70SApple OSS Distributions 	 */
2054*a1e26a70SApple OSS Distributions 
2055*a1e26a70SApple OSS Distributions 	if (_initialized) {
2056*a1e26a70SApple OSS Distributions 		/*
2057*a1e26a70SApple OSS Distributions 		 * An existing memory descriptor is being retargeted to point to
2058*a1e26a70SApple OSS Distributions 		 * somewhere else.  Clean up our present state.
2059*a1e26a70SApple OSS Distributions 		 */
2060*a1e26a70SApple OSS Distributions 		IOOptionBits type = _flags & kIOMemoryTypeMask;
2061*a1e26a70SApple OSS Distributions 		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2062*a1e26a70SApple OSS Distributions 			while (_wireCount) {
2063*a1e26a70SApple OSS Distributions 				complete();
2064*a1e26a70SApple OSS Distributions 			}
2065*a1e26a70SApple OSS Distributions 		}
2066*a1e26a70SApple OSS Distributions 		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2067*a1e26a70SApple OSS Distributions 			if (kIOMemoryTypeUIO == type) {
2068*a1e26a70SApple OSS Distributions 				uio_free((uio_t) _ranges.v);
2069*a1e26a70SApple OSS Distributions 			}
2070*a1e26a70SApple OSS Distributions #ifndef __LP64__
2071*a1e26a70SApple OSS Distributions 			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2072*a1e26a70SApple OSS Distributions 				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2073*a1e26a70SApple OSS Distributions 			}
2074*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2075*a1e26a70SApple OSS Distributions 			else {
2076*a1e26a70SApple OSS Distributions 				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2077*a1e26a70SApple OSS Distributions 			}
2078*a1e26a70SApple OSS Distributions 		}
2079*a1e26a70SApple OSS Distributions 
2080*a1e26a70SApple OSS Distributions 		options |= (kIOMemoryRedirected & _flags);
2081*a1e26a70SApple OSS Distributions 		if (!(kIOMemoryRedirected & options)) {
2082*a1e26a70SApple OSS Distributions 			if (_memRef) {
2083*a1e26a70SApple OSS Distributions 				memoryReferenceRelease(_memRef);
2084*a1e26a70SApple OSS Distributions 				_memRef = NULL;
2085*a1e26a70SApple OSS Distributions 			}
2086*a1e26a70SApple OSS Distributions 			if (_mappings) {
2087*a1e26a70SApple OSS Distributions 				_mappings->flushCollection();
2088*a1e26a70SApple OSS Distributions 			}
2089*a1e26a70SApple OSS Distributions 		}
2090*a1e26a70SApple OSS Distributions 	} else {
2091*a1e26a70SApple OSS Distributions 		if (!super::init()) {
2092*a1e26a70SApple OSS Distributions 			return false;
2093*a1e26a70SApple OSS Distributions 		}
2094*a1e26a70SApple OSS Distributions 		_initialized = true;
2095*a1e26a70SApple OSS Distributions 	}
2096*a1e26a70SApple OSS Distributions 
2097*a1e26a70SApple OSS Distributions 	// Grab the appropriate mapper
2098*a1e26a70SApple OSS Distributions 	if (kIOMemoryHostOrRemote & options) {
2099*a1e26a70SApple OSS Distributions 		options |= kIOMemoryMapperNone;
2100*a1e26a70SApple OSS Distributions 	}
2101*a1e26a70SApple OSS Distributions 	if (kIOMemoryMapperNone & options) {
2102*a1e26a70SApple OSS Distributions 		mapper = NULL; // No Mapper
2103*a1e26a70SApple OSS Distributions 	} else if (mapper == kIOMapperSystem) {
2104*a1e26a70SApple OSS Distributions 		IOMapper::checkForSystemMapper();
2105*a1e26a70SApple OSS Distributions 		gIOSystemMapper = mapper = IOMapper::gSystem;
2106*a1e26a70SApple OSS Distributions 	}
2107*a1e26a70SApple OSS Distributions 
2108*a1e26a70SApple OSS Distributions 	// Remove the dynamic internal use flags from the initial setting
2109*a1e26a70SApple OSS Distributions 	options               &= ~(kIOMemoryPreparedReadOnly);
2110*a1e26a70SApple OSS Distributions 	_flags                 = options;
2111*a1e26a70SApple OSS Distributions 	_task                  = task;
2112*a1e26a70SApple OSS Distributions 
2113*a1e26a70SApple OSS Distributions #ifndef __LP64__
2114*a1e26a70SApple OSS Distributions 	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
2115*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2116*a1e26a70SApple OSS Distributions 
2117*a1e26a70SApple OSS Distributions 	_dmaReferences = 0;
2118*a1e26a70SApple OSS Distributions 	__iomd_reservedA = 0;
2119*a1e26a70SApple OSS Distributions 	__iomd_reservedB = 0;
2120*a1e26a70SApple OSS Distributions 	_highestPage = 0;
2121*a1e26a70SApple OSS Distributions 
2122*a1e26a70SApple OSS Distributions 	if (kIOMemoryThreadSafe & options) {
2123*a1e26a70SApple OSS Distributions 		if (!_prepareLock) {
2124*a1e26a70SApple OSS Distributions 			_prepareLock = IOLockAlloc();
2125*a1e26a70SApple OSS Distributions 		}
2126*a1e26a70SApple OSS Distributions 	} else if (_prepareLock) {
2127*a1e26a70SApple OSS Distributions 		IOLockFree(_prepareLock);
2128*a1e26a70SApple OSS Distributions 		_prepareLock = NULL;
2129*a1e26a70SApple OSS Distributions 	}
2130*a1e26a70SApple OSS Distributions 
2131*a1e26a70SApple OSS Distributions 	if (kIOMemoryTypeUPL == type) {
2132*a1e26a70SApple OSS Distributions 		ioGMDData *dataP;
2133*a1e26a70SApple OSS Distributions 		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2134*a1e26a70SApple OSS Distributions 
2135*a1e26a70SApple OSS Distributions 		if (!initMemoryEntries(dataSize, mapper)) {
2136*a1e26a70SApple OSS Distributions 			return false;
2137*a1e26a70SApple OSS Distributions 		}
2138*a1e26a70SApple OSS Distributions 		dataP = getDataP(_memoryEntries);
2139*a1e26a70SApple OSS Distributions 		dataP->fPageCnt = 0;
2140*a1e26a70SApple OSS Distributions 		switch (kIOMemoryDirectionMask & options) {
2141*a1e26a70SApple OSS Distributions 		case kIODirectionOut:
2142*a1e26a70SApple OSS Distributions 			dataP->fDMAAccess = kIODMAMapReadAccess;
2143*a1e26a70SApple OSS Distributions 			break;
2144*a1e26a70SApple OSS Distributions 		case kIODirectionIn:
2145*a1e26a70SApple OSS Distributions 			dataP->fDMAAccess = kIODMAMapWriteAccess;
2146*a1e26a70SApple OSS Distributions 			break;
2147*a1e26a70SApple OSS Distributions 		case kIODirectionNone:
2148*a1e26a70SApple OSS Distributions 		case kIODirectionOutIn:
2149*a1e26a70SApple OSS Distributions 		default:
2150*a1e26a70SApple OSS Distributions 			panic("bad dir for upl 0x%x", (int) options);
2151*a1e26a70SApple OSS Distributions 			break;
2152*a1e26a70SApple OSS Distributions 		}
2153*a1e26a70SApple OSS Distributions 		//       _wireCount++;	// UPLs start out life wired
2154*a1e26a70SApple OSS Distributions 
2155*a1e26a70SApple OSS Distributions 		_length    = count;
2156*a1e26a70SApple OSS Distributions 		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2157*a1e26a70SApple OSS Distributions 
2158*a1e26a70SApple OSS Distributions 		ioPLBlock iopl;
2159*a1e26a70SApple OSS Distributions 		iopl.fIOPL = (upl_t) buffers;
2160*a1e26a70SApple OSS Distributions 		upl_set_referenced(iopl.fIOPL, true);
2161*a1e26a70SApple OSS Distributions 		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2162*a1e26a70SApple OSS Distributions 
2163*a1e26a70SApple OSS Distributions 		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2164*a1e26a70SApple OSS Distributions 			panic("short external upl");
2165*a1e26a70SApple OSS Distributions 		}
2166*a1e26a70SApple OSS Distributions 
2167*a1e26a70SApple OSS Distributions 		_highestPage = upl_get_highest_page(iopl.fIOPL);
2168*a1e26a70SApple OSS Distributions 		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2169*a1e26a70SApple OSS Distributions 
2170*a1e26a70SApple OSS Distributions 		// Set the flag kIOPLOnDevice convieniently equal to 1
2171*a1e26a70SApple OSS Distributions 		iopl.fFlags  = pageList->device | kIOPLExternUPL;
2172*a1e26a70SApple OSS Distributions 		if (!pageList->device) {
2173*a1e26a70SApple OSS Distributions 			// Pre-compute the offset into the UPL's page list
2174*a1e26a70SApple OSS Distributions 			pageList = &pageList[atop_32(offset)];
2175*a1e26a70SApple OSS Distributions 			offset &= PAGE_MASK;
2176*a1e26a70SApple OSS Distributions 		}
2177*a1e26a70SApple OSS Distributions 		iopl.fIOMDOffset = 0;
2178*a1e26a70SApple OSS Distributions 		iopl.fMappedPage = 0;
2179*a1e26a70SApple OSS Distributions 		iopl.fPageInfo = (vm_address_t) pageList;
2180*a1e26a70SApple OSS Distributions 		iopl.fPageOffset = offset;
2181*a1e26a70SApple OSS Distributions 		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
2182*a1e26a70SApple OSS Distributions 	} else {
2183*a1e26a70SApple OSS Distributions 		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2184*a1e26a70SApple OSS Distributions 		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2185*a1e26a70SApple OSS Distributions 
2186*a1e26a70SApple OSS Distributions 		// Initialize the memory descriptor
2187*a1e26a70SApple OSS Distributions 		if (options & kIOMemoryAsReference) {
2188*a1e26a70SApple OSS Distributions #ifndef __LP64__
2189*a1e26a70SApple OSS Distributions 			_rangesIsAllocated = false;
2190*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2191*a1e26a70SApple OSS Distributions 
2192*a1e26a70SApple OSS Distributions 			// Hack assignment to get the buffer arg into _ranges.
2193*a1e26a70SApple OSS Distributions 			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2194*a1e26a70SApple OSS Distributions 			// work, C++ sigh.
2195*a1e26a70SApple OSS Distributions 			// This also initialises the uio & physical ranges.
2196*a1e26a70SApple OSS Distributions 			_ranges.v = (IOVirtualRange *) buffers;
2197*a1e26a70SApple OSS Distributions 		} else {
2198*a1e26a70SApple OSS Distributions #ifndef __LP64__
2199*a1e26a70SApple OSS Distributions 			_rangesIsAllocated = true;
2200*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2201*a1e26a70SApple OSS Distributions 			switch (type) {
2202*a1e26a70SApple OSS Distributions 			case kIOMemoryTypeUIO:
2203*a1e26a70SApple OSS Distributions 				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2204*a1e26a70SApple OSS Distributions 				break;
2205*a1e26a70SApple OSS Distributions 
2206*a1e26a70SApple OSS Distributions #ifndef __LP64__
2207*a1e26a70SApple OSS Distributions 			case kIOMemoryTypeVirtual64:
2208*a1e26a70SApple OSS Distributions 			case kIOMemoryTypePhysical64:
2209*a1e26a70SApple OSS Distributions 				if (count == 1
2210*a1e26a70SApple OSS Distributions #ifndef __arm__
2211*a1e26a70SApple OSS Distributions 				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2212*a1e26a70SApple OSS Distributions #endif
2213*a1e26a70SApple OSS Distributions 				    ) {
2214*a1e26a70SApple OSS Distributions 					if (type == kIOMemoryTypeVirtual64) {
2215*a1e26a70SApple OSS Distributions 						type = kIOMemoryTypeVirtual;
2216*a1e26a70SApple OSS Distributions 					} else {
2217*a1e26a70SApple OSS Distributions 						type = kIOMemoryTypePhysical;
2218*a1e26a70SApple OSS Distributions 					}
2219*a1e26a70SApple OSS Distributions 					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2220*a1e26a70SApple OSS Distributions 					_rangesIsAllocated = false;
2221*a1e26a70SApple OSS Distributions 					_ranges.v = &_singleRange.v;
2222*a1e26a70SApple OSS Distributions 					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
2223*a1e26a70SApple OSS Distributions 					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
2224*a1e26a70SApple OSS Distributions 					break;
2225*a1e26a70SApple OSS Distributions 				}
2226*a1e26a70SApple OSS Distributions 				_ranges.v64 = IONew(IOAddressRange, count);
2227*a1e26a70SApple OSS Distributions 				if (!_ranges.v64) {
2228*a1e26a70SApple OSS Distributions 					return false;
2229*a1e26a70SApple OSS Distributions 				}
2230*a1e26a70SApple OSS Distributions 				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2231*a1e26a70SApple OSS Distributions 				break;
2232*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2233*a1e26a70SApple OSS Distributions 			case kIOMemoryTypeVirtual:
2234*a1e26a70SApple OSS Distributions 			case kIOMemoryTypePhysical:
2235*a1e26a70SApple OSS Distributions 				if (count == 1) {
2236*a1e26a70SApple OSS Distributions 					_flags |= kIOMemoryAsReference;
2237*a1e26a70SApple OSS Distributions #ifndef __LP64__
2238*a1e26a70SApple OSS Distributions 					_rangesIsAllocated = false;
2239*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2240*a1e26a70SApple OSS Distributions 					_ranges.v = &_singleRange.v;
2241*a1e26a70SApple OSS Distributions 				} else {
2242*a1e26a70SApple OSS Distributions 					_ranges.v = IONew(IOVirtualRange, count);
2243*a1e26a70SApple OSS Distributions 					if (!_ranges.v) {
2244*a1e26a70SApple OSS Distributions 						return false;
2245*a1e26a70SApple OSS Distributions 					}
2246*a1e26a70SApple OSS Distributions 				}
2247*a1e26a70SApple OSS Distributions 				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2248*a1e26a70SApple OSS Distributions 				break;
2249*a1e26a70SApple OSS Distributions 			}
2250*a1e26a70SApple OSS Distributions 		}
2251*a1e26a70SApple OSS Distributions 		_rangesCount = count;
2252*a1e26a70SApple OSS Distributions 
2253*a1e26a70SApple OSS Distributions 		// Find starting address within the vector of ranges
2254*a1e26a70SApple OSS Distributions 		Ranges vec = _ranges;
2255*a1e26a70SApple OSS Distributions 		mach_vm_size_t totalLength = 0;
2256*a1e26a70SApple OSS Distributions 		unsigned int ind, pages = 0;
2257*a1e26a70SApple OSS Distributions 		for (ind = 0; ind < count; ind++) {
2258*a1e26a70SApple OSS Distributions 			mach_vm_address_t addr;
2259*a1e26a70SApple OSS Distributions 			mach_vm_address_t endAddr;
2260*a1e26a70SApple OSS Distributions 			mach_vm_size_t    len;
2261*a1e26a70SApple OSS Distributions 
2262*a1e26a70SApple OSS Distributions 			// addr & len are returned by this function
2263*a1e26a70SApple OSS Distributions 			getAddrLenForInd(addr, len, type, vec, ind, _task);
2264*a1e26a70SApple OSS Distributions 			if (_task) {
2265*a1e26a70SApple OSS Distributions 				mach_vm_size_t phys_size;
2266*a1e26a70SApple OSS Distributions 				kern_return_t kret;
2267*a1e26a70SApple OSS Distributions 				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2268*a1e26a70SApple OSS Distributions 				if (KERN_SUCCESS != kret) {
2269*a1e26a70SApple OSS Distributions 					break;
2270*a1e26a70SApple OSS Distributions 				}
2271*a1e26a70SApple OSS Distributions 				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2272*a1e26a70SApple OSS Distributions 					break;
2273*a1e26a70SApple OSS Distributions 				}
2274*a1e26a70SApple OSS Distributions 			} else {
2275*a1e26a70SApple OSS Distributions 				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2276*a1e26a70SApple OSS Distributions 					break;
2277*a1e26a70SApple OSS Distributions 				}
2278*a1e26a70SApple OSS Distributions 				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2279*a1e26a70SApple OSS Distributions 					break;
2280*a1e26a70SApple OSS Distributions 				}
2281*a1e26a70SApple OSS Distributions 				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2282*a1e26a70SApple OSS Distributions 					break;
2283*a1e26a70SApple OSS Distributions 				}
2284*a1e26a70SApple OSS Distributions 			}
2285*a1e26a70SApple OSS Distributions 			if (os_add_overflow(totalLength, len, &totalLength)) {
2286*a1e26a70SApple OSS Distributions 				break;
2287*a1e26a70SApple OSS Distributions 			}
2288*a1e26a70SApple OSS Distributions 			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2289*a1e26a70SApple OSS Distributions 				uint64_t highPage = atop_64(addr + len - 1);
2290*a1e26a70SApple OSS Distributions 				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2291*a1e26a70SApple OSS Distributions 					_highestPage = (ppnum_t) highPage;
2292*a1e26a70SApple OSS Distributions 					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2293*a1e26a70SApple OSS Distributions 				}
2294*a1e26a70SApple OSS Distributions 			}
2295*a1e26a70SApple OSS Distributions 		}
2296*a1e26a70SApple OSS Distributions 		if ((ind < count)
2297*a1e26a70SApple OSS Distributions 		    || (totalLength != ((IOByteCount) totalLength))) {
2298*a1e26a70SApple OSS Distributions 			return false;                                   /* overflow */
2299*a1e26a70SApple OSS Distributions 		}
2300*a1e26a70SApple OSS Distributions 		_length      = totalLength;
2301*a1e26a70SApple OSS Distributions 		_pages       = pages;
2302*a1e26a70SApple OSS Distributions 
2303*a1e26a70SApple OSS Distributions 		// Auto-prepare memory at creation time.
2304*a1e26a70SApple OSS Distributions 		// Implied completion when descriptor is free-ed
2305*a1e26a70SApple OSS Distributions 
2306*a1e26a70SApple OSS Distributions 		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2307*a1e26a70SApple OSS Distributions 			_wireCount++; // Physical MDs are, by definition, wired
2308*a1e26a70SApple OSS Distributions 		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2309*a1e26a70SApple OSS Distributions 			ioGMDData *dataP;
2310*a1e26a70SApple OSS Distributions 			unsigned dataSize;
2311*a1e26a70SApple OSS Distributions 
2312*a1e26a70SApple OSS Distributions 			if (_pages > atop_64(max_mem)) {
2313*a1e26a70SApple OSS Distributions 				return false;
2314*a1e26a70SApple OSS Distributions 			}
2315*a1e26a70SApple OSS Distributions 
2316*a1e26a70SApple OSS Distributions 			dataSize = computeDataSize(_pages, /* upls */ count * 2);
2317*a1e26a70SApple OSS Distributions 			if (!initMemoryEntries(dataSize, mapper)) {
2318*a1e26a70SApple OSS Distributions 				return false;
2319*a1e26a70SApple OSS Distributions 			}
2320*a1e26a70SApple OSS Distributions 			dataP = getDataP(_memoryEntries);
2321*a1e26a70SApple OSS Distributions 			dataP->fPageCnt = _pages;
2322*a1e26a70SApple OSS Distributions 
2323*a1e26a70SApple OSS Distributions 			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2324*a1e26a70SApple OSS Distributions 			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2325*a1e26a70SApple OSS Distributions 				_kernelTag = IOMemoryTag(kernel_map);
2326*a1e26a70SApple OSS Distributions 				if (_kernelTag == gIOSurfaceTag) {
2327*a1e26a70SApple OSS Distributions 					_userTag = VM_MEMORY_IOSURFACE;
2328*a1e26a70SApple OSS Distributions 				}
2329*a1e26a70SApple OSS Distributions 			}
2330*a1e26a70SApple OSS Distributions 
2331*a1e26a70SApple OSS Distributions 			if ((kIOMemoryPersistent & _flags) && !_memRef) {
2332*a1e26a70SApple OSS Distributions 				IOReturn
2333*a1e26a70SApple OSS Distributions 				    err = memoryReferenceCreate(0, &_memRef);
2334*a1e26a70SApple OSS Distributions 				if (kIOReturnSuccess != err) {
2335*a1e26a70SApple OSS Distributions 					return false;
2336*a1e26a70SApple OSS Distributions 				}
2337*a1e26a70SApple OSS Distributions 			}
2338*a1e26a70SApple OSS Distributions 
2339*a1e26a70SApple OSS Distributions 			if ((_flags & kIOMemoryAutoPrepare)
2340*a1e26a70SApple OSS Distributions 			    && prepare() != kIOReturnSuccess) {
2341*a1e26a70SApple OSS Distributions 				return false;
2342*a1e26a70SApple OSS Distributions 			}
2343*a1e26a70SApple OSS Distributions 		}
2344*a1e26a70SApple OSS Distributions 	}
2345*a1e26a70SApple OSS Distributions 
2346*a1e26a70SApple OSS Distributions 	return true;
2347*a1e26a70SApple OSS Distributions }
2348*a1e26a70SApple OSS Distributions 
2349*a1e26a70SApple OSS Distributions /*
2350*a1e26a70SApple OSS Distributions  * free
2351*a1e26a70SApple OSS Distributions  *
2352*a1e26a70SApple OSS Distributions  * Free resources.
2353*a1e26a70SApple OSS Distributions  */
void
IOGeneralMemoryDescriptor::free()
{
	// Tear down the descriptor: undo wirings/DMA mappings, release range
	// storage, the named entry, the reserved area, and the prepare lock.
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach any device pager's back-pointer to us under the lock so a
	// concurrent pager callback no longer sees this object.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical MDs are never complete()d; just drop any DMA mapping.
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Undo every outstanding prepare() (implied completion on free).
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Release the range storage unless it is caller-owned (by-reference);
	// the correct free routine depends on the descriptor type.
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2417*a1e26a70SApple OSS Distributions 
2418*a1e26a70SApple OSS Distributions #ifndef __LP64__
// Deprecated pre-LP64 entry point; any caller reaching this is a fatal error.
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}
2424*a1e26a70SApple OSS Distributions 
// Deprecated pre-LP64 entry point; any caller reaching this is a fatal error.
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
2430*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2431*a1e26a70SApple OSS Distributions 
2432*a1e26a70SApple OSS Distributions /*
2433*a1e26a70SApple OSS Distributions  * getDirection:
2434*a1e26a70SApple OSS Distributions  *
2435*a1e26a70SApple OSS Distributions  * Get the direction of the transfer.
2436*a1e26a70SApple OSS Distributions  */
2437*a1e26a70SApple OSS Distributions IODirection
getDirection() const2438*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getDirection() const
2439*a1e26a70SApple OSS Distributions {
2440*a1e26a70SApple OSS Distributions #ifndef __LP64__
2441*a1e26a70SApple OSS Distributions 	if (_direction) {
2442*a1e26a70SApple OSS Distributions 		return _direction;
2443*a1e26a70SApple OSS Distributions 	}
2444*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2445*a1e26a70SApple OSS Distributions 	return (IODirection) (_flags & kIOMemoryDirectionMask);
2446*a1e26a70SApple OSS Distributions }
2447*a1e26a70SApple OSS Distributions 
2448*a1e26a70SApple OSS Distributions /*
2449*a1e26a70SApple OSS Distributions  * getLength:
2450*a1e26a70SApple OSS Distributions  *
2451*a1e26a70SApple OSS Distributions  * Get the length of the transfer (over all ranges).
2452*a1e26a70SApple OSS Distributions  */
2453*a1e26a70SApple OSS Distributions IOByteCount
getLength() const2454*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getLength() const
2455*a1e26a70SApple OSS Distributions {
2456*a1e26a70SApple OSS Distributions 	return _length;
2457*a1e26a70SApple OSS Distributions }
2458*a1e26a70SApple OSS Distributions 
2459*a1e26a70SApple OSS Distributions void
setTag(IOOptionBits tag)2460*a1e26a70SApple OSS Distributions IOMemoryDescriptor::setTag( IOOptionBits tag )
2461*a1e26a70SApple OSS Distributions {
2462*a1e26a70SApple OSS Distributions 	_tag = tag;
2463*a1e26a70SApple OSS Distributions }
2464*a1e26a70SApple OSS Distributions 
2465*a1e26a70SApple OSS Distributions IOOptionBits
getTag(void)2466*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getTag( void )
2467*a1e26a70SApple OSS Distributions {
2468*a1e26a70SApple OSS Distributions 	return _tag;
2469*a1e26a70SApple OSS Distributions }
2470*a1e26a70SApple OSS Distributions 
2471*a1e26a70SApple OSS Distributions uint64_t
getFlags(void)2472*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getFlags(void)
2473*a1e26a70SApple OSS Distributions {
2474*a1e26a70SApple OSS Distributions 	return _flags;
2475*a1e26a70SApple OSS Distributions }
2476*a1e26a70SApple OSS Distributions 
2477*a1e26a70SApple OSS Distributions OSObject *
copyContext(void) const2478*a1e26a70SApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2479*a1e26a70SApple OSS Distributions {
2480*a1e26a70SApple OSS Distributions 	if (reserved) {
2481*a1e26a70SApple OSS Distributions 		OSObject * context = reserved->contextObject;
2482*a1e26a70SApple OSS Distributions 		if (context) {
2483*a1e26a70SApple OSS Distributions 			context->retain();
2484*a1e26a70SApple OSS Distributions 		}
2485*a1e26a70SApple OSS Distributions 		return context;
2486*a1e26a70SApple OSS Distributions 	} else {
2487*a1e26a70SApple OSS Distributions 		return NULL;
2488*a1e26a70SApple OSS Distributions 	}
2489*a1e26a70SApple OSS Distributions }
2490*a1e26a70SApple OSS Distributions 
// Replace the descriptor's context object.  Retains the new object (if any)
// and releases the previous one.  Passing NULL clears the context.
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Lazily allocates the reserved area; may be NULL on allocation failure,
	// in which case the set is silently dropped.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		OSObject * oldObject = reserved->contextObject;
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			// CAS won: this thread owns the old reference and must drop it.
			oldObject->release();
		}
		if (obj != NULL) {
			// Store the new object with a reference held on behalf of
			// the descriptor (released in cleanKernelReserved / a later set).
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2511*a1e26a70SApple OSS Distributions 
2512*a1e26a70SApple OSS Distributions #ifndef __LP64__
2513*a1e26a70SApple OSS Distributions #pragma clang diagnostic push
2514*a1e26a70SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2515*a1e26a70SApple OSS Distributions 
2516*a1e26a70SApple OSS Distributions // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
2517*a1e26a70SApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2518*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
2519*a1e26a70SApple OSS Distributions {
2520*a1e26a70SApple OSS Distributions 	addr64_t physAddr = 0;
2521*a1e26a70SApple OSS Distributions 
2522*a1e26a70SApple OSS Distributions 	if (prepare() == kIOReturnSuccess) {
2523*a1e26a70SApple OSS Distributions 		physAddr = getPhysicalSegment64( offset, length );
2524*a1e26a70SApple OSS Distributions 		complete();
2525*a1e26a70SApple OSS Distributions 	}
2526*a1e26a70SApple OSS Distributions 
2527*a1e26a70SApple OSS Distributions 	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2528*a1e26a70SApple OSS Distributions }
2529*a1e26a70SApple OSS Distributions 
2530*a1e26a70SApple OSS Distributions #pragma clang diagnostic pop
2531*a1e26a70SApple OSS Distributions 
2532*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2533*a1e26a70SApple OSS Distributions 
2534*a1e26a70SApple OSS Distributions 
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	// Copy up to 'length' bytes starting at 'offset' within this descriptor
	// into the kernel buffer 'bytes'.  Returns the number of bytes copied;
	// 0 on validation failure.
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	// offset == _length is a degenerate, empty read.
	if (offset >= _length) {
		return 0;
	}

	// Remote memory is not copyable from this path.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t        srcAddr64;
		IOByteCount     srcLen;
		int             options = cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap;

		// Ask for untranslated (mapper-none) physical addresses for copypv.
		IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
		srcAddr64 = getPhysicalSegment(offset, &srcLen, getPhysSegmentOptions);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv takes an unsigned int length; clamp so the cast below is safe.
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}


		kern_return_t copy_ret = copypv(srcAddr64, dstAddr, (unsigned int) srcLen, options);
#pragma unused(copy_ret)

		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// The loop should only terminate early if a segment lookup failed.
	assert(!remaining);

	return length - remaining;
}
2601*a1e26a70SApple OSS Distributions 
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	// Copy up to 'length' bytes from the kernel buffer 'bytes' into this
	// descriptor at 'inoffset'.  A NULL 'bytes' zero-fills the range.
	// Returns the number of bytes written; 0 on validation failure.
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	// Refuse writes to memory that was prepared read-only.
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	// offset == _length is a degenerate, empty write.
	if (offset >= _length) {
		return 0;
	}

	// Remote memory is not writable from this path.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;
		int         options = cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap;

		// Ask for untranslated (mapper-none) physical addresses for copypv.
		IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
		dstAddr64 = getPhysicalSegment(offset, &dstLen, getPhysSegmentOptions);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv takes an unsigned int length; clamp so the cast below is safe.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}


		if (!srcAddr) {
			// NULL source means zero-fill the destination segment.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			kern_return_t copy_ret = copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen, options);
#pragma unused(copy_ret)
			srcAddr   += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// The loop should only terminate early if a segment lookup failed.
	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	if (!srcAddr) {
		// bzero_phys path did not flush; do it explicitly.
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2685*a1e26a70SApple OSS Distributions 
2686*a1e26a70SApple OSS Distributions #ifndef __LP64__
// Deprecated pre-LP64 entry point; any caller reaching this is a fatal error.
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	panic("IOGMD::setPosition deprecated");
}
2692*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
2693*a1e26a70SApple OSS Distributions 
// Seeds for the atomically-incremented ID generators used by
// setPreparationID()/getPreparationID() and setDescriptorID() below.
// Descriptor IDs start one past kIODescriptorIDInvalid so a generated ID
// never equals the invalid sentinel.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2696*a1e26a70SApple OSS Distributions 
// Return a unique ID for the current preparation (wiring) of this descriptor,
// assigning one lazily on first request.
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	ioGMDData *dataP;

	// An ID is only meaningful while the memory is wired.
	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	// Physical-address descriptors use the superclass's reserved-area ID.
	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	// Lazily assign a fresh ID; the CAS makes concurrent callers agree on
	// a single winner without locking.
	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
	}
	return dataP->fPreparationID;
}
2722*a1e26a70SApple OSS Distributions 
2723*a1e26a70SApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2724*a1e26a70SApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2725*a1e26a70SApple OSS Distributions {
2726*a1e26a70SApple OSS Distributions 	if (reserved->creator) {
2727*a1e26a70SApple OSS Distributions 		task_deallocate(reserved->creator);
2728*a1e26a70SApple OSS Distributions 		reserved->creator = NULL;
2729*a1e26a70SApple OSS Distributions 	}
2730*a1e26a70SApple OSS Distributions 
2731*a1e26a70SApple OSS Distributions 	if (reserved->contextObject) {
2732*a1e26a70SApple OSS Distributions 		reserved->contextObject->release();
2733*a1e26a70SApple OSS Distributions 		reserved->contextObject = NULL;
2734*a1e26a70SApple OSS Distributions 	}
2735*a1e26a70SApple OSS Distributions }
2736*a1e26a70SApple OSS Distributions 
2737*a1e26a70SApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2738*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2739*a1e26a70SApple OSS Distributions {
2740*a1e26a70SApple OSS Distributions 	if (!reserved) {
2741*a1e26a70SApple OSS Distributions 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2742*a1e26a70SApple OSS Distributions 	}
2743*a1e26a70SApple OSS Distributions 	return reserved;
2744*a1e26a70SApple OSS Distributions }
2745*a1e26a70SApple OSS Distributions 
// Assign a unique preparation ID to the reserved area, once.  The CAS keeps
// concurrent callers from replacing an ID that was already set.
void
IOMemoryDescriptor::setPreparationID( void )
{
	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
	}
}
2754*a1e26a70SApple OSS Distributions 
2755*a1e26a70SApple OSS Distributions uint64_t
getPreparationID(void)2756*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2757*a1e26a70SApple OSS Distributions {
2758*a1e26a70SApple OSS Distributions 	if (reserved) {
2759*a1e26a70SApple OSS Distributions 		return reserved->preparationID;
2760*a1e26a70SApple OSS Distributions 	} else {
2761*a1e26a70SApple OSS Distributions 		return kIOPreparationIDUnsupported;
2762*a1e26a70SApple OSS Distributions 	}
2763*a1e26a70SApple OSS Distributions }
2764*a1e26a70SApple OSS Distributions 
// Assign a unique descriptor ID to the reserved area, once.  The CAS keeps
// concurrent callers from replacing an ID that was already set.
void
IOMemoryDescriptor::setDescriptorID( void )
{
	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
	}
}
2773*a1e26a70SApple OSS Distributions 
2774*a1e26a70SApple OSS Distributions uint64_t
getDescriptorID(void)2775*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2776*a1e26a70SApple OSS Distributions {
2777*a1e26a70SApple OSS Distributions 	setDescriptorID();
2778*a1e26a70SApple OSS Distributions 
2779*a1e26a70SApple OSS Distributions 	if (reserved) {
2780*a1e26a70SApple OSS Distributions 		return reserved->descriptorID;
2781*a1e26a70SApple OSS Distributions 	} else {
2782*a1e26a70SApple OSS Distributions 		return kIODescriptorIDInvalid;
2783*a1e26a70SApple OSS Distributions 	}
2784*a1e26a70SApple OSS Distributions }
2785*a1e26a70SApple OSS Distributions 
2786*a1e26a70SApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2787*a1e26a70SApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2788*a1e26a70SApple OSS Distributions {
2789*a1e26a70SApple OSS Distributions 	if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2790*a1e26a70SApple OSS Distributions 		return kIOReturnSuccess;
2791*a1e26a70SApple OSS Distributions 	}
2792*a1e26a70SApple OSS Distributions 
2793*a1e26a70SApple OSS Distributions 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2794*a1e26a70SApple OSS Distributions 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2795*a1e26a70SApple OSS Distributions 		return kIOReturnBadArgument;
2796*a1e26a70SApple OSS Distributions 	}
2797*a1e26a70SApple OSS Distributions 
2798*a1e26a70SApple OSS Distributions 	uint64_t descriptorID = getDescriptorID();
2799*a1e26a70SApple OSS Distributions 	assert(descriptorID != kIODescriptorIDInvalid);
2800*a1e26a70SApple OSS Distributions 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2801*a1e26a70SApple OSS Distributions 		return kIOReturnBadArgument;
2802*a1e26a70SApple OSS Distributions 	}
2803*a1e26a70SApple OSS Distributions 
2804*a1e26a70SApple OSS Distributions 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2805*a1e26a70SApple OSS Distributions 
2806*a1e26a70SApple OSS Distributions #if __LP64__
2807*a1e26a70SApple OSS Distributions 	static const uint8_t num_segments_page = 8;
2808*a1e26a70SApple OSS Distributions #else
2809*a1e26a70SApple OSS Distributions 	static const uint8_t num_segments_page = 4;
2810*a1e26a70SApple OSS Distributions #endif
2811*a1e26a70SApple OSS Distributions 	static const uint8_t num_segments_long = 2;
2812*a1e26a70SApple OSS Distributions 
2813*a1e26a70SApple OSS Distributions 	IOPhysicalAddress segments_page[num_segments_page];
2814*a1e26a70SApple OSS Distributions 	IOPhysicalRange   segments_long[num_segments_long];
2815*a1e26a70SApple OSS Distributions 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2816*a1e26a70SApple OSS Distributions 	memset(segments_long, 0, sizeof(segments_long));
2817*a1e26a70SApple OSS Distributions 
2818*a1e26a70SApple OSS Distributions 	uint8_t segment_page_idx = 0;
2819*a1e26a70SApple OSS Distributions 	uint8_t segment_long_idx = 0;
2820*a1e26a70SApple OSS Distributions 
2821*a1e26a70SApple OSS Distributions 	IOPhysicalRange physical_segment;
2822*a1e26a70SApple OSS Distributions 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2823*a1e26a70SApple OSS Distributions 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2824*a1e26a70SApple OSS Distributions 
2825*a1e26a70SApple OSS Distributions 		if (physical_segment.length == 0) {
2826*a1e26a70SApple OSS Distributions 			break;
2827*a1e26a70SApple OSS Distributions 		}
2828*a1e26a70SApple OSS Distributions 
2829*a1e26a70SApple OSS Distributions 		/**
2830*a1e26a70SApple OSS Distributions 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2831*a1e26a70SApple OSS Distributions 		 * buffer memory, pack segment events according to the following.
2832*a1e26a70SApple OSS Distributions 		 *
2833*a1e26a70SApple OSS Distributions 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2834*a1e26a70SApple OSS Distributions 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2835*a1e26a70SApple OSS Distributions 		 *
2836*a1e26a70SApple OSS Distributions 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2837*a1e26a70SApple OSS Distributions 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2838*a1e26a70SApple OSS Distributions 		 * - unmapped pages will have a ppn of MAX_INT_32
2839*a1e26a70SApple OSS Distributions 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2840*a1e26a70SApple OSS Distributions 		 * - address_0, length_0, address_0, length_1
2841*a1e26a70SApple OSS Distributions 		 * - unmapped pages will have an address of 0
2842*a1e26a70SApple OSS Distributions 		 *
2843*a1e26a70SApple OSS Distributions 		 * During each iteration do the following depending on the length of the mapping:
2844*a1e26a70SApple OSS Distributions 		 * 1. add the current segment to the appropriate queue of pending segments
2845*a1e26a70SApple OSS Distributions 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2846*a1e26a70SApple OSS Distributions 		 * 1a. if FALSE emit and reset all events in the previous queue
2847*a1e26a70SApple OSS Distributions 		 * 2. check if we have filled up the current queue of pending events
2848*a1e26a70SApple OSS Distributions 		 * 2a. if TRUE emit and reset all events in the pending queue
2849*a1e26a70SApple OSS Distributions 		 * 3. after completing all iterations emit events in the current queue
2850*a1e26a70SApple OSS Distributions 		 */
2851*a1e26a70SApple OSS Distributions 
2852*a1e26a70SApple OSS Distributions 		bool emit_page = false;
2853*a1e26a70SApple OSS Distributions 		bool emit_long = false;
2854*a1e26a70SApple OSS Distributions 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2855*a1e26a70SApple OSS Distributions 			segments_page[segment_page_idx] = physical_segment.address;
2856*a1e26a70SApple OSS Distributions 			segment_page_idx++;
2857*a1e26a70SApple OSS Distributions 
2858*a1e26a70SApple OSS Distributions 			emit_long = segment_long_idx != 0;
2859*a1e26a70SApple OSS Distributions 			emit_page = segment_page_idx == num_segments_page;
2860*a1e26a70SApple OSS Distributions 
2861*a1e26a70SApple OSS Distributions 			if (os_unlikely(emit_long)) {
2862*a1e26a70SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2863*a1e26a70SApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2864*a1e26a70SApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2865*a1e26a70SApple OSS Distributions 			}
2866*a1e26a70SApple OSS Distributions 
2867*a1e26a70SApple OSS Distributions 			if (os_unlikely(emit_page)) {
2868*a1e26a70SApple OSS Distributions #if __LP64__
2869*a1e26a70SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2870*a1e26a70SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2871*a1e26a70SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2872*a1e26a70SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2873*a1e26a70SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2874*a1e26a70SApple OSS Distributions #else
2875*a1e26a70SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2876*a1e26a70SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2877*a1e26a70SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2878*a1e26a70SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2879*a1e26a70SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2880*a1e26a70SApple OSS Distributions #endif
2881*a1e26a70SApple OSS Distributions 			}
2882*a1e26a70SApple OSS Distributions 		} else {
2883*a1e26a70SApple OSS Distributions 			segments_long[segment_long_idx] = physical_segment;
2884*a1e26a70SApple OSS Distributions 			segment_long_idx++;
2885*a1e26a70SApple OSS Distributions 
2886*a1e26a70SApple OSS Distributions 			emit_page = segment_page_idx != 0;
2887*a1e26a70SApple OSS Distributions 			emit_long = segment_long_idx == num_segments_long;
2888*a1e26a70SApple OSS Distributions 
2889*a1e26a70SApple OSS Distributions 			if (os_unlikely(emit_page)) {
2890*a1e26a70SApple OSS Distributions #if __LP64__
2891*a1e26a70SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2892*a1e26a70SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2893*a1e26a70SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2894*a1e26a70SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2895*a1e26a70SApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2896*a1e26a70SApple OSS Distributions #else
2897*a1e26a70SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2898*a1e26a70SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2899*a1e26a70SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2900*a1e26a70SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2901*a1e26a70SApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2902*a1e26a70SApple OSS Distributions #endif
2903*a1e26a70SApple OSS Distributions 			}
2904*a1e26a70SApple OSS Distributions 
2905*a1e26a70SApple OSS Distributions 			if (emit_long) {
2906*a1e26a70SApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2907*a1e26a70SApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2908*a1e26a70SApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2909*a1e26a70SApple OSS Distributions 			}
2910*a1e26a70SApple OSS Distributions 		}
2911*a1e26a70SApple OSS Distributions 
2912*a1e26a70SApple OSS Distributions 		if (os_unlikely(emit_page)) {
2913*a1e26a70SApple OSS Distributions 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2914*a1e26a70SApple OSS Distributions 			segment_page_idx = 0;
2915*a1e26a70SApple OSS Distributions 		}
2916*a1e26a70SApple OSS Distributions 
2917*a1e26a70SApple OSS Distributions 		if (os_unlikely(emit_long)) {
2918*a1e26a70SApple OSS Distributions 			memset(segments_long, 0, sizeof(segments_long));
2919*a1e26a70SApple OSS Distributions 			segment_long_idx = 0;
2920*a1e26a70SApple OSS Distributions 		}
2921*a1e26a70SApple OSS Distributions 	}
2922*a1e26a70SApple OSS Distributions 
2923*a1e26a70SApple OSS Distributions 	if (segment_page_idx != 0) {
2924*a1e26a70SApple OSS Distributions 		assert(segment_long_idx == 0);
2925*a1e26a70SApple OSS Distributions #if __LP64__
2926*a1e26a70SApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2927*a1e26a70SApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2928*a1e26a70SApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2929*a1e26a70SApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2930*a1e26a70SApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2931*a1e26a70SApple OSS Distributions #else
2932*a1e26a70SApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2933*a1e26a70SApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[1]),
2934*a1e26a70SApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[2]),
2935*a1e26a70SApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[3]),
2936*a1e26a70SApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[4]));
2937*a1e26a70SApple OSS Distributions #endif
2938*a1e26a70SApple OSS Distributions 	} else if (segment_long_idx != 0) {
2939*a1e26a70SApple OSS Distributions 		assert(segment_page_idx == 0);
2940*a1e26a70SApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2941*a1e26a70SApple OSS Distributions 		    segments_long[0].address, segments_long[0].length,
2942*a1e26a70SApple OSS Distributions 		    segments_long[1].address, segments_long[1].length);
2943*a1e26a70SApple OSS Distributions 	}
2944*a1e26a70SApple OSS Distributions 
2945*a1e26a70SApple OSS Distributions 	return kIOReturnSuccess;
2946*a1e26a70SApple OSS Distributions }
2947*a1e26a70SApple OSS Distributions 
2948*a1e26a70SApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2949*a1e26a70SApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2950*a1e26a70SApple OSS Distributions {
2951*a1e26a70SApple OSS Distributions 	_kernelTag = (vm_tag_t) kernelTag;
2952*a1e26a70SApple OSS Distributions 	_userTag   = (vm_tag_t) userTag;
2953*a1e26a70SApple OSS Distributions }
2954*a1e26a70SApple OSS Distributions 
2955*a1e26a70SApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2956*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2957*a1e26a70SApple OSS Distributions {
2958*a1e26a70SApple OSS Distributions 	if (vm_kernel_map_is_kernel(map)) {
2959*a1e26a70SApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2960*a1e26a70SApple OSS Distributions 			return (uint32_t) _kernelTag;
2961*a1e26a70SApple OSS Distributions 		}
2962*a1e26a70SApple OSS Distributions 	} else {
2963*a1e26a70SApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _userTag) {
2964*a1e26a70SApple OSS Distributions 			return (uint32_t) _userTag;
2965*a1e26a70SApple OSS Distributions 		}
2966*a1e26a70SApple OSS Distributions 	}
2967*a1e26a70SApple OSS Distributions 	return IOMemoryTag(map);
2968*a1e26a70SApple OSS Distributions }
2969*a1e26a70SApple OSS Distributions 
/*
 * Multiplexed back end for IODMACommand operations against a general
 * memory descriptor. 'op' selects the sub-operation (map, unmap, add a
 * map specification, query characteristics, mark DMA active/inactive, or
 * walk segments); 'vData'/'dataSize' carry the op-specific argument
 * structure. Returns kIOReturnUnderrun when the caller's buffer is too
 * small for the selected op, kIOReturnBadArgument for an unknown op,
 * otherwise an op-specific IOReturn.
 *
 * NOTE(review): the method is declared const but mutates descriptor
 * state through the const_cast'd 'md' alias — an intentional pattern
 * here, kept as-is.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOReturn err = kIOReturnSuccess;
	DMACommandOps params;
	// Cast away const: several ops (map, active-tracking) update cached
	// mapping state on the descriptor.
	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
	ioGMDData *dataP;

	// Split 'op' into the operation code proper and any extra parameter
	// bits the caller packed above the mask.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// Lazily create the memory-entries data area that holds the
		// cached DMA mapping state.
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries && data->fMapper) {
			bool remap, keepMap;
			dataP = getDataP(_memoryEntries);

			// Tighten the cached mapping constraints to the most
			// restrictive spec seen so far (fewest address bits,
			// largest alignment).
			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
			}
			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
			}

			// A mapping is only worth caching on the descriptor when it
			// comes from the system mapper and covers the whole length.
			keepMap = (data->fMapper == gIOSystemMapper);
			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

			// Serialize against prepare()/complete() while touching the
			// shared system-mapper mapping state.
			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockLock(_prepareLock);
			}

			// Re-map when the cached mapping can't be reused: partial or
			// foreign-mapper requests, a cached base that exceeds the
			// address-bit limit, or a stricter-than-page alignment.
			remap = (!keepMap);
			remap |= (dataP->fDMAMapNumAddressBits < 64)
			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
			remap |= (dataP->fDMAMapAlignment > page_size);

			if (remap || !dataP->fMappedBaseValid) {
				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
				// Adopt a fresh whole-length system mapping into the
				// descriptor's cache; zeroing fAllocLength signals the
				// caller not to free it.
				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
					dataP->fMappedBase      = data->fAlloc;
					dataP->fMappedBaseValid = true;
					dataP->fMappedLength    = data->fAllocLength;
					data->fAllocLength      = 0;    // IOMD owns the alloc now
				}
			} else {
				// Reuse the cached mapping; record the use against the
				// command for accounting.
				data->fAlloc = dataP->fMappedBase;
				data->fAllocLength = 0;         // give out IOMD map
				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
			}

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockUnlock(_prepareLock);
			}
		}
		return err;
	}
	if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// Only unmap when the descriptor actually covers pages.
		if (_pages) {
			err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
		}

		// NOTE(review): 'err' from dmaUnmap is discarded; success is
		// reported unconditionally — kept as-is.
		return kIOReturnSuccess;
	}

	if (kIOMDAddDMAMapSpec == op) {
		if (dataSize < sizeof(IODMAMapSpecification)) {
			return kIOReturnUnderrun;
		}

		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		// Merge the new specification into the cached constraints
		// (same most-restrictive-wins rule as kIOMDDMAMap above).
		if (_memoryEntries) {
			dataP = getDataP(_memoryEntries);
			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->numAddressBits;
			}
			if (data->alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->alignment;
			}
		}
		return kIOReturnSuccess;
	}

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		// Report length, range count, page count, direction, and — when
		// prepared — the highest page and single-IOPL page alignment.
		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = _length;
		data->fSGCount = _rangesCount;
		data->fPages = _pages;
		data->fDirection = getDirection();
		if (!_wireCount) {
			data->fIsPrepared = false;
		} else {
			data->fIsPrepared = true;
			data->fHighestPage = _highestPage;
			if (_memoryEntries) {
				dataP = getDataP(_memoryEntries);
				ioPLBlock *ioplList = getIOPLList(dataP);
				UInt count = getNumIOPL(_memoryEntries, dataP);
				if (count == 1) {
					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
				}
			}
		}

		return kIOReturnSuccess;
	} else if (kIOMDDMAActive == op) {
		// params != 0 means "activate": bump the DMA reference count and,
		// on the first reference, drop the cached map name.
		if (params) {
			int16_t prior;
			prior = OSAddAtomic16(1, &md->_dmaReferences);
			if (!prior) {
				md->_mapName = NULL;
			}
		} else {
			// Deactivate: drop one reference, panicking on underflow.
			if (md->_dmaReferences) {
				OSAddAtomic16(-1, &md->_dmaReferences);
			} else {
				panic("_dmaReferences underflow");
			}
		}
	} else if (kIOMDWalkSegments != op) {
		return kIOReturnBadArgument;
	}

	// Get the next segment
	// Walk-segments state: the caller's args plus a cursor (offset->index
	// mapping) persisted across calls so sequential walks restart where
	// the previous one ended.
	struct InternalState {
		IOMDDMAWalkSegmentArgs fIO;
		mach_vm_size_t fOffset2Index;
		mach_vm_size_t fNextOffset;
		UInt fIndex;
	} *isP;

	// Find the next segment
	if (dataSize < sizeof(*isP)) {
		return kIOReturnUnderrun;
	}

	isP = (InternalState *) vData;
	uint64_t offset = isP->fIO.fOffset;
	uint8_t mapped = isP->fIO.fMapped;
	uint64_t mappedBase;

	// Remote (non-local) memory cannot produce mapped (IOVM) addresses.
	if (mapped && (kIOMemoryRemote & _flags)) {
		return kIOReturnNotAttached;
	}

	// If a mapped address is requested but no system mapping exists yet,
	// create one now covering the whole descriptor.
	if (IOMapper::gSystem && mapped
	    && (!(kIOMemoryHostOnly & _flags))
	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		dataP = getDataP(_memoryEntries);
		if (dataP->fMapper) {
			IODMAMapSpecification mapSpec;
			bzero(&mapSpec, sizeof(mapSpec));
			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
			mapSpec.alignment = dataP->fDMAMapAlignment;
			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
			if (kIOReturnSuccess != err) {
				return err;
			}
			dataP->fMappedBaseValid = true;
		}
	}

	// Resolve the mapped base; fall back to physical addresses when no
	// valid system mapping is available.
	if (mapped) {
		if (IOMapper::gSystem
		    && (!(kIOMemoryHostOnly & _flags))
		    && _memoryEntries
		    && (dataP = getDataP(_memoryEntries))
		    && dataP->fMappedBaseValid) {
			mappedBase = dataP->fMappedBase;
		} else {
			mapped = 0;
		}
	}

	// Walking exactly to the end is an expected "no more segments"
	// condition; past the end is a caller bug.
	if (offset >= _length) {
		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
	}

	// Validate the previous offset
	// Resume from the saved cursor only for a sequential walk; otherwise
	// rescan from the start.
	UInt ind;
	mach_vm_size_t off2Ind = isP->fOffset2Index;
	if (!params
	    && offset
	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
		ind = isP->fIndex;
	} else {
		ind = off2Ind = 0; // Start from beginning
	}
	mach_vm_size_t length;
	UInt64 address;

	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
		// Physical address based memory descriptor
		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			// Mapped (IOVM) space is contiguous: just offset the base.
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}

		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#ifndef __LP64__
	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
		// Physical address based memory descriptor
		// (64-bit ranges variant; same algorithm as the branch above)
		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}
		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#endif /* !__LP64__ */
	else {
		// Wired (IOPL-backed) memory: locate the IOPL containing the
		// offset, then resolve via its page list.
		// do/while(false) is used as a structured early-exit ("continue"
		// jumps to the end once 'address'/'length' are final).
		do {
			if (!_wireCount) {
				panic("IOGMD: not wired for the IODMACommand");
			}

			assert(_memoryEntries);

			dataP = getDataP(_memoryEntries);
			const ioPLBlock *ioplList = getIOPLList(dataP);
			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
			upl_page_info_t *pageList = getPageList(dataP);

			assert(numIOPLs > 0);

			// Scan through iopl info blocks looking for block containing offset
			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
				ind++;
			}

			// Go back to actual range as search goes past it
			ioPLBlock ioplInfo = ioplList[ind - 1];
			off2Ind = ioplInfo.fIOMDOffset;

			if (ind < numIOPLs) {
				length = ioplList[ind].fIOMDOffset;
			} else {
				length = _length;
			}
			length -= offset;       // Remainder within iopl

			// Subtract offset till this iopl in total list
			offset -= off2Ind;

			// If a mapped address is requested and this is a pre-mapped IOPL
			// then just need to compute an offset relative to the mapped base.
			if (mapped) {
				offset += (ioplInfo.fPageOffset & PAGE_MASK);
				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
				continue; // Done leave do/while(false) now
			}

			// The offset is rebased into the current iopl.
			// Now add the iopl 1st page offset.
			offset += ioplInfo.fPageOffset;

			// For external UPLs the fPageInfo field points directly to
			// the upl's upl_page_info_t array.
			if (ioplInfo.fFlags & kIOPLExternUPL) {
				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
			} else {
				pageList = &pageList[ioplInfo.fPageInfo];
			}

			// Check for direct device non-paged memory
			if (ioplInfo.fFlags & kIOPLOnDevice) {
				address = ptoa_64(pageList->phys_addr) + offset;
				continue; // Done leave do/while(false) now
			}

			// Now we need compute the index into the pageList
			UInt pageInd = atop_32(offset);
			offset &= PAGE_MASK;

			// Compute the starting address of this segment
			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
			if (!pageAddr) {
				panic("!pageList phys_addr");
			}

			address = ptoa_64(pageAddr) + offset;

			// length is currently set to the length of the remainder of the iopl.
			// We need to check that the remainder of the iopl is contiguous.
			// This is indicated by pageList[ind].phys_addr being sequential.
			IOByteCount contigLength = PAGE_SIZE - offset;
			while (contigLength < length
			    && ++pageAddr == pageList[++pageInd].phys_addr) {
				contigLength += PAGE_SIZE;
			}

			// Clip the segment to the physically-contiguous run.
			if (contigLength < length) {
				length = contigLength;
			}

			assert(address);
			assert(length);
		} while (false);
	}

	// Update return values and state
	// Persist the cursor so the next sequential call resumes here.
	isP->fIO.fIOVMAddr = address;
	isP->fIO.fLength   = length;
	isP->fIndex        = ind;
	isP->fOffset2Index = off2Ind;
	isP->fNextOffset   = isP->fIO.fOffset + length;

	return kIOReturnSuccess;
}
3353*a1e26a70SApple OSS Distributions 
/*
 * Resolve "offset" within this descriptor to a single contiguous segment:
 * returns the segment's start address (physical, or mapper/IOVM depending
 * on options) and, via *lengthOfSegment, the length of the contiguous run
 * beginning there. Returns 0 (with zero length) when offset is out of
 * range or no segment can be resolved.
 */
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	// Fail-safe: report a zero-length segment unless one is found below.
	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		// Source-segment path: walk the original _ranges vector directly.
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		// Coalesce following ranges that are address-contiguous.
		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Normal path: ask the DMA segment walker for the segment at offset.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		// Only request mapper (IOVM) addresses when the caller did not ask
		// for raw physical and the memory is local and mappable.
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				// Caller wants true physical: translate the mapper address
				// page by page, extending the segment while the translation
				// remains physically contiguous, then clamp to the original
				// segment length.
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// No segment resolved: report zero length as well.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3460*a1e26a70SApple OSS Distributions 
3461*a1e26a70SApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)3462*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::readBytes
3463*a1e26a70SApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
3464*a1e26a70SApple OSS Distributions {
3465*a1e26a70SApple OSS Distributions 	IOByteCount count = super::readBytes(offset, bytes, length);
3466*a1e26a70SApple OSS Distributions 	return count;
3467*a1e26a70SApple OSS Distributions }
3468*a1e26a70SApple OSS Distributions 
3469*a1e26a70SApple OSS Distributions IOByteCount
writeBytes(IOByteCount offset,const void * bytes,IOByteCount withLength)3470*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::writeBytes
3471*a1e26a70SApple OSS Distributions (IOByteCount offset, const void* bytes, IOByteCount withLength)
3472*a1e26a70SApple OSS Distributions {
3473*a1e26a70SApple OSS Distributions 	IOByteCount count = super::writeBytes(offset, bytes, withLength);
3474*a1e26a70SApple OSS Distributions 	return count;
3475*a1e26a70SApple OSS Distributions }
3476*a1e26a70SApple OSS Distributions 
3477*a1e26a70SApple OSS Distributions #ifndef __LP64__
3478*a1e26a70SApple OSS Distributions #pragma clang diagnostic push
3479*a1e26a70SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3480*a1e26a70SApple OSS Distributions 
3481*a1e26a70SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3482*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3483*a1e26a70SApple OSS Distributions {
3484*a1e26a70SApple OSS Distributions 	addr64_t address = 0;
3485*a1e26a70SApple OSS Distributions 
3486*a1e26a70SApple OSS Distributions 	if (options & _kIOMemorySourceSegment) {
3487*a1e26a70SApple OSS Distributions 		address = getSourceSegment(offset, lengthOfSegment);
3488*a1e26a70SApple OSS Distributions 	} else if (options & kIOMemoryMapperNone) {
3489*a1e26a70SApple OSS Distributions 		address = getPhysicalSegment64(offset, lengthOfSegment);
3490*a1e26a70SApple OSS Distributions 	} else {
3491*a1e26a70SApple OSS Distributions 		address = getPhysicalSegment(offset, lengthOfSegment);
3492*a1e26a70SApple OSS Distributions 	}
3493*a1e26a70SApple OSS Distributions 
3494*a1e26a70SApple OSS Distributions 	return address;
3495*a1e26a70SApple OSS Distributions }
3496*a1e26a70SApple OSS Distributions #pragma clang diagnostic pop
3497*a1e26a70SApple OSS Distributions 
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// 64-bit variant: request raw physical addresses (bypass any mapper).
	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
}
3503*a1e26a70SApple OSS Distributions 
3504*a1e26a70SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3505*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3506*a1e26a70SApple OSS Distributions {
3507*a1e26a70SApple OSS Distributions 	addr64_t    address = 0;
3508*a1e26a70SApple OSS Distributions 	IOByteCount length  = 0;
3509*a1e26a70SApple OSS Distributions 
3510*a1e26a70SApple OSS Distributions 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3511*a1e26a70SApple OSS Distributions 
3512*a1e26a70SApple OSS Distributions 	if (lengthOfSegment) {
3513*a1e26a70SApple OSS Distributions 		length = *lengthOfSegment;
3514*a1e26a70SApple OSS Distributions 	}
3515*a1e26a70SApple OSS Distributions 
3516*a1e26a70SApple OSS Distributions 	if ((address + length) > 0x100000000ULL) {
3517*a1e26a70SApple OSS Distributions 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3518*a1e26a70SApple OSS Distributions 		    address, (long) length, (getMetaClass())->getClassName());
3519*a1e26a70SApple OSS Distributions 	}
3520*a1e26a70SApple OSS Distributions 
3521*a1e26a70SApple OSS Distributions 	return (IOPhysicalAddress) address;
3522*a1e26a70SApple OSS Distributions }
3523*a1e26a70SApple OSS Distributions 
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	IOPhysicalAddress phys32;
	IOByteCount       length;
	addr64_t          phys64;
	IOMapper *        mapper = NULL;

	// Resolve via the 32-bit entry point first.
	phys32 = getPhysicalSegment(offset, lengthOfSegment);
	if (!phys32) {
		return 0;
	}

	if (gIOSystemMapper) {
		mapper = gIOSystemMapper;
	}

	if (mapper) {
		// With a system mapper present, phys32 is a mapper address:
		// translate it page by page, growing the segment while the
		// translation remains physically contiguous, then clamp to the
		// originally reported length.
		// NOTE(review): lengthOfSegment is dereferenced unconditionally on
		// this path — assumes callers pass a non-NULL pointer; confirm.
		IOByteCount origLen;

		phys64 = mapper->mapToPhysicalAddress(phys32);
		origLen = *lengthOfSegment;
		length = page_size - (phys64 & (page_size - 1));
		while ((length < origLen)
		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
			length += page_size;
		}
		if (length > origLen) {
			length = origLen;
		}

		// Report only the physically contiguous run.
		*lengthOfSegment = length;
	} else {
		// No system mapper: the 32-bit address is used as-is.
		phys64 = (addr64_t) phys32;
	}

	return phys64;
}
3562*a1e26a70SApple OSS Distributions 
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// Legacy 32-bit entry point: forward to the options-based variant with
	// default options and truncate the result.
	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
}
3568*a1e26a70SApple OSS Distributions 
IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// Return the source (original range) address at offset rather than a
	// translated physical address — see the _kIOMemorySourceSegment note
	// in getPhysicalSegment().
	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
}
3574*a1e26a70SApple OSS Distributions 
3575*a1e26a70SApple OSS Distributions #pragma clang diagnostic push
3576*a1e26a70SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3577*a1e26a70SApple OSS Distributions 
void *
IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	// Deprecated (!__LP64__ only). For kernel-task descriptors the source
	// range address is returned directly as a pointer; any other use of
	// this interface is unsupported and panics.
	if (_task == kernel_task) {
		return (void *) getSourceSegment(offset, lengthOfSegment);
	} else {
		panic("IOGMD::getVirtualSegment deprecated");
	}

	// Unreachable: both branches above return or panic.
	return NULL;
}
3590*a1e26a70SApple OSS Distributions #pragma clang diagnostic pop
3591*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
3592*a1e26a70SApple OSS Distributions 
/*
 * Base-class backend for IODMACommand operations: characteristics query,
 * segment walking, and DMA map/unmap. Subclasses override this with more
 * capable implementations; this version only supports unmapped (raw
 * physical) segment walks.
 */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Split option bits from the operation code.
	// NOTE(review): "(op & ~kIOMDDMACommandOperationMask & op)" reduces to
	// "op & ~kIOMDDMACommandOperationMask", and params is never read in
	// this base implementation — confirm it can be simplified or dropped.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		// Describe this descriptor for DMA purposes.
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		// Return the contiguous segment starting at fOffset, coalescing
		// physically adjacent segments into a single run.
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// Mapped (IOVM) walks are not supported by the base class.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		// Establish a DMA mapping via the supplied mapper.
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		// Tear down a previously established DMA mapping.
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): err from dmaUnmap() is discarded and success is
		// returned unconditionally — confirm this is intentional.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3664*a1e26a70SApple OSS Distributions 
/*
 * Change the purgeable state of the memory backing this descriptor and
 * optionally return the previous state via *oldState. Without a backing
 * memory reference, operates directly on the owning task's vm_map using
 * the first (and only supported) address range.
 */
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	// Purgeability cannot be controlled on remote memory.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		// Backed by a memory reference: let the superclass drive it.
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		// do/while(false) provides a single exit point for the error breaks.
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			// Only the first range (index 0) is used.
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate the IOKit purgeable state into VM control/state.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				// Report the previous state back in IOKit terms.
				if (kIOReturnSuccess == err) {
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3728*a1e26a70SApple OSS Distributions 
3729*a1e26a70SApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3730*a1e26a70SApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3731*a1e26a70SApple OSS Distributions     IOOptionBits * oldState )
3732*a1e26a70SApple OSS Distributions {
3733*a1e26a70SApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3734*a1e26a70SApple OSS Distributions 
3735*a1e26a70SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3736*a1e26a70SApple OSS Distributions 		LOCK;
3737*a1e26a70SApple OSS Distributions 	}
3738*a1e26a70SApple OSS Distributions 	if (_memRef) {
3739*a1e26a70SApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3740*a1e26a70SApple OSS Distributions 	}
3741*a1e26a70SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3742*a1e26a70SApple OSS Distributions 		UNLOCK;
3743*a1e26a70SApple OSS Distributions 	}
3744*a1e26a70SApple OSS Distributions 
3745*a1e26a70SApple OSS Distributions 	return err;
3746*a1e26a70SApple OSS Distributions }
3747*a1e26a70SApple OSS Distributions 
3748*a1e26a70SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3749*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3750*a1e26a70SApple OSS Distributions     int newLedgerTag,
3751*a1e26a70SApple OSS Distributions     IOOptionBits newLedgerOptions )
3752*a1e26a70SApple OSS Distributions {
3753*a1e26a70SApple OSS Distributions 	IOReturn      err = kIOReturnSuccess;
3754*a1e26a70SApple OSS Distributions 
3755*a1e26a70SApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3756*a1e26a70SApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3757*a1e26a70SApple OSS Distributions 		return kIOReturnNotAttached;
3758*a1e26a70SApple OSS Distributions 	}
3759*a1e26a70SApple OSS Distributions 
3760*a1e26a70SApple OSS Distributions 	if (iokit_iomd_setownership_enabled == FALSE) {
3761*a1e26a70SApple OSS Distributions 		return kIOReturnUnsupported;
3762*a1e26a70SApple OSS Distributions 	}
3763*a1e26a70SApple OSS Distributions 
3764*a1e26a70SApple OSS Distributions 	if (_memRef) {
3765*a1e26a70SApple OSS Distributions 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3766*a1e26a70SApple OSS Distributions 	} else {
3767*a1e26a70SApple OSS Distributions 		err = kIOReturnUnsupported;
3768*a1e26a70SApple OSS Distributions 	}
3769*a1e26a70SApple OSS Distributions 
3770*a1e26a70SApple OSS Distributions 	return err;
3771*a1e26a70SApple OSS Distributions }
3772*a1e26a70SApple OSS Distributions 
IOReturn
IOMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
	IOReturn err = kIOReturnNotReady;

	// Ownership cannot be transferred on remote memory.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Honor the global switch for the setOwnership feature.
	if (iokit_iomd_setownership_enabled == FALSE) {
		return kIOReturnUnsupported;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		// Backed by a memory reference: apply the ownership change there.
		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
	} else {
		// Otherwise forward to the aggregating subclasses; any other
		// descriptor type leaves err as kIOReturnNotReady.
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor   * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
3809*a1e26a70SApple OSS Distributions 
3810*a1e26a70SApple OSS Distributions 
/*
 * Return the number of bytes a DMA mapping of this descriptor would
 * occupy (each source segment rounded out to page boundaries), and
 * optionally the first segment's in-page offset via *offset.
 */
uint64_t
IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
{
	uint64_t length;

	if (_memRef) {
		// Backed by a memory reference: it computes its own map length.
		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
	} else {
		IOByteCount       iterate, segLen;
		IOPhysicalAddress sourceAddr, sourceAlign;

		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		length = 0;
		iterate = 0;
		// Walk the source segments, accumulating each one's page-rounded
		// extent.
		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
			sourceAlign = (sourceAddr & page_mask);
			// Capture the in-page offset of the very first segment only.
			if (offset && !iterate) {
				*offset = sourceAlign;
			}
			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
			iterate += segLen;
		}
		// No segments walked (first lookup returned 0): fall back to the
		// descriptor's raw length with a zero page offset.
		if (!iterate) {
			length = getLength();
			if (offset) {
				*offset = 0;
			}
		}
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return length;
}
3848*a1e26a70SApple OSS Distributions 
3849*a1e26a70SApple OSS Distributions 
3850*a1e26a70SApple OSS Distributions IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3851*a1e26a70SApple OSS Distributions IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3852*a1e26a70SApple OSS Distributions     IOByteCount * dirtyPageCount )
3853*a1e26a70SApple OSS Distributions {
3854*a1e26a70SApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3855*a1e26a70SApple OSS Distributions 
3856*a1e26a70SApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3857*a1e26a70SApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3858*a1e26a70SApple OSS Distributions 		return kIOReturnNotAttached;
3859*a1e26a70SApple OSS Distributions 	}
3860*a1e26a70SApple OSS Distributions 
3861*a1e26a70SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3862*a1e26a70SApple OSS Distributions 		LOCK;
3863*a1e26a70SApple OSS Distributions 	}
3864*a1e26a70SApple OSS Distributions 	if (_memRef) {
3865*a1e26a70SApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3866*a1e26a70SApple OSS Distributions 	} else {
3867*a1e26a70SApple OSS Distributions 		IOMultiMemoryDescriptor * mmd;
3868*a1e26a70SApple OSS Distributions 		IOSubMemoryDescriptor   * smd;
3869*a1e26a70SApple OSS Distributions 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3870*a1e26a70SApple OSS Distributions 			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3871*a1e26a70SApple OSS Distributions 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3872*a1e26a70SApple OSS Distributions 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3873*a1e26a70SApple OSS Distributions 		}
3874*a1e26a70SApple OSS Distributions 	}
3875*a1e26a70SApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3876*a1e26a70SApple OSS Distributions 		UNLOCK;
3877*a1e26a70SApple OSS Distributions 	}
3878*a1e26a70SApple OSS Distributions 
3879*a1e26a70SApple OSS Distributions 	return err;
3880*a1e26a70SApple OSS Distributions }
3881*a1e26a70SApple OSS Distributions 
3882*a1e26a70SApple OSS Distributions 
3883*a1e26a70SApple OSS Distributions #if defined(__arm64__)
3884*a1e26a70SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3885*a1e26a70SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3886*a1e26a70SApple OSS Distributions #else /* defined(__arm64__) */
3887*a1e26a70SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3888*a1e26a70SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3889*a1e26a70SApple OSS Distributions #endif /* defined(__arm64__) */
3890*a1e26a70SApple OSS Distributions 
3891*a1e26a70SApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3892*a1e26a70SApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3893*a1e26a70SApple OSS Distributions {
3894*a1e26a70SApple OSS Distributions 	ppnum_t page, end;
3895*a1e26a70SApple OSS Distributions 
3896*a1e26a70SApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3897*a1e26a70SApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3898*a1e26a70SApple OSS Distributions 	for (; page < end; page++) {
3899*a1e26a70SApple OSS Distributions 		pmap_clear_noencrypt(page);
3900*a1e26a70SApple OSS Distributions 	}
3901*a1e26a70SApple OSS Distributions }
3902*a1e26a70SApple OSS Distributions 
3903*a1e26a70SApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3904*a1e26a70SApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3905*a1e26a70SApple OSS Distributions {
3906*a1e26a70SApple OSS Distributions 	ppnum_t page, end;
3907*a1e26a70SApple OSS Distributions 
3908*a1e26a70SApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3909*a1e26a70SApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3910*a1e26a70SApple OSS Distributions 	for (; page < end; page++) {
3911*a1e26a70SApple OSS Distributions 		pmap_set_noencrypt(page);
3912*a1e26a70SApple OSS Distributions 	}
3913*a1e26a70SApple OSS Distributions }
3914*a1e26a70SApple OSS Distributions 
/*
 * Walk the physical segments backing [offset, offset + length) and apply
 * one per-segment operation selected by 'options': an incoherent-IO cache
 * flush/store, or setting/clearing the per-page encryption attribute
 * (via SetEncryptOp / ClearEncryptOp above).
 *
 * Returns kIOReturnUnsupported for unknown options, kIOReturnNotAttached
 * for remote descriptors, kIOReturnUnderrun if a physical segment could
 * not be resolved before the requested range was fully covered, and
 * kIOReturnSuccess otherwise.
 */
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	// Legacy per-segment callback: (pa, count).
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	// arm64 extended callback adds the remaining-byte hint and an out
	// result; a nonzero *result means "whole range handled, stop early".
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	// Remote memory is not backed by local physical pages.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Coherent IO: a single stub call suffices; no per-segment walk.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// Coherent IO: a single stub call suffices; no per-segment walk.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	// No default case above: any unrecognized option leaves both
	// callbacks NULL and is rejected here.
#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the request to the descriptor's extent.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// The callbacks take 'unsigned int' counts; keep dstLen within
		// that range (and page-aligned at the cap).
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// NOTE(review): this clamps the loop counter itself so it fits
		// func_ext's 'unsigned int remaining' parameter; for requests
		// larger than UINT_MAX it also caps the total bytes walked —
		// confirm that is intended for such (very large) descriptors.
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// Nonzero result: the callback covered everything remaining.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// Any leftover bytes mean a segment lookup failed mid-walk.
	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4029*a1e26a70SApple OSS Distributions 
4030*a1e26a70SApple OSS Distributions /*
4031*a1e26a70SApple OSS Distributions  *
4032*a1e26a70SApple OSS Distributions  */
4033*a1e26a70SApple OSS Distributions 
4034*a1e26a70SApple OSS Distributions #if defined(__i386__) || defined(__x86_64__)
4035*a1e26a70SApple OSS Distributions 
4036*a1e26a70SApple OSS Distributions extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4037*a1e26a70SApple OSS Distributions 
4038*a1e26a70SApple OSS Distributions /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4039*a1e26a70SApple OSS Distributions  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4040*a1e26a70SApple OSS Distributions  * kernel non-text data -- should we just add another range instead?
4041*a1e26a70SApple OSS Distributions  */
4042*a1e26a70SApple OSS Distributions #define io_kernel_static_start  vm_kernel_stext
4043*a1e26a70SApple OSS Distributions #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4044*a1e26a70SApple OSS Distributions 
4045*a1e26a70SApple OSS Distributions #elif defined(__arm64__)
4046*a1e26a70SApple OSS Distributions 
4047*a1e26a70SApple OSS Distributions extern vm_offset_t              static_memory_end;
4048*a1e26a70SApple OSS Distributions 
4049*a1e26a70SApple OSS Distributions #if defined(__arm64__)
4050*a1e26a70SApple OSS Distributions #define io_kernel_static_start vm_kext_base
4051*a1e26a70SApple OSS Distributions #else /* defined(__arm64__) */
4052*a1e26a70SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4053*a1e26a70SApple OSS Distributions #endif /* defined(__arm64__) */
4054*a1e26a70SApple OSS Distributions 
4055*a1e26a70SApple OSS Distributions #define io_kernel_static_end    static_memory_end
4056*a1e26a70SApple OSS Distributions 
4057*a1e26a70SApple OSS Distributions #else
4058*a1e26a70SApple OSS Distributions #error io_kernel_static_end is undefined for this architecture
4059*a1e26a70SApple OSS Distributions #endif
4060*a1e26a70SApple OSS Distributions 
4061*a1e26a70SApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4062*a1e26a70SApple OSS Distributions io_get_kernel_static_upl(
4063*a1e26a70SApple OSS Distributions 	vm_map_t                /* map */,
4064*a1e26a70SApple OSS Distributions 	uintptr_t               offset,
4065*a1e26a70SApple OSS Distributions 	upl_size_t              *upl_size,
4066*a1e26a70SApple OSS Distributions 	unsigned int            *page_offset,
4067*a1e26a70SApple OSS Distributions 	upl_t                   *upl,
4068*a1e26a70SApple OSS Distributions 	upl_page_info_array_t   page_list,
4069*a1e26a70SApple OSS Distributions 	unsigned int            *count,
4070*a1e26a70SApple OSS Distributions 	ppnum_t                 *highest_page)
4071*a1e26a70SApple OSS Distributions {
4072*a1e26a70SApple OSS Distributions 	unsigned int pageCount, page;
4073*a1e26a70SApple OSS Distributions 	ppnum_t phys;
4074*a1e26a70SApple OSS Distributions 	ppnum_t highestPage = 0;
4075*a1e26a70SApple OSS Distributions 
4076*a1e26a70SApple OSS Distributions 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4077*a1e26a70SApple OSS Distributions 	if (pageCount > *count) {
4078*a1e26a70SApple OSS Distributions 		pageCount = *count;
4079*a1e26a70SApple OSS Distributions 	}
4080*a1e26a70SApple OSS Distributions 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4081*a1e26a70SApple OSS Distributions 
4082*a1e26a70SApple OSS Distributions 	*upl = NULL;
4083*a1e26a70SApple OSS Distributions 	*page_offset = ((unsigned int) page_mask & offset);
4084*a1e26a70SApple OSS Distributions 
4085*a1e26a70SApple OSS Distributions 	for (page = 0; page < pageCount; page++) {
4086*a1e26a70SApple OSS Distributions 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4087*a1e26a70SApple OSS Distributions 		if (!phys) {
4088*a1e26a70SApple OSS Distributions 			break;
4089*a1e26a70SApple OSS Distributions 		}
4090*a1e26a70SApple OSS Distributions 		page_list[page].phys_addr = phys;
4091*a1e26a70SApple OSS Distributions 		page_list[page].free_when_done = 0;
4092*a1e26a70SApple OSS Distributions 		page_list[page].absent    = 0;
4093*a1e26a70SApple OSS Distributions 		page_list[page].dirty     = 0;
4094*a1e26a70SApple OSS Distributions 		page_list[page].precious  = 0;
4095*a1e26a70SApple OSS Distributions 		page_list[page].device    = 0;
4096*a1e26a70SApple OSS Distributions 		if (phys > highestPage) {
4097*a1e26a70SApple OSS Distributions 			highestPage = phys;
4098*a1e26a70SApple OSS Distributions 		}
4099*a1e26a70SApple OSS Distributions 	}
4100*a1e26a70SApple OSS Distributions 
4101*a1e26a70SApple OSS Distributions 	*highest_page = highestPage;
4102*a1e26a70SApple OSS Distributions 
4103*a1e26a70SApple OSS Distributions 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4104*a1e26a70SApple OSS Distributions }
4105*a1e26a70SApple OSS Distributions 
4106*a1e26a70SApple OSS Distributions IOReturn
wireVirtual(IODirection forDirection)4107*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4108*a1e26a70SApple OSS Distributions {
4109*a1e26a70SApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4110*a1e26a70SApple OSS Distributions 	IOReturn error = kIOReturnSuccess;
4111*a1e26a70SApple OSS Distributions 	ioGMDData *dataP;
4112*a1e26a70SApple OSS Distributions 	upl_page_info_array_t pageInfo;
4113*a1e26a70SApple OSS Distributions 	ppnum_t mapBase;
4114*a1e26a70SApple OSS Distributions 	vm_tag_t tag = VM_KERN_MEMORY_NONE;
4115*a1e26a70SApple OSS Distributions 	mach_vm_size_t numBytesWired = 0;
4116*a1e26a70SApple OSS Distributions 
4117*a1e26a70SApple OSS Distributions 	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4118*a1e26a70SApple OSS Distributions 
4119*a1e26a70SApple OSS Distributions 	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4120*a1e26a70SApple OSS Distributions 		forDirection = (IODirection) (forDirection | getDirection());
4121*a1e26a70SApple OSS Distributions 	}
4122*a1e26a70SApple OSS Distributions 
4123*a1e26a70SApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4124*a1e26a70SApple OSS Distributions 	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4125*a1e26a70SApple OSS Distributions 	switch (kIODirectionOutIn & forDirection) {
4126*a1e26a70SApple OSS Distributions 	case kIODirectionOut:
4127*a1e26a70SApple OSS Distributions 		// Pages do not need to be marked as dirty on commit
4128*a1e26a70SApple OSS Distributions 		uplFlags = UPL_COPYOUT_FROM;
4129*a1e26a70SApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess;
4130*a1e26a70SApple OSS Distributions 		break;
4131*a1e26a70SApple OSS Distributions 
4132*a1e26a70SApple OSS Distributions 	case kIODirectionIn:
4133*a1e26a70SApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapWriteAccess;
4134*a1e26a70SApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4135*a1e26a70SApple OSS Distributions 		break;
4136*a1e26a70SApple OSS Distributions 
4137*a1e26a70SApple OSS Distributions 	default:
4138*a1e26a70SApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4139*a1e26a70SApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4140*a1e26a70SApple OSS Distributions 		break;
4141*a1e26a70SApple OSS Distributions 	}
4142*a1e26a70SApple OSS Distributions 
4143*a1e26a70SApple OSS Distributions 	if (_wireCount) {
4144*a1e26a70SApple OSS Distributions 		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4145*a1e26a70SApple OSS Distributions 			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4146*a1e26a70SApple OSS Distributions 			    (size_t)VM_KERNEL_ADDRPERM(this));
4147*a1e26a70SApple OSS Distributions 			error = kIOReturnNotWritable;
4148*a1e26a70SApple OSS Distributions 		}
4149*a1e26a70SApple OSS Distributions 	} else {
4150*a1e26a70SApple OSS Distributions 		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4151*a1e26a70SApple OSS Distributions 		IOMapper *mapper;
4152*a1e26a70SApple OSS Distributions 
4153*a1e26a70SApple OSS Distributions 		mapper = dataP->fMapper;
4154*a1e26a70SApple OSS Distributions 		dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4155*a1e26a70SApple OSS Distributions 
4156*a1e26a70SApple OSS Distributions 		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4157*a1e26a70SApple OSS Distributions 		tag = _kernelTag;
4158*a1e26a70SApple OSS Distributions 		if (VM_KERN_MEMORY_NONE == tag) {
4159*a1e26a70SApple OSS Distributions 			tag = IOMemoryTag(kernel_map);
4160*a1e26a70SApple OSS Distributions 		}
4161*a1e26a70SApple OSS Distributions 
4162*a1e26a70SApple OSS Distributions 		if (kIODirectionPrepareToPhys32 & forDirection) {
4163*a1e26a70SApple OSS Distributions 			if (!mapper) {
4164*a1e26a70SApple OSS Distributions 				uplFlags |= UPL_NEED_32BIT_ADDR;
4165*a1e26a70SApple OSS Distributions 			}
4166*a1e26a70SApple OSS Distributions 			if (dataP->fDMAMapNumAddressBits > 32) {
4167*a1e26a70SApple OSS Distributions 				dataP->fDMAMapNumAddressBits = 32;
4168*a1e26a70SApple OSS Distributions 			}
4169*a1e26a70SApple OSS Distributions 		}
4170*a1e26a70SApple OSS Distributions 		if (kIODirectionPrepareNoFault    & forDirection) {
4171*a1e26a70SApple OSS Distributions 			uplFlags |= UPL_REQUEST_NO_FAULT;
4172*a1e26a70SApple OSS Distributions 		}
4173*a1e26a70SApple OSS Distributions 		if (kIODirectionPrepareNoZeroFill & forDirection) {
4174*a1e26a70SApple OSS Distributions 			uplFlags |= UPL_NOZEROFILLIO;
4175*a1e26a70SApple OSS Distributions 		}
4176*a1e26a70SApple OSS Distributions 		if (kIODirectionPrepareNonCoherent & forDirection) {
4177*a1e26a70SApple OSS Distributions 			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4178*a1e26a70SApple OSS Distributions 		}
4179*a1e26a70SApple OSS Distributions 
4180*a1e26a70SApple OSS Distributions 		mapBase = 0;
4181*a1e26a70SApple OSS Distributions 
4182*a1e26a70SApple OSS Distributions 		// Note that appendBytes(NULL) zeros the data up to the desired length
4183*a1e26a70SApple OSS Distributions 		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4184*a1e26a70SApple OSS Distributions 		if (uplPageSize > ((unsigned int)uplPageSize)) {
4185*a1e26a70SApple OSS Distributions 			error = kIOReturnNoMemory;
4186*a1e26a70SApple OSS Distributions 			traceInterval.setEndArg2(error);
4187*a1e26a70SApple OSS Distributions 			return error;
4188*a1e26a70SApple OSS Distributions 		}
4189*a1e26a70SApple OSS Distributions 		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4190*a1e26a70SApple OSS Distributions 			error = kIOReturnNoMemory;
4191*a1e26a70SApple OSS Distributions 			traceInterval.setEndArg2(error);
4192*a1e26a70SApple OSS Distributions 			return error;
4193*a1e26a70SApple OSS Distributions 		}
4194*a1e26a70SApple OSS Distributions 		dataP = NULL;
4195*a1e26a70SApple OSS Distributions 
4196*a1e26a70SApple OSS Distributions 		// Find the appropriate vm_map for the given task
4197*a1e26a70SApple OSS Distributions 		vm_map_t curMap;
4198*a1e26a70SApple OSS Distributions 		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4199*a1e26a70SApple OSS Distributions 			curMap = NULL;
4200*a1e26a70SApple OSS Distributions 		} else {
4201*a1e26a70SApple OSS Distributions 			curMap = get_task_map(_task);
4202*a1e26a70SApple OSS Distributions 		}
4203*a1e26a70SApple OSS Distributions 
4204*a1e26a70SApple OSS Distributions 		// Iterate over the vector of virtual ranges
4205*a1e26a70SApple OSS Distributions 		Ranges vec = _ranges;
4206*a1e26a70SApple OSS Distributions 		unsigned int pageIndex  = 0;
4207*a1e26a70SApple OSS Distributions 		IOByteCount mdOffset    = 0;
4208*a1e26a70SApple OSS Distributions 		ppnum_t highestPage     = 0;
4209*a1e26a70SApple OSS Distributions 		bool         byteAlignUPL;
4210*a1e26a70SApple OSS Distributions 
4211*a1e26a70SApple OSS Distributions 		IOMemoryEntry * memRefEntry = NULL;
4212*a1e26a70SApple OSS Distributions 		if (_memRef) {
4213*a1e26a70SApple OSS Distributions 			memRefEntry = &_memRef->entries[0];
4214*a1e26a70SApple OSS Distributions 			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4215*a1e26a70SApple OSS Distributions 		} else {
4216*a1e26a70SApple OSS Distributions 			byteAlignUPL = true;
4217*a1e26a70SApple OSS Distributions 		}
4218*a1e26a70SApple OSS Distributions 
4219*a1e26a70SApple OSS Distributions 		for (UInt range = 0; mdOffset < _length; range++) {
4220*a1e26a70SApple OSS Distributions 			ioPLBlock iopl;
4221*a1e26a70SApple OSS Distributions 			mach_vm_address_t startPage, startPageOffset;
4222*a1e26a70SApple OSS Distributions 			mach_vm_size_t    numBytes;
4223*a1e26a70SApple OSS Distributions 			ppnum_t highPage = 0;
4224*a1e26a70SApple OSS Distributions 
4225*a1e26a70SApple OSS Distributions 			if (_memRef) {
4226*a1e26a70SApple OSS Distributions 				if (range >= _memRef->count) {
4227*a1e26a70SApple OSS Distributions 					panic("memRefEntry");
4228*a1e26a70SApple OSS Distributions 				}
4229*a1e26a70SApple OSS Distributions 				memRefEntry = &_memRef->entries[range];
4230*a1e26a70SApple OSS Distributions 				numBytes    = memRefEntry->size;
4231*a1e26a70SApple OSS Distributions 				startPage   = -1ULL;
4232*a1e26a70SApple OSS Distributions 				if (byteAlignUPL) {
4233*a1e26a70SApple OSS Distributions 					startPageOffset = 0;
4234*a1e26a70SApple OSS Distributions 				} else {
4235*a1e26a70SApple OSS Distributions 					startPageOffset = (memRefEntry->start & PAGE_MASK);
4236*a1e26a70SApple OSS Distributions 				}
4237*a1e26a70SApple OSS Distributions 			} else {
4238*a1e26a70SApple OSS Distributions 				// Get the startPage address and length of vec[range]
4239*a1e26a70SApple OSS Distributions 				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
4240*a1e26a70SApple OSS Distributions 				if (byteAlignUPL) {
4241*a1e26a70SApple OSS Distributions 					startPageOffset = 0;
4242*a1e26a70SApple OSS Distributions 				} else {
4243*a1e26a70SApple OSS Distributions 					startPageOffset = startPage & PAGE_MASK;
4244*a1e26a70SApple OSS Distributions 					startPage = trunc_page_64(startPage);
4245*a1e26a70SApple OSS Distributions 				}
4246*a1e26a70SApple OSS Distributions 			}
4247*a1e26a70SApple OSS Distributions 			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4248*a1e26a70SApple OSS Distributions 			numBytes += startPageOffset;
4249*a1e26a70SApple OSS Distributions 
4250*a1e26a70SApple OSS Distributions 			if (mapper) {
4251*a1e26a70SApple OSS Distributions 				iopl.fMappedPage = mapBase + pageIndex;
4252*a1e26a70SApple OSS Distributions 			} else {
4253*a1e26a70SApple OSS Distributions 				iopl.fMappedPage = 0;
4254*a1e26a70SApple OSS Distributions 			}
4255*a1e26a70SApple OSS Distributions 
4256*a1e26a70SApple OSS Distributions 			// Iterate over the current range, creating UPLs
4257*a1e26a70SApple OSS Distributions 			while (numBytes) {
4258*a1e26a70SApple OSS Distributions 				vm_address_t kernelStart = (vm_address_t) startPage;
4259*a1e26a70SApple OSS Distributions 				vm_map_t theMap;
4260*a1e26a70SApple OSS Distributions 				if (curMap) {
4261*a1e26a70SApple OSS Distributions 					theMap = curMap;
4262*a1e26a70SApple OSS Distributions 				} else if (_memRef) {
4263*a1e26a70SApple OSS Distributions 					theMap = NULL;
4264*a1e26a70SApple OSS Distributions 				} else {
4265*a1e26a70SApple OSS Distributions 					assert(_task == kernel_task);
4266*a1e26a70SApple OSS Distributions 					theMap = IOPageableMapForAddress(kernelStart);
4267*a1e26a70SApple OSS Distributions 				}
4268*a1e26a70SApple OSS Distributions 
4269*a1e26a70SApple OSS Distributions 				// ioplFlags is an in/out parameter
4270*a1e26a70SApple OSS Distributions 				upl_control_flags_t ioplFlags = uplFlags;
4271*a1e26a70SApple OSS Distributions 				dataP = getDataP(_memoryEntries);
4272*a1e26a70SApple OSS Distributions 				pageInfo = getPageList(dataP);
4273*a1e26a70SApple OSS Distributions 				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4274*a1e26a70SApple OSS Distributions 
4275*a1e26a70SApple OSS Distributions 				mach_vm_size_t ioplPhysSize;
4276*a1e26a70SApple OSS Distributions 				upl_size_t     ioplSize;
4277*a1e26a70SApple OSS Distributions 				unsigned int   numPageInfo;
4278*a1e26a70SApple OSS Distributions 
4279*a1e26a70SApple OSS Distributions 				if (_memRef) {
4280*a1e26a70SApple OSS Distributions 					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4281*a1e26a70SApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4282*a1e26a70SApple OSS Distributions 				} else {
4283*a1e26a70SApple OSS Distributions 					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4284*a1e26a70SApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4285*a1e26a70SApple OSS Distributions 				}
4286*a1e26a70SApple OSS Distributions 				if (error != KERN_SUCCESS) {
4287*a1e26a70SApple OSS Distributions 					if (_memRef) {
4288*a1e26a70SApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4289*a1e26a70SApple OSS Distributions 					} else {
4290*a1e26a70SApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4291*a1e26a70SApple OSS Distributions 					}
4292*a1e26a70SApple OSS Distributions 					printf("entry size error %d\n", error);
4293*a1e26a70SApple OSS Distributions 					goto abortExit;
4294*a1e26a70SApple OSS Distributions 				}
4295*a1e26a70SApple OSS Distributions 				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4296*a1e26a70SApple OSS Distributions 				numPageInfo = atop_32(ioplPhysSize);
4297*a1e26a70SApple OSS Distributions 				if (byteAlignUPL) {
4298*a1e26a70SApple OSS Distributions 					if (numBytes > ioplPhysSize) {
4299*a1e26a70SApple OSS Distributions 						ioplSize = ((typeof(ioplSize))ioplPhysSize);
4300*a1e26a70SApple OSS Distributions 					} else {
4301*a1e26a70SApple OSS Distributions 						ioplSize = ((typeof(ioplSize))numBytes);
4302*a1e26a70SApple OSS Distributions 					}
4303*a1e26a70SApple OSS Distributions 				} else {
4304*a1e26a70SApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ioplPhysSize);
4305*a1e26a70SApple OSS Distributions 				}
4306*a1e26a70SApple OSS Distributions 
4307*a1e26a70SApple OSS Distributions 				if (_memRef) {
4308*a1e26a70SApple OSS Distributions 					memory_object_offset_t entryOffset;
4309*a1e26a70SApple OSS Distributions 
4310*a1e26a70SApple OSS Distributions 					entryOffset = mdOffset;
4311*a1e26a70SApple OSS Distributions 					if (byteAlignUPL) {
4312*a1e26a70SApple OSS Distributions 						entryOffset = (entryOffset - memRefEntry->offset);
4313*a1e26a70SApple OSS Distributions 					} else {
4314*a1e26a70SApple OSS Distributions 						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4315*a1e26a70SApple OSS Distributions 					}
4316*a1e26a70SApple OSS Distributions 					if (ioplSize > (memRefEntry->size - entryOffset)) {
4317*a1e26a70SApple OSS Distributions 						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4318*a1e26a70SApple OSS Distributions 					}
4319*a1e26a70SApple OSS Distributions 					error = memory_object_iopl_request(memRefEntry->entry,
4320*a1e26a70SApple OSS Distributions 					    entryOffset,
4321*a1e26a70SApple OSS Distributions 					    &ioplSize,
4322*a1e26a70SApple OSS Distributions 					    &iopl.fIOPL,
4323*a1e26a70SApple OSS Distributions 					    baseInfo,
4324*a1e26a70SApple OSS Distributions 					    &numPageInfo,
4325*a1e26a70SApple OSS Distributions 					    &ioplFlags,
4326*a1e26a70SApple OSS Distributions 					    tag);
4327*a1e26a70SApple OSS Distributions 				} else if ((theMap == kernel_map)
4328*a1e26a70SApple OSS Distributions 				    && (kernelStart >= io_kernel_static_start)
4329*a1e26a70SApple OSS Distributions 				    && (kernelStart < io_kernel_static_end)) {
4330*a1e26a70SApple OSS Distributions 					error = io_get_kernel_static_upl(theMap,
4331*a1e26a70SApple OSS Distributions 					    kernelStart,
4332*a1e26a70SApple OSS Distributions 					    &ioplSize,
4333*a1e26a70SApple OSS Distributions 					    &iopl.fPageOffset,
4334*a1e26a70SApple OSS Distributions 					    &iopl.fIOPL,
4335*a1e26a70SApple OSS Distributions 					    baseInfo,
4336*a1e26a70SApple OSS Distributions 					    &numPageInfo,
4337*a1e26a70SApple OSS Distributions 					    &highPage);
4338*a1e26a70SApple OSS Distributions 				} else {
4339*a1e26a70SApple OSS Distributions 					assert(theMap);
4340*a1e26a70SApple OSS Distributions 					error = vm_map_create_upl(theMap,
4341*a1e26a70SApple OSS Distributions 					    startPage,
4342*a1e26a70SApple OSS Distributions 					    (upl_size_t*)&ioplSize,
4343*a1e26a70SApple OSS Distributions 					    &iopl.fIOPL,
4344*a1e26a70SApple OSS Distributions 					    baseInfo,
4345*a1e26a70SApple OSS Distributions 					    &numPageInfo,
4346*a1e26a70SApple OSS Distributions 					    &ioplFlags,
4347*a1e26a70SApple OSS Distributions 					    tag);
4348*a1e26a70SApple OSS Distributions 				}
4349*a1e26a70SApple OSS Distributions 
4350*a1e26a70SApple OSS Distributions 				if (error != KERN_SUCCESS) {
4351*a1e26a70SApple OSS Distributions 					traceInterval.setEndArg2(error);
4352*a1e26a70SApple OSS Distributions 					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4353*a1e26a70SApple OSS Distributions 					goto abortExit;
4354*a1e26a70SApple OSS Distributions 				}
4355*a1e26a70SApple OSS Distributions 
4356*a1e26a70SApple OSS Distributions 				assert(ioplSize);
4357*a1e26a70SApple OSS Distributions 
4358*a1e26a70SApple OSS Distributions 				if (iopl.fIOPL) {
4359*a1e26a70SApple OSS Distributions 					highPage = upl_get_highest_page(iopl.fIOPL);
4360*a1e26a70SApple OSS Distributions 				}
4361*a1e26a70SApple OSS Distributions 				if (highPage > highestPage) {
4362*a1e26a70SApple OSS Distributions 					highestPage = highPage;
4363*a1e26a70SApple OSS Distributions 				}
4364*a1e26a70SApple OSS Distributions 
4365*a1e26a70SApple OSS Distributions 				if (baseInfo->device) {
4366*a1e26a70SApple OSS Distributions 					numPageInfo = 1;
4367*a1e26a70SApple OSS Distributions 					iopl.fFlags = kIOPLOnDevice;
4368*a1e26a70SApple OSS Distributions 				} else {
4369*a1e26a70SApple OSS Distributions 					iopl.fFlags = 0;
4370*a1e26a70SApple OSS Distributions 				}
4371*a1e26a70SApple OSS Distributions 
4372*a1e26a70SApple OSS Distributions 				if (byteAlignUPL) {
4373*a1e26a70SApple OSS Distributions 					if (iopl.fIOPL) {
4374*a1e26a70SApple OSS Distributions 						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4375*a1e26a70SApple OSS Distributions 						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4376*a1e26a70SApple OSS Distributions 					}
4377*a1e26a70SApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4378*a1e26a70SApple OSS Distributions 						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4379*a1e26a70SApple OSS Distributions 						startPage -= iopl.fPageOffset;
4380*a1e26a70SApple OSS Distributions 					}
4381*a1e26a70SApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4382*a1e26a70SApple OSS Distributions 					numBytes += iopl.fPageOffset;
4383*a1e26a70SApple OSS Distributions 				}
4384*a1e26a70SApple OSS Distributions 
4385*a1e26a70SApple OSS Distributions 				iopl.fIOMDOffset = mdOffset;
4386*a1e26a70SApple OSS Distributions 				iopl.fPageInfo = pageIndex;
4387*a1e26a70SApple OSS Distributions 
4388*a1e26a70SApple OSS Distributions 				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4389*a1e26a70SApple OSS Distributions 					// Clean up partial created and unsaved iopl
4390*a1e26a70SApple OSS Distributions 					if (iopl.fIOPL) {
4391*a1e26a70SApple OSS Distributions 						upl_abort(iopl.fIOPL, 0);
4392*a1e26a70SApple OSS Distributions 						upl_deallocate(iopl.fIOPL);
4393*a1e26a70SApple OSS Distributions 					}
4394*a1e26a70SApple OSS Distributions 					error = kIOReturnNoMemory;
4395*a1e26a70SApple OSS Distributions 					traceInterval.setEndArg2(error);
4396*a1e26a70SApple OSS Distributions 					goto abortExit;
4397*a1e26a70SApple OSS Distributions 				}
4398*a1e26a70SApple OSS Distributions 				dataP = NULL;
4399*a1e26a70SApple OSS Distributions 
4400*a1e26a70SApple OSS Distributions 				// Check for a multiple iopl's in one virtual range
4401*a1e26a70SApple OSS Distributions 				pageIndex += numPageInfo;
4402*a1e26a70SApple OSS Distributions 				mdOffset -= iopl.fPageOffset;
4403*a1e26a70SApple OSS Distributions 				numBytesWired += ioplSize;
4404*a1e26a70SApple OSS Distributions 				if (ioplSize < numBytes) {
4405*a1e26a70SApple OSS Distributions 					numBytes -= ioplSize;
4406*a1e26a70SApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4407*a1e26a70SApple OSS Distributions 						startPage += ioplSize;
4408*a1e26a70SApple OSS Distributions 					}
4409*a1e26a70SApple OSS Distributions 					mdOffset += ioplSize;
4410*a1e26a70SApple OSS Distributions 					iopl.fPageOffset = 0;
4411*a1e26a70SApple OSS Distributions 					if (mapper) {
4412*a1e26a70SApple OSS Distributions 						iopl.fMappedPage = mapBase + pageIndex;
4413*a1e26a70SApple OSS Distributions 					}
4414*a1e26a70SApple OSS Distributions 				} else {
4415*a1e26a70SApple OSS Distributions 					mdOffset += numBytes;
4416*a1e26a70SApple OSS Distributions 					break;
4417*a1e26a70SApple OSS Distributions 				}
4418*a1e26a70SApple OSS Distributions 			}
4419*a1e26a70SApple OSS Distributions 		}
4420*a1e26a70SApple OSS Distributions 
4421*a1e26a70SApple OSS Distributions 		_highestPage = highestPage;
4422*a1e26a70SApple OSS Distributions 		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4423*a1e26a70SApple OSS Distributions 
4424*a1e26a70SApple OSS Distributions 		if (UPL_COPYOUT_FROM & uplFlags) {
4425*a1e26a70SApple OSS Distributions 			_flags |= kIOMemoryPreparedReadOnly;
4426*a1e26a70SApple OSS Distributions 		}
4427*a1e26a70SApple OSS Distributions 		traceInterval.setEndCodes(numBytesWired, error);
4428*a1e26a70SApple OSS Distributions 	}
4429*a1e26a70SApple OSS Distributions 
4430*a1e26a70SApple OSS Distributions #if IOTRACKING
4431*a1e26a70SApple OSS Distributions 	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4432*a1e26a70SApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4433*a1e26a70SApple OSS Distributions 		if (!dataP->fWireTracking.link.next) {
4434*a1e26a70SApple OSS Distributions 			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4435*a1e26a70SApple OSS Distributions 		}
4436*a1e26a70SApple OSS Distributions 	}
4437*a1e26a70SApple OSS Distributions #endif /* IOTRACKING */
4438*a1e26a70SApple OSS Distributions 
4439*a1e26a70SApple OSS Distributions 	return error;
4440*a1e26a70SApple OSS Distributions 
4441*a1e26a70SApple OSS Distributions abortExit:
4442*a1e26a70SApple OSS Distributions 	{
4443*a1e26a70SApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4444*a1e26a70SApple OSS Distributions 		UInt done = getNumIOPL(_memoryEntries, dataP);
4445*a1e26a70SApple OSS Distributions 		ioPLBlock *ioplList = getIOPLList(dataP);
4446*a1e26a70SApple OSS Distributions 
4447*a1e26a70SApple OSS Distributions 		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4448*a1e26a70SApple OSS Distributions 			if (ioplList[ioplIdx].fIOPL) {
4449*a1e26a70SApple OSS Distributions 				upl_abort(ioplList[ioplIdx].fIOPL, 0);
4450*a1e26a70SApple OSS Distributions 				upl_deallocate(ioplList[ioplIdx].fIOPL);
4451*a1e26a70SApple OSS Distributions 			}
4452*a1e26a70SApple OSS Distributions 		}
4453*a1e26a70SApple OSS Distributions 		_memoryEntries->setLength(computeDataSize(0, 0));
4454*a1e26a70SApple OSS Distributions 	}
4455*a1e26a70SApple OSS Distributions 
4456*a1e26a70SApple OSS Distributions 	if (error == KERN_FAILURE) {
4457*a1e26a70SApple OSS Distributions 		error = kIOReturnCannotWire;
4458*a1e26a70SApple OSS Distributions 	} else if (error == KERN_MEMORY_ERROR) {
4459*a1e26a70SApple OSS Distributions 		error = kIOReturnNoResources;
4460*a1e26a70SApple OSS Distributions 	}
4461*a1e26a70SApple OSS Distributions 
4462*a1e26a70SApple OSS Distributions 	return error;
4463*a1e26a70SApple OSS Distributions }
4464*a1e26a70SApple OSS Distributions 
4465*a1e26a70SApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4466*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4467*a1e26a70SApple OSS Distributions {
4468*a1e26a70SApple OSS Distributions 	ioGMDData * dataP;
4469*a1e26a70SApple OSS Distributions 
4470*a1e26a70SApple OSS Distributions 	if (size > UINT_MAX) {
4471*a1e26a70SApple OSS Distributions 		return false;
4472*a1e26a70SApple OSS Distributions 	}
4473*a1e26a70SApple OSS Distributions 	if (!_memoryEntries) {
4474*a1e26a70SApple OSS Distributions 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4475*a1e26a70SApple OSS Distributions 		if (!_memoryEntries) {
4476*a1e26a70SApple OSS Distributions 			return false;
4477*a1e26a70SApple OSS Distributions 		}
4478*a1e26a70SApple OSS Distributions 	} else if (!_memoryEntries->initWithCapacity(size)) {
4479*a1e26a70SApple OSS Distributions 		return false;
4480*a1e26a70SApple OSS Distributions 	}
4481*a1e26a70SApple OSS Distributions 
4482*a1e26a70SApple OSS Distributions 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4483*a1e26a70SApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4484*a1e26a70SApple OSS Distributions 
4485*a1e26a70SApple OSS Distributions 	if (mapper == kIOMapperWaitSystem) {
4486*a1e26a70SApple OSS Distributions 		IOMapper::checkForSystemMapper();
4487*a1e26a70SApple OSS Distributions 		mapper = IOMapper::gSystem;
4488*a1e26a70SApple OSS Distributions 	}
4489*a1e26a70SApple OSS Distributions 	dataP->fMapper               = mapper;
4490*a1e26a70SApple OSS Distributions 	dataP->fPageCnt              = 0;
4491*a1e26a70SApple OSS Distributions 	dataP->fMappedBase           = 0;
4492*a1e26a70SApple OSS Distributions 	dataP->fDMAMapNumAddressBits = 64;
4493*a1e26a70SApple OSS Distributions 	dataP->fDMAMapAlignment      = 0;
4494*a1e26a70SApple OSS Distributions 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4495*a1e26a70SApple OSS Distributions 	dataP->fCompletionError      = false;
4496*a1e26a70SApple OSS Distributions 	dataP->fMappedBaseValid      = false;
4497*a1e26a70SApple OSS Distributions 
4498*a1e26a70SApple OSS Distributions 	return true;
4499*a1e26a70SApple OSS Distributions }
4500*a1e26a70SApple OSS Distributions 
4501*a1e26a70SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4502*a1e26a70SApple OSS Distributions IOMemoryDescriptor::dmaMap(
4503*a1e26a70SApple OSS Distributions 	IOMapper                    * mapper,
4504*a1e26a70SApple OSS Distributions 	IOMemoryDescriptor          * memory,
4505*a1e26a70SApple OSS Distributions 	IODMACommand                * command,
4506*a1e26a70SApple OSS Distributions 	const IODMAMapSpecification * mapSpec,
4507*a1e26a70SApple OSS Distributions 	uint64_t                      offset,
4508*a1e26a70SApple OSS Distributions 	uint64_t                      length,
4509*a1e26a70SApple OSS Distributions 	uint64_t                    * mapAddress,
4510*a1e26a70SApple OSS Distributions 	uint64_t                    * mapLength)
4511*a1e26a70SApple OSS Distributions {
4512*a1e26a70SApple OSS Distributions 	IOReturn err;
4513*a1e26a70SApple OSS Distributions 	uint32_t mapOptions;
4514*a1e26a70SApple OSS Distributions 
4515*a1e26a70SApple OSS Distributions 	mapOptions = 0;
4516*a1e26a70SApple OSS Distributions 	mapOptions |= kIODMAMapReadAccess;
4517*a1e26a70SApple OSS Distributions 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4518*a1e26a70SApple OSS Distributions 		mapOptions |= kIODMAMapWriteAccess;
4519*a1e26a70SApple OSS Distributions 	}
4520*a1e26a70SApple OSS Distributions 
4521*a1e26a70SApple OSS Distributions 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4522*a1e26a70SApple OSS Distributions 	    mapSpec, command, NULL, mapAddress, mapLength);
4523*a1e26a70SApple OSS Distributions 
4524*a1e26a70SApple OSS Distributions 	if (kIOReturnSuccess == err) {
4525*a1e26a70SApple OSS Distributions 		dmaMapRecord(mapper, command, *mapLength);
4526*a1e26a70SApple OSS Distributions 	}
4527*a1e26a70SApple OSS Distributions 
4528*a1e26a70SApple OSS Distributions 	return err;
4529*a1e26a70SApple OSS Distributions }
4530*a1e26a70SApple OSS Distributions 
4531*a1e26a70SApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4532*a1e26a70SApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4533*a1e26a70SApple OSS Distributions 	IOMapper                    * mapper,
4534*a1e26a70SApple OSS Distributions 	IODMACommand                * command,
4535*a1e26a70SApple OSS Distributions 	uint64_t                      mapLength)
4536*a1e26a70SApple OSS Distributions {
4537*a1e26a70SApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4538*a1e26a70SApple OSS Distributions 	kern_allocation_name_t alloc;
4539*a1e26a70SApple OSS Distributions 	int16_t                prior;
4540*a1e26a70SApple OSS Distributions 
4541*a1e26a70SApple OSS Distributions 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4542*a1e26a70SApple OSS Distributions 		kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4543*a1e26a70SApple OSS Distributions 	}
4544*a1e26a70SApple OSS Distributions 
4545*a1e26a70SApple OSS Distributions 	if (!command) {
4546*a1e26a70SApple OSS Distributions 		return;
4547*a1e26a70SApple OSS Distributions 	}
4548*a1e26a70SApple OSS Distributions 	prior = OSAddAtomic16(1, &_dmaReferences);
4549*a1e26a70SApple OSS Distributions 	if (!prior) {
4550*a1e26a70SApple OSS Distributions 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4551*a1e26a70SApple OSS Distributions 			_mapName  = alloc;
4552*a1e26a70SApple OSS Distributions 			mapLength = _length;
4553*a1e26a70SApple OSS Distributions 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4554*a1e26a70SApple OSS Distributions 		} else {
4555*a1e26a70SApple OSS Distributions 			_mapName = NULL;
4556*a1e26a70SApple OSS Distributions 		}
4557*a1e26a70SApple OSS Distributions 	}
4558*a1e26a70SApple OSS Distributions }
4559*a1e26a70SApple OSS Distributions 
4560*a1e26a70SApple OSS Distributions IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4561*a1e26a70SApple OSS Distributions IOMemoryDescriptor::dmaUnmap(
4562*a1e26a70SApple OSS Distributions 	IOMapper                    * mapper,
4563*a1e26a70SApple OSS Distributions 	IODMACommand                * command,
4564*a1e26a70SApple OSS Distributions 	uint64_t                      offset,
4565*a1e26a70SApple OSS Distributions 	uint64_t                      mapAddress,
4566*a1e26a70SApple OSS Distributions 	uint64_t                      mapLength)
4567*a1e26a70SApple OSS Distributions {
4568*a1e26a70SApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4569*a1e26a70SApple OSS Distributions 	IOReturn ret;
4570*a1e26a70SApple OSS Distributions 	kern_allocation_name_t alloc;
4571*a1e26a70SApple OSS Distributions 	kern_allocation_name_t mapName;
4572*a1e26a70SApple OSS Distributions 	int16_t prior;
4573*a1e26a70SApple OSS Distributions 
4574*a1e26a70SApple OSS Distributions 	mapName = NULL;
4575*a1e26a70SApple OSS Distributions 	prior = 0;
4576*a1e26a70SApple OSS Distributions 	if (command) {
4577*a1e26a70SApple OSS Distributions 		mapName = _mapName;
4578*a1e26a70SApple OSS Distributions 		if (_dmaReferences) {
4579*a1e26a70SApple OSS Distributions 			prior = OSAddAtomic16(-1, &_dmaReferences);
4580*a1e26a70SApple OSS Distributions 		} else {
4581*a1e26a70SApple OSS Distributions 			panic("_dmaReferences underflow");
4582*a1e26a70SApple OSS Distributions 		}
4583*a1e26a70SApple OSS Distributions 	}
4584*a1e26a70SApple OSS Distributions 
4585*a1e26a70SApple OSS Distributions 	if (!mapLength) {
4586*a1e26a70SApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnSuccess);
4587*a1e26a70SApple OSS Distributions 		return kIOReturnSuccess;
4588*a1e26a70SApple OSS Distributions 	}
4589*a1e26a70SApple OSS Distributions 
4590*a1e26a70SApple OSS Distributions 	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4591*a1e26a70SApple OSS Distributions 
4592*a1e26a70SApple OSS Distributions 	if ((alloc = mapper->fAllocName)) {
4593*a1e26a70SApple OSS Distributions 		kern_allocation_update_size(alloc, -mapLength, NULL);
4594*a1e26a70SApple OSS Distributions 		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4595*a1e26a70SApple OSS Distributions 			mapLength = _length;
4596*a1e26a70SApple OSS Distributions 			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4597*a1e26a70SApple OSS Distributions 		}
4598*a1e26a70SApple OSS Distributions 	}
4599*a1e26a70SApple OSS Distributions 
4600*a1e26a70SApple OSS Distributions 	traceInterval.setEndArg1(ret);
4601*a1e26a70SApple OSS Distributions 	return ret;
4602*a1e26a70SApple OSS Distributions }
4603*a1e26a70SApple OSS Distributions 
/*
 * Map this descriptor's memory for DMA through the given mapper. Whole-length
 * mappings of wired virtual/UIO descriptors take a fast path that hands the
 * mapper the already-built page list; everything else (physical descriptors,
 * partial mappings) defers to the superclass implementation.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn          err = kIOReturnSuccess;
	ioGMDData *       dataP;
	IOOptionBits      type = _flags & kIOMemoryTypeMask;

	// Host-only memory is never mapped into an IOMMU; report success with
	// a zero address. Remote descriptors cannot be mapped locally at all.
	*mapAddress = 0;
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Physical descriptors, or sub-range mappings (nonzero offset or a
	// length other than the full descriptor), go through the generic path.
	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t          mapOptions = 0;

		// NOTE: this local intentionally shadows the mapSpec parameter;
		// the fast path uses the constraints recorded at wire time.
		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// A page-multiple length starting on a page boundary means every
		// page in the list is fully used by the mapping.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		// Propagate the read/write access recorded when the memory was wired.
		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList      = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		// Record the mapping for allocation accounting on success.
		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4677*a1e26a70SApple OSS Distributions 
4678*a1e26a70SApple OSS Distributions /*
4679*a1e26a70SApple OSS Distributions  * prepare
4680*a1e26a70SApple OSS Distributions  *
4681*a1e26a70SApple OSS Distributions  * Prepare the memory for an I/O transfer.  This involves paging in
4682*a1e26a70SApple OSS Distributions  * the memory, if necessary, and wiring it down for the duration of
4683*a1e26a70SApple OSS Distributions  * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
4686*a1e26a70SApple OSS Distributions  */
4687*a1e26a70SApple OSS Distributions 
4688*a1e26a70SApple OSS Distributions IOReturn
prepare(IODirection forDirection)4689*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4690*a1e26a70SApple OSS Distributions {
4691*a1e26a70SApple OSS Distributions 	IOReturn     error    = kIOReturnSuccess;
4692*a1e26a70SApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4693*a1e26a70SApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4694*a1e26a70SApple OSS Distributions 
4695*a1e26a70SApple OSS Distributions 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4696*a1e26a70SApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnSuccess);
4697*a1e26a70SApple OSS Distributions 		return kIOReturnSuccess;
4698*a1e26a70SApple OSS Distributions 	}
4699*a1e26a70SApple OSS Distributions 
4700*a1e26a70SApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
4701*a1e26a70SApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
4702*a1e26a70SApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnNotAttached);
4703*a1e26a70SApple OSS Distributions 		return kIOReturnNotAttached;
4704*a1e26a70SApple OSS Distributions 	}
4705*a1e26a70SApple OSS Distributions 
4706*a1e26a70SApple OSS Distributions 	if (_prepareLock) {
4707*a1e26a70SApple OSS Distributions 		IOLockLock(_prepareLock);
4708*a1e26a70SApple OSS Distributions 	}
4709*a1e26a70SApple OSS Distributions 
4710*a1e26a70SApple OSS Distributions 	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4711*a1e26a70SApple OSS Distributions 		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4712*a1e26a70SApple OSS Distributions 			error = kIOReturnNotReady;
4713*a1e26a70SApple OSS Distributions 			goto finish;
4714*a1e26a70SApple OSS Distributions 		}
4715*a1e26a70SApple OSS Distributions 		error = wireVirtual(forDirection);
4716*a1e26a70SApple OSS Distributions 	}
4717*a1e26a70SApple OSS Distributions 
4718*a1e26a70SApple OSS Distributions 	if (kIOReturnSuccess == error) {
4719*a1e26a70SApple OSS Distributions 		if (1 == ++_wireCount) {
4720*a1e26a70SApple OSS Distributions 			if (kIOMemoryClearEncrypt & _flags) {
4721*a1e26a70SApple OSS Distributions 				performOperation(kIOMemoryClearEncrypted, 0, _length);
4722*a1e26a70SApple OSS Distributions 			}
4723*a1e26a70SApple OSS Distributions 
4724*a1e26a70SApple OSS Distributions 			ktraceEmitPhysicalSegments();
4725*a1e26a70SApple OSS Distributions 		}
4726*a1e26a70SApple OSS Distributions 	}
4727*a1e26a70SApple OSS Distributions 
4728*a1e26a70SApple OSS Distributions finish:
4729*a1e26a70SApple OSS Distributions 
4730*a1e26a70SApple OSS Distributions 	if (_prepareLock) {
4731*a1e26a70SApple OSS Distributions 		IOLockUnlock(_prepareLock);
4732*a1e26a70SApple OSS Distributions 	}
4733*a1e26a70SApple OSS Distributions 	traceInterval.setEndArg1(error);
4734*a1e26a70SApple OSS Distributions 
4735*a1e26a70SApple OSS Distributions 	return error;
4736*a1e26a70SApple OSS Distributions }
4737*a1e26a70SApple OSS Distributions 
4738*a1e26a70SApple OSS Distributions /*
4739*a1e26a70SApple OSS Distributions  * complete
4740*a1e26a70SApple OSS Distributions  *
4741*a1e26a70SApple OSS Distributions  * Complete processing of the memory after an I/O transfer finishes.
4742*a1e26a70SApple OSS Distributions  * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
4745*a1e26a70SApple OSS Distributions  */
4746*a1e26a70SApple OSS Distributions 
4747*a1e26a70SApple OSS Distributions IOReturn
complete(IODirection forDirection)4748*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4749*a1e26a70SApple OSS Distributions {
4750*a1e26a70SApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4751*a1e26a70SApple OSS Distributions 	ioGMDData  * dataP;
4752*a1e26a70SApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4753*a1e26a70SApple OSS Distributions 
4754*a1e26a70SApple OSS Distributions 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4755*a1e26a70SApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnSuccess);
4756*a1e26a70SApple OSS Distributions 		return kIOReturnSuccess;
4757*a1e26a70SApple OSS Distributions 	}
4758*a1e26a70SApple OSS Distributions 
4759*a1e26a70SApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
4760*a1e26a70SApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
4761*a1e26a70SApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnNotAttached);
4762*a1e26a70SApple OSS Distributions 		return kIOReturnNotAttached;
4763*a1e26a70SApple OSS Distributions 	}
4764*a1e26a70SApple OSS Distributions 
4765*a1e26a70SApple OSS Distributions 	if (_prepareLock) {
4766*a1e26a70SApple OSS Distributions 		IOLockLock(_prepareLock);
4767*a1e26a70SApple OSS Distributions 	}
4768*a1e26a70SApple OSS Distributions 	do{
4769*a1e26a70SApple OSS Distributions 		assert(_wireCount);
4770*a1e26a70SApple OSS Distributions 		if (!_wireCount) {
4771*a1e26a70SApple OSS Distributions 			break;
4772*a1e26a70SApple OSS Distributions 		}
4773*a1e26a70SApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4774*a1e26a70SApple OSS Distributions 		if (!dataP) {
4775*a1e26a70SApple OSS Distributions 			break;
4776*a1e26a70SApple OSS Distributions 		}
4777*a1e26a70SApple OSS Distributions 
4778*a1e26a70SApple OSS Distributions 		if (kIODirectionCompleteWithError & forDirection) {
4779*a1e26a70SApple OSS Distributions 			dataP->fCompletionError = true;
4780*a1e26a70SApple OSS Distributions 		}
4781*a1e26a70SApple OSS Distributions 
4782*a1e26a70SApple OSS Distributions 		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4783*a1e26a70SApple OSS Distributions 			performOperation(kIOMemorySetEncrypted, 0, _length);
4784*a1e26a70SApple OSS Distributions 		}
4785*a1e26a70SApple OSS Distributions 
4786*a1e26a70SApple OSS Distributions 		_wireCount--;
4787*a1e26a70SApple OSS Distributions 		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4788*a1e26a70SApple OSS Distributions 			ioPLBlock *ioplList = getIOPLList(dataP);
4789*a1e26a70SApple OSS Distributions 			UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4790*a1e26a70SApple OSS Distributions 
4791*a1e26a70SApple OSS Distributions 			if (_wireCount) {
4792*a1e26a70SApple OSS Distributions 				// kIODirectionCompleteWithDataValid & forDirection
4793*a1e26a70SApple OSS Distributions 				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4794*a1e26a70SApple OSS Distributions 					vm_tag_t tag;
4795*a1e26a70SApple OSS Distributions 					tag = (typeof(tag))getVMTag(kernel_map);
4796*a1e26a70SApple OSS Distributions 					for (ind = 0; ind < count; ind++) {
4797*a1e26a70SApple OSS Distributions 						if (ioplList[ind].fIOPL) {
4798*a1e26a70SApple OSS Distributions 							iopl_valid_data(ioplList[ind].fIOPL, tag);
4799*a1e26a70SApple OSS Distributions 						}
4800*a1e26a70SApple OSS Distributions 					}
4801*a1e26a70SApple OSS Distributions 				}
4802*a1e26a70SApple OSS Distributions 			} else {
4803*a1e26a70SApple OSS Distributions 				if (_dmaReferences) {
4804*a1e26a70SApple OSS Distributions 					panic("complete() while dma active");
4805*a1e26a70SApple OSS Distributions 				}
4806*a1e26a70SApple OSS Distributions 
4807*a1e26a70SApple OSS Distributions 				if (dataP->fMappedBaseValid) {
4808*a1e26a70SApple OSS Distributions 					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4809*a1e26a70SApple OSS Distributions 					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4810*a1e26a70SApple OSS Distributions 				}
4811*a1e26a70SApple OSS Distributions #if IOTRACKING
4812*a1e26a70SApple OSS Distributions 				if (dataP->fWireTracking.link.next) {
4813*a1e26a70SApple OSS Distributions 					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4814*a1e26a70SApple OSS Distributions 				}
4815*a1e26a70SApple OSS Distributions #endif /* IOTRACKING */
4816*a1e26a70SApple OSS Distributions 				// Only complete iopls that we created which are for TypeVirtual
4817*a1e26a70SApple OSS Distributions 				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4818*a1e26a70SApple OSS Distributions 					for (ind = 0; ind < count; ind++) {
4819*a1e26a70SApple OSS Distributions 						if (ioplList[ind].fIOPL) {
4820*a1e26a70SApple OSS Distributions 							if (dataP->fCompletionError) {
4821*a1e26a70SApple OSS Distributions 								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4822*a1e26a70SApple OSS Distributions 							} else {
4823*a1e26a70SApple OSS Distributions 								upl_commit(ioplList[ind].fIOPL, NULL, 0);
4824*a1e26a70SApple OSS Distributions 							}
4825*a1e26a70SApple OSS Distributions 							upl_deallocate(ioplList[ind].fIOPL);
4826*a1e26a70SApple OSS Distributions 						}
4827*a1e26a70SApple OSS Distributions 					}
4828*a1e26a70SApple OSS Distributions 				} else if (kIOMemoryTypeUPL == type) {
4829*a1e26a70SApple OSS Distributions 					upl_set_referenced(ioplList[0].fIOPL, false);
4830*a1e26a70SApple OSS Distributions 				}
4831*a1e26a70SApple OSS Distributions 
4832*a1e26a70SApple OSS Distributions 				_memoryEntries->setLength(computeDataSize(0, 0));
4833*a1e26a70SApple OSS Distributions 
4834*a1e26a70SApple OSS Distributions 				dataP->fPreparationID = kIOPreparationIDUnprepared;
4835*a1e26a70SApple OSS Distributions 				_flags &= ~kIOMemoryPreparedReadOnly;
4836*a1e26a70SApple OSS Distributions 
4837*a1e26a70SApple OSS Distributions 				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4838*a1e26a70SApple OSS Distributions 					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4839*a1e26a70SApple OSS Distributions 				}
4840*a1e26a70SApple OSS Distributions 			}
4841*a1e26a70SApple OSS Distributions 		}
4842*a1e26a70SApple OSS Distributions 	}while (false);
4843*a1e26a70SApple OSS Distributions 
4844*a1e26a70SApple OSS Distributions 	if (_prepareLock) {
4845*a1e26a70SApple OSS Distributions 		IOLockUnlock(_prepareLock);
4846*a1e26a70SApple OSS Distributions 	}
4847*a1e26a70SApple OSS Distributions 
4848*a1e26a70SApple OSS Distributions 	traceInterval.setEndArg1(kIOReturnSuccess);
4849*a1e26a70SApple OSS Distributions 	return kIOReturnSuccess;
4850*a1e26a70SApple OSS Distributions }
4851*a1e26a70SApple OSS Distributions 
4852*a1e26a70SApple OSS Distributions IOOptionBits
memoryReferenceCreateOptions(IOOptionBits options,IOMemoryMap * mapping)4853*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4854*a1e26a70SApple OSS Distributions {
4855*a1e26a70SApple OSS Distributions 	IOOptionBits createOptions = 0;
4856*a1e26a70SApple OSS Distributions 
4857*a1e26a70SApple OSS Distributions 	if (!(kIOMap64Bit & options)) {
4858*a1e26a70SApple OSS Distributions 		panic("IOMemoryDescriptor::makeMapping !64bit");
4859*a1e26a70SApple OSS Distributions 	}
4860*a1e26a70SApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
4861*a1e26a70SApple OSS Distributions 		createOptions |= kIOMemoryReferenceWrite;
4862*a1e26a70SApple OSS Distributions #if DEVELOPMENT || DEBUG
4863*a1e26a70SApple OSS Distributions 		if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4864*a1e26a70SApple OSS Distributions 		    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4865*a1e26a70SApple OSS Distributions 			OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4866*a1e26a70SApple OSS Distributions 		}
4867*a1e26a70SApple OSS Distributions #endif
4868*a1e26a70SApple OSS Distributions 	}
4869*a1e26a70SApple OSS Distributions 	return createOptions;
4870*a1e26a70SApple OSS Distributions }
4871*a1e26a70SApple OSS Distributions 
4872*a1e26a70SApple OSS Distributions /*
4873*a1e26a70SApple OSS Distributions  * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
4874*a1e26a70SApple OSS Distributions  * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
4875*a1e26a70SApple OSS Distributions  * creation.
4876*a1e26a70SApple OSS Distributions  */
4877*a1e26a70SApple OSS Distributions 
4878*a1e26a70SApple OSS Distributions IOMemoryMap *
makeMapping(IOMemoryDescriptor * owner,task_t __intoTask,IOVirtualAddress __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4879*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::makeMapping(
4880*a1e26a70SApple OSS Distributions 	IOMemoryDescriptor *    owner,
4881*a1e26a70SApple OSS Distributions 	task_t                  __intoTask,
4882*a1e26a70SApple OSS Distributions 	IOVirtualAddress        __address,
4883*a1e26a70SApple OSS Distributions 	IOOptionBits            options,
4884*a1e26a70SApple OSS Distributions 	IOByteCount             __offset,
4885*a1e26a70SApple OSS Distributions 	IOByteCount             __length )
4886*a1e26a70SApple OSS Distributions {
4887*a1e26a70SApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
4888*a1e26a70SApple OSS Distributions 	IOMemoryMap * mapping;
4889*a1e26a70SApple OSS Distributions 
4890*a1e26a70SApple OSS Distributions 	if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
4891*a1e26a70SApple OSS Distributions 		struct IOMemoryReference * newRef;
4892*a1e26a70SApple OSS Distributions 		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
4893*a1e26a70SApple OSS Distributions 		if (kIOReturnSuccess == err) {
4894*a1e26a70SApple OSS Distributions 			if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
4895*a1e26a70SApple OSS Distributions 				memoryReferenceFree(newRef);
4896*a1e26a70SApple OSS Distributions 			}
4897*a1e26a70SApple OSS Distributions 		}
4898*a1e26a70SApple OSS Distributions 	}
4899*a1e26a70SApple OSS Distributions 	if (kIOReturnSuccess != err) {
4900*a1e26a70SApple OSS Distributions 		return NULL;
4901*a1e26a70SApple OSS Distributions 	}
4902*a1e26a70SApple OSS Distributions 	mapping = IOMemoryDescriptor::makeMapping(
4903*a1e26a70SApple OSS Distributions 		owner, __intoTask, __address, options, __offset, __length);
4904*a1e26a70SApple OSS Distributions 
4905*a1e26a70SApple OSS Distributions #if IOTRACKING
4906*a1e26a70SApple OSS Distributions 	if ((mapping == (IOMemoryMap *) __address)
4907*a1e26a70SApple OSS Distributions 	    && (0 == (kIOMapStatic & mapping->fOptions))
4908*a1e26a70SApple OSS Distributions 	    && (NULL == mapping->fSuperMap)
4909*a1e26a70SApple OSS Distributions 	    && ((kIOTracking & gIOKitDebug) || _task)) {
4910*a1e26a70SApple OSS Distributions 		// only dram maps in the default on development case
4911*a1e26a70SApple OSS Distributions 		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4912*a1e26a70SApple OSS Distributions 	}
4913*a1e26a70SApple OSS Distributions #endif /* IOTRACKING */
4914*a1e26a70SApple OSS Distributions 
4915*a1e26a70SApple OSS Distributions 	return mapping;
4916*a1e26a70SApple OSS Distributions }
4917*a1e26a70SApple OSS Distributions 
4918*a1e26a70SApple OSS Distributions IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4919*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::doMap(
4920*a1e26a70SApple OSS Distributions 	vm_map_t                __addressMap,
4921*a1e26a70SApple OSS Distributions 	IOVirtualAddress *      __address,
4922*a1e26a70SApple OSS Distributions 	IOOptionBits            options,
4923*a1e26a70SApple OSS Distributions 	IOByteCount             __offset,
4924*a1e26a70SApple OSS Distributions 	IOByteCount             __length )
4925*a1e26a70SApple OSS Distributions {
4926*a1e26a70SApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4927*a1e26a70SApple OSS Distributions 	traceInterval.setEndArg1(kIOReturnSuccess);
4928*a1e26a70SApple OSS Distributions #ifndef __LP64__
4929*a1e26a70SApple OSS Distributions 	if (!(kIOMap64Bit & options)) {
4930*a1e26a70SApple OSS Distributions 		panic("IOGeneralMemoryDescriptor::doMap !64bit");
4931*a1e26a70SApple OSS Distributions 	}
4932*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
4933*a1e26a70SApple OSS Distributions 
4934*a1e26a70SApple OSS Distributions 	kern_return_t  err;
4935*a1e26a70SApple OSS Distributions 
4936*a1e26a70SApple OSS Distributions 	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
4937*a1e26a70SApple OSS Distributions 	mach_vm_size_t offset  = mapping->fOffset + __offset;
4938*a1e26a70SApple OSS Distributions 	mach_vm_size_t length  = mapping->fLength;
4939*a1e26a70SApple OSS Distributions 
4940*a1e26a70SApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4941*a1e26a70SApple OSS Distributions 	Ranges vec = _ranges;
4942*a1e26a70SApple OSS Distributions 
4943*a1e26a70SApple OSS Distributions 	mach_vm_address_t range0Addr = 0;
4944*a1e26a70SApple OSS Distributions 	mach_vm_size_t    range0Len = 0;
4945*a1e26a70SApple OSS Distributions 
4946*a1e26a70SApple OSS Distributions 	if ((offset >= _length) || ((offset + length) > _length)) {
4947*a1e26a70SApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnBadArgument);
4948*a1e26a70SApple OSS Distributions 		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4949*a1e26a70SApple OSS Distributions 		// assert(offset == 0 && _length == 0 && length == 0);
4950*a1e26a70SApple OSS Distributions 		return kIOReturnBadArgument;
4951*a1e26a70SApple OSS Distributions 	}
4952*a1e26a70SApple OSS Distributions 
4953*a1e26a70SApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
4954*a1e26a70SApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
4955*a1e26a70SApple OSS Distributions 		return 0;
4956*a1e26a70SApple OSS Distributions 	}
4957*a1e26a70SApple OSS Distributions 
4958*a1e26a70SApple OSS Distributions 	if (vec.v) {
4959*a1e26a70SApple OSS Distributions 		getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
4960*a1e26a70SApple OSS Distributions 	}
4961*a1e26a70SApple OSS Distributions 
4962*a1e26a70SApple OSS Distributions 	// mapping source == dest? (could be much better)
4963*a1e26a70SApple OSS Distributions 	if (_task
4964*a1e26a70SApple OSS Distributions 	    && (mapping->fAddressTask == _task)
4965*a1e26a70SApple OSS Distributions 	    && (mapping->fAddressMap == get_task_map(_task))
4966*a1e26a70SApple OSS Distributions 	    && (options & kIOMapAnywhere)
4967*a1e26a70SApple OSS Distributions 	    && (!(kIOMapUnique & options))
4968*a1e26a70SApple OSS Distributions 	    && (!(kIOMapGuardedMask & options))
4969*a1e26a70SApple OSS Distributions 	    && (1 == _rangesCount)
4970*a1e26a70SApple OSS Distributions 	    && (0 == offset)
4971*a1e26a70SApple OSS Distributions 	    && range0Addr
4972*a1e26a70SApple OSS Distributions 	    && (length <= range0Len)) {
4973*a1e26a70SApple OSS Distributions 		mapping->fAddress = range0Addr;
4974*a1e26a70SApple OSS Distributions 		mapping->fOptions |= kIOMapStatic;
4975*a1e26a70SApple OSS Distributions 
4976*a1e26a70SApple OSS Distributions 		return kIOReturnSuccess;
4977*a1e26a70SApple OSS Distributions 	}
4978*a1e26a70SApple OSS Distributions 
4979*a1e26a70SApple OSS Distributions 	if (!_memRef) {
4980*a1e26a70SApple OSS Distributions 		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
4981*a1e26a70SApple OSS Distributions 		if (kIOReturnSuccess != err) {
4982*a1e26a70SApple OSS Distributions 			traceInterval.setEndArg1(err);
4983*a1e26a70SApple OSS Distributions 			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4984*a1e26a70SApple OSS Distributions 			return err;
4985*a1e26a70SApple OSS Distributions 		}
4986*a1e26a70SApple OSS Distributions 	}
4987*a1e26a70SApple OSS Distributions 
4988*a1e26a70SApple OSS Distributions 
4989*a1e26a70SApple OSS Distributions 	memory_object_t pager;
4990*a1e26a70SApple OSS Distributions 	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4991*a1e26a70SApple OSS Distributions 
4992*a1e26a70SApple OSS Distributions 	// <upl_transpose //
4993*a1e26a70SApple OSS Distributions 	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4994*a1e26a70SApple OSS Distributions 		do{
4995*a1e26a70SApple OSS Distributions 			upl_t               redirUPL2;
4996*a1e26a70SApple OSS Distributions 			upl_size_t          size;
4997*a1e26a70SApple OSS Distributions 			upl_control_flags_t flags;
4998*a1e26a70SApple OSS Distributions 			unsigned int        lock_count;
4999*a1e26a70SApple OSS Distributions 
5000*a1e26a70SApple OSS Distributions 			if (!_memRef || (1 != _memRef->count)) {
5001*a1e26a70SApple OSS Distributions 				err = kIOReturnNotReadable;
5002*a1e26a70SApple OSS Distributions 				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5003*a1e26a70SApple OSS Distributions 				break;
5004*a1e26a70SApple OSS Distributions 			}
5005*a1e26a70SApple OSS Distributions 
5006*a1e26a70SApple OSS Distributions 			size = (upl_size_t) round_page(mapping->fLength);
5007*a1e26a70SApple OSS Distributions 			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5008*a1e26a70SApple OSS Distributions 			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5009*a1e26a70SApple OSS Distributions 
5010*a1e26a70SApple OSS Distributions 			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
5011*a1e26a70SApple OSS Distributions 			    NULL, NULL,
5012*a1e26a70SApple OSS Distributions 			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
5013*a1e26a70SApple OSS Distributions 				redirUPL2 = NULL;
5014*a1e26a70SApple OSS Distributions 			}
5015*a1e26a70SApple OSS Distributions 
5016*a1e26a70SApple OSS Distributions 			for (lock_count = 0;
5017*a1e26a70SApple OSS Distributions 			    IORecursiveLockHaveLock(gIOMemoryLock);
5018*a1e26a70SApple OSS Distributions 			    lock_count++) {
5019*a1e26a70SApple OSS Distributions 				UNLOCK;
5020*a1e26a70SApple OSS Distributions 			}
5021*a1e26a70SApple OSS Distributions 			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
5022*a1e26a70SApple OSS Distributions 			for (;
5023*a1e26a70SApple OSS Distributions 			    lock_count;
5024*a1e26a70SApple OSS Distributions 			    lock_count--) {
5025*a1e26a70SApple OSS Distributions 				LOCK;
5026*a1e26a70SApple OSS Distributions 			}
5027*a1e26a70SApple OSS Distributions 
5028*a1e26a70SApple OSS Distributions 			if (kIOReturnSuccess != err) {
5029*a1e26a70SApple OSS Distributions 				IOLog("upl_transpose(%x)\n", err);
5030*a1e26a70SApple OSS Distributions 				err = kIOReturnSuccess;
5031*a1e26a70SApple OSS Distributions 			}
5032*a1e26a70SApple OSS Distributions 
5033*a1e26a70SApple OSS Distributions 			if (redirUPL2) {
5034*a1e26a70SApple OSS Distributions 				upl_commit(redirUPL2, NULL, 0);
5035*a1e26a70SApple OSS Distributions 				upl_deallocate(redirUPL2);
5036*a1e26a70SApple OSS Distributions 				redirUPL2 = NULL;
5037*a1e26a70SApple OSS Distributions 			}
5038*a1e26a70SApple OSS Distributions 			{
5039*a1e26a70SApple OSS Distributions 				// swap the memEntries since they now refer to different vm_objects
5040*a1e26a70SApple OSS Distributions 				IOMemoryReference * me = _memRef;
5041*a1e26a70SApple OSS Distributions 				_memRef = mapping->fMemory->_memRef;
5042*a1e26a70SApple OSS Distributions 				mapping->fMemory->_memRef = me;
5043*a1e26a70SApple OSS Distributions 			}
5044*a1e26a70SApple OSS Distributions 			if (pager) {
5045*a1e26a70SApple OSS Distributions 				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
5046*a1e26a70SApple OSS Distributions 			}
5047*a1e26a70SApple OSS Distributions 		}while (false);
5048*a1e26a70SApple OSS Distributions 	}
5049*a1e26a70SApple OSS Distributions 	// upl_transpose> //
5050*a1e26a70SApple OSS Distributions 	else {
5051*a1e26a70SApple OSS Distributions 		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
5052*a1e26a70SApple OSS Distributions 		if (err) {
5053*a1e26a70SApple OSS Distributions 			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
5054*a1e26a70SApple OSS Distributions 		}
5055*a1e26a70SApple OSS Distributions 		if ((err == KERN_SUCCESS) && pager) {
5056*a1e26a70SApple OSS Distributions 			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
5057*a1e26a70SApple OSS Distributions 
5058*a1e26a70SApple OSS Distributions 			if (err != KERN_SUCCESS) {
5059*a1e26a70SApple OSS Distributions 				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
5060*a1e26a70SApple OSS Distributions 			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
5061*a1e26a70SApple OSS Distributions 				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
5062*a1e26a70SApple OSS Distributions 			}
5063*a1e26a70SApple OSS Distributions 		}
5064*a1e26a70SApple OSS Distributions 	}
5065*a1e26a70SApple OSS Distributions 
5066*a1e26a70SApple OSS Distributions 	traceInterval.setEndArg1(err);
5067*a1e26a70SApple OSS Distributions 	if (err) {
5068*a1e26a70SApple OSS Distributions 		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5069*a1e26a70SApple OSS Distributions 	}
5070*a1e26a70SApple OSS Distributions 	return err;
5071*a1e26a70SApple OSS Distributions }
5072*a1e26a70SApple OSS Distributions 
5073*a1e26a70SApple OSS Distributions #if IOTRACKING
5074*a1e26a70SApple OSS Distributions IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking,task_t * task,mach_vm_address_t * address,mach_vm_size_t * size)5075*a1e26a70SApple OSS Distributions IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
5076*a1e26a70SApple OSS Distributions     mach_vm_address_t * address, mach_vm_size_t * size)
5077*a1e26a70SApple OSS Distributions {
5078*a1e26a70SApple OSS Distributions #define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
5079*a1e26a70SApple OSS Distributions 
5080*a1e26a70SApple OSS Distributions 	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
5081*a1e26a70SApple OSS Distributions 
5082*a1e26a70SApple OSS Distributions 	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
5083*a1e26a70SApple OSS Distributions 		return kIOReturnNotReady;
5084*a1e26a70SApple OSS Distributions 	}
5085*a1e26a70SApple OSS Distributions 
5086*a1e26a70SApple OSS Distributions 	*task    = map->fAddressTask;
5087*a1e26a70SApple OSS Distributions 	*address = map->fAddress;
5088*a1e26a70SApple OSS Distributions 	*size    = map->fLength;
5089*a1e26a70SApple OSS Distributions 
5090*a1e26a70SApple OSS Distributions 	return kIOReturnSuccess;
5091*a1e26a70SApple OSS Distributions }
5092*a1e26a70SApple OSS Distributions #endif /* IOTRACKING */
5093*a1e26a70SApple OSS Distributions 
5094*a1e26a70SApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5095*a1e26a70SApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5096*a1e26a70SApple OSS Distributions 	vm_map_t                addressMap,
5097*a1e26a70SApple OSS Distributions 	IOVirtualAddress        __address,
5098*a1e26a70SApple OSS Distributions 	IOByteCount             __length )
5099*a1e26a70SApple OSS Distributions {
5100*a1e26a70SApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5101*a1e26a70SApple OSS Distributions 	IOReturn ret;
5102*a1e26a70SApple OSS Distributions 	ret = super::doUnmap(addressMap, __address, __length);
5103*a1e26a70SApple OSS Distributions 	traceInterval.setEndArg1(ret);
5104*a1e26a70SApple OSS Distributions 	return ret;
5105*a1e26a70SApple OSS Distributions }
5106*a1e26a70SApple OSS Distributions 
5107*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5108*a1e26a70SApple OSS Distributions 
5109*a1e26a70SApple OSS Distributions #undef super
5110*a1e26a70SApple OSS Distributions #define super OSObject
5111*a1e26a70SApple OSS Distributions 
5112*a1e26a70SApple OSS Distributions OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )
5113*a1e26a70SApple OSS Distributions 
5114*a1e26a70SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
5115*a1e26a70SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
5116*a1e26a70SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
5117*a1e26a70SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
5118*a1e26a70SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
5119*a1e26a70SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
5120*a1e26a70SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
5121*a1e26a70SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5122*a1e26a70SApple OSS Distributions 
5123*a1e26a70SApple OSS Distributions /* ex-inline function implementation */
5124*a1e26a70SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()5125*a1e26a70SApple OSS Distributions IOMemoryMap::getPhysicalAddress()
5126*a1e26a70SApple OSS Distributions {
5127*a1e26a70SApple OSS Distributions 	return getPhysicalSegment( 0, NULL );
5128*a1e26a70SApple OSS Distributions }
5129*a1e26a70SApple OSS Distributions 
5130*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5131*a1e26a70SApple OSS Distributions 
5132*a1e26a70SApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5133*a1e26a70SApple OSS Distributions IOMemoryMap::init(
5134*a1e26a70SApple OSS Distributions 	task_t                  intoTask,
5135*a1e26a70SApple OSS Distributions 	mach_vm_address_t       toAddress,
5136*a1e26a70SApple OSS Distributions 	IOOptionBits            _options,
5137*a1e26a70SApple OSS Distributions 	mach_vm_size_t          _offset,
5138*a1e26a70SApple OSS Distributions 	mach_vm_size_t          _length )
5139*a1e26a70SApple OSS Distributions {
5140*a1e26a70SApple OSS Distributions 	if (!intoTask) {
5141*a1e26a70SApple OSS Distributions 		return false;
5142*a1e26a70SApple OSS Distributions 	}
5143*a1e26a70SApple OSS Distributions 
5144*a1e26a70SApple OSS Distributions 	if (!super::init()) {
5145*a1e26a70SApple OSS Distributions 		return false;
5146*a1e26a70SApple OSS Distributions 	}
5147*a1e26a70SApple OSS Distributions 
5148*a1e26a70SApple OSS Distributions 	fAddressMap  = get_task_map(intoTask);
5149*a1e26a70SApple OSS Distributions 	if (!fAddressMap) {
5150*a1e26a70SApple OSS Distributions 		return false;
5151*a1e26a70SApple OSS Distributions 	}
5152*a1e26a70SApple OSS Distributions 	vm_map_reference(fAddressMap);
5153*a1e26a70SApple OSS Distributions 
5154*a1e26a70SApple OSS Distributions 	fAddressTask = intoTask;
5155*a1e26a70SApple OSS Distributions 	fOptions     = _options;
5156*a1e26a70SApple OSS Distributions 	fLength      = _length;
5157*a1e26a70SApple OSS Distributions 	fOffset      = _offset;
5158*a1e26a70SApple OSS Distributions 	fAddress     = toAddress;
5159*a1e26a70SApple OSS Distributions 
5160*a1e26a70SApple OSS Distributions 	return true;
5161*a1e26a70SApple OSS Distributions }
5162*a1e26a70SApple OSS Distributions 
5163*a1e26a70SApple OSS Distributions bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5164*a1e26a70SApple OSS Distributions IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5165*a1e26a70SApple OSS Distributions {
5166*a1e26a70SApple OSS Distributions 	if (!_memory) {
5167*a1e26a70SApple OSS Distributions 		return false;
5168*a1e26a70SApple OSS Distributions 	}
5169*a1e26a70SApple OSS Distributions 
5170*a1e26a70SApple OSS Distributions 	if (!fSuperMap) {
5171*a1e26a70SApple OSS Distributions 		if ((_offset + fLength) > _memory->getLength()) {
5172*a1e26a70SApple OSS Distributions 			return false;
5173*a1e26a70SApple OSS Distributions 		}
5174*a1e26a70SApple OSS Distributions 		fOffset = _offset;
5175*a1e26a70SApple OSS Distributions 	}
5176*a1e26a70SApple OSS Distributions 
5177*a1e26a70SApple OSS Distributions 
5178*a1e26a70SApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5179*a1e26a70SApple OSS Distributions 	if (fMemory) {
5180*a1e26a70SApple OSS Distributions 		if (fMemory != _memory) {
5181*a1e26a70SApple OSS Distributions 			fMemory->removeMapping(this);
5182*a1e26a70SApple OSS Distributions 		}
5183*a1e26a70SApple OSS Distributions 	}
5184*a1e26a70SApple OSS Distributions 	fMemory = os::move(tempval);
5185*a1e26a70SApple OSS Distributions 
5186*a1e26a70SApple OSS Distributions 	return true;
5187*a1e26a70SApple OSS Distributions }
5188*a1e26a70SApple OSS Distributions 
5189*a1e26a70SApple OSS Distributions IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5190*a1e26a70SApple OSS Distributions IOMemoryDescriptor::doMap(
5191*a1e26a70SApple OSS Distributions 	vm_map_t                __addressMap,
5192*a1e26a70SApple OSS Distributions 	IOVirtualAddress *      __address,
5193*a1e26a70SApple OSS Distributions 	IOOptionBits            options,
5194*a1e26a70SApple OSS Distributions 	IOByteCount             __offset,
5195*a1e26a70SApple OSS Distributions 	IOByteCount             __length )
5196*a1e26a70SApple OSS Distributions {
5197*a1e26a70SApple OSS Distributions 	return kIOReturnUnsupported;
5198*a1e26a70SApple OSS Distributions }
5199*a1e26a70SApple OSS Distributions 
5200*a1e26a70SApple OSS Distributions IOReturn
handleFault(void * _pager,mach_vm_size_t sourceOffset,mach_vm_size_t length)5201*a1e26a70SApple OSS Distributions IOMemoryDescriptor::handleFault(
5202*a1e26a70SApple OSS Distributions 	void *                  _pager,
5203*a1e26a70SApple OSS Distributions 	mach_vm_size_t          sourceOffset,
5204*a1e26a70SApple OSS Distributions 	mach_vm_size_t          length)
5205*a1e26a70SApple OSS Distributions {
5206*a1e26a70SApple OSS Distributions 	if (kIOMemoryRedirected & _flags) {
5207*a1e26a70SApple OSS Distributions #if DEBUG
5208*a1e26a70SApple OSS Distributions 		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
5209*a1e26a70SApple OSS Distributions #endif
5210*a1e26a70SApple OSS Distributions 		do {
5211*a1e26a70SApple OSS Distributions 			SLEEP;
5212*a1e26a70SApple OSS Distributions 		} while (kIOMemoryRedirected & _flags);
5213*a1e26a70SApple OSS Distributions 	}
5214*a1e26a70SApple OSS Distributions 	return kIOReturnSuccess;
5215*a1e26a70SApple OSS Distributions }
5216*a1e26a70SApple OSS Distributions 
5217*a1e26a70SApple OSS Distributions IOReturn
populateDevicePager(void * _pager,vm_map_t addressMap,mach_vm_address_t address,mach_vm_size_t sourceOffset,mach_vm_size_t length,IOOptionBits options)5218*a1e26a70SApple OSS Distributions IOMemoryDescriptor::populateDevicePager(
5219*a1e26a70SApple OSS Distributions 	void *                  _pager,
5220*a1e26a70SApple OSS Distributions 	vm_map_t                addressMap,
5221*a1e26a70SApple OSS Distributions 	mach_vm_address_t       address,
5222*a1e26a70SApple OSS Distributions 	mach_vm_size_t          sourceOffset,
5223*a1e26a70SApple OSS Distributions 	mach_vm_size_t          length,
5224*a1e26a70SApple OSS Distributions 	IOOptionBits            options )
5225*a1e26a70SApple OSS Distributions {
5226*a1e26a70SApple OSS Distributions 	IOReturn            err = kIOReturnSuccess;
5227*a1e26a70SApple OSS Distributions 	memory_object_t     pager = (memory_object_t) _pager;
5228*a1e26a70SApple OSS Distributions 	mach_vm_size_t      size;
5229*a1e26a70SApple OSS Distributions 	mach_vm_size_t      bytes;
5230*a1e26a70SApple OSS Distributions 	mach_vm_size_t      page;
5231*a1e26a70SApple OSS Distributions 	mach_vm_size_t      pageOffset;
5232*a1e26a70SApple OSS Distributions 	mach_vm_size_t      pagerOffset;
5233*a1e26a70SApple OSS Distributions 	IOPhysicalLength    segLen, chunk;
5234*a1e26a70SApple OSS Distributions 	addr64_t            physAddr;
5235*a1e26a70SApple OSS Distributions 	IOOptionBits        type;
5236*a1e26a70SApple OSS Distributions 
5237*a1e26a70SApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
5238*a1e26a70SApple OSS Distributions 
5239*a1e26a70SApple OSS Distributions 	if (reserved->dp.pagerContig) {
5240*a1e26a70SApple OSS Distributions 		sourceOffset = 0;
5241*a1e26a70SApple OSS Distributions 		pagerOffset  = 0;
5242*a1e26a70SApple OSS Distributions 	}
5243*a1e26a70SApple OSS Distributions 
5244*a1e26a70SApple OSS Distributions 	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
5245*a1e26a70SApple OSS Distributions 	assert( physAddr );
5246*a1e26a70SApple OSS Distributions 	pageOffset = physAddr - trunc_page_64( physAddr );
5247*a1e26a70SApple OSS Distributions 	pagerOffset = sourceOffset;
5248*a1e26a70SApple OSS Distributions 
5249*a1e26a70SApple OSS Distributions 	size = length + pageOffset;
5250*a1e26a70SApple OSS Distributions 	physAddr -= pageOffset;
5251*a1e26a70SApple OSS Distributions 
5252*a1e26a70SApple OSS Distributions 	segLen += pageOffset;
5253*a1e26a70SApple OSS Distributions 	bytes = size;
5254*a1e26a70SApple OSS Distributions 	do{
5255*a1e26a70SApple OSS Distributions 		// in the middle of the loop only map whole pages
5256*a1e26a70SApple OSS Distributions 		if (segLen >= bytes) {
5257*a1e26a70SApple OSS Distributions 			segLen = bytes;
5258*a1e26a70SApple OSS Distributions 		} else if (segLen != trunc_page_64(segLen)) {
5259*a1e26a70SApple OSS Distributions 			err = kIOReturnVMError;
5260*a1e26a70SApple OSS Distributions 		}
5261*a1e26a70SApple OSS Distributions 		if (physAddr != trunc_page_64(physAddr)) {
5262*a1e26a70SApple OSS Distributions 			err = kIOReturnBadArgument;
5263*a1e26a70SApple OSS Distributions 		}
5264*a1e26a70SApple OSS Distributions 
5265*a1e26a70SApple OSS Distributions 		if (kIOReturnSuccess != err) {
5266*a1e26a70SApple OSS Distributions 			break;
5267*a1e26a70SApple OSS Distributions 		}
5268*a1e26a70SApple OSS Distributions 
5269*a1e26a70SApple OSS Distributions #if DEBUG || DEVELOPMENT
5270*a1e26a70SApple OSS Distributions 		if ((kIOMemoryTypeUPL != type)
5271*a1e26a70SApple OSS Distributions 		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
5272*a1e26a70SApple OSS Distributions 			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
5273*a1e26a70SApple OSS Distributions 			    physAddr, (uint64_t)segLen);
5274*a1e26a70SApple OSS Distributions 		}
5275*a1e26a70SApple OSS Distributions #endif /* DEBUG || DEVELOPMENT */
5276*a1e26a70SApple OSS Distributions 
5277*a1e26a70SApple OSS Distributions 		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
5278*a1e26a70SApple OSS Distributions 		for (page = 0;
5279*a1e26a70SApple OSS Distributions 		    (page < segLen) && (KERN_SUCCESS == err);
5280*a1e26a70SApple OSS Distributions 		    page += chunk) {
5281*a1e26a70SApple OSS Distributions 			err = device_pager_populate_object(pager, pagerOffset,
5282*a1e26a70SApple OSS Distributions 			    (ppnum_t)(atop_64(physAddr + page)), chunk);
5283*a1e26a70SApple OSS Distributions 			pagerOffset += chunk;
5284*a1e26a70SApple OSS Distributions 		}
5285*a1e26a70SApple OSS Distributions 
5286*a1e26a70SApple OSS Distributions 		assert(KERN_SUCCESS == err);
5287*a1e26a70SApple OSS Distributions 		if (err) {
5288*a1e26a70SApple OSS Distributions 			break;
5289*a1e26a70SApple OSS Distributions 		}
5290*a1e26a70SApple OSS Distributions 
5291*a1e26a70SApple OSS Distributions 		// This call to vm_fault causes an early pmap level resolution
5292*a1e26a70SApple OSS Distributions 		// of the mappings created above for kernel mappings, since
5293*a1e26a70SApple OSS Distributions 		// faulting in later can't take place from interrupt level.
5294*a1e26a70SApple OSS Distributions 		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
5295*a1e26a70SApple OSS Distributions 			err = vm_fault(addressMap,
5296*a1e26a70SApple OSS Distributions 			    (vm_map_offset_t)trunc_page_64(address),
5297*a1e26a70SApple OSS Distributions 			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
5298*a1e26a70SApple OSS Distributions 			    FALSE, VM_KERN_MEMORY_NONE,
5299*a1e26a70SApple OSS Distributions 			    THREAD_UNINT, NULL,
5300*a1e26a70SApple OSS Distributions 			    (vm_map_offset_t)0);
5301*a1e26a70SApple OSS Distributions 
5302*a1e26a70SApple OSS Distributions 			if (KERN_SUCCESS != err) {
5303*a1e26a70SApple OSS Distributions 				break;
5304*a1e26a70SApple OSS Distributions 			}
5305*a1e26a70SApple OSS Distributions 		}
5306*a1e26a70SApple OSS Distributions 
5307*a1e26a70SApple OSS Distributions 		sourceOffset += segLen - pageOffset;
5308*a1e26a70SApple OSS Distributions 		address += segLen;
5309*a1e26a70SApple OSS Distributions 		bytes -= segLen;
5310*a1e26a70SApple OSS Distributions 		pageOffset = 0;
5311*a1e26a70SApple OSS Distributions 	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
5312*a1e26a70SApple OSS Distributions 
5313*a1e26a70SApple OSS Distributions 	if (bytes) {
5314*a1e26a70SApple OSS Distributions 		err = kIOReturnBadArgument;
5315*a1e26a70SApple OSS Distributions 	}
5316*a1e26a70SApple OSS Distributions 
5317*a1e26a70SApple OSS Distributions 	return err;
5318*a1e26a70SApple OSS Distributions }
5319*a1e26a70SApple OSS Distributions 
5320*a1e26a70SApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5321*a1e26a70SApple OSS Distributions IOMemoryDescriptor::doUnmap(
5322*a1e26a70SApple OSS Distributions 	vm_map_t                addressMap,
5323*a1e26a70SApple OSS Distributions 	IOVirtualAddress        __address,
5324*a1e26a70SApple OSS Distributions 	IOByteCount             __length )
5325*a1e26a70SApple OSS Distributions {
5326*a1e26a70SApple OSS Distributions 	IOReturn          err;
5327*a1e26a70SApple OSS Distributions 	IOMemoryMap *     mapping;
5328*a1e26a70SApple OSS Distributions 	mach_vm_address_t address;
5329*a1e26a70SApple OSS Distributions 	mach_vm_size_t    length;
5330*a1e26a70SApple OSS Distributions 
5331*a1e26a70SApple OSS Distributions 	if (__length) {
5332*a1e26a70SApple OSS Distributions 		panic("doUnmap");
5333*a1e26a70SApple OSS Distributions 	}
5334*a1e26a70SApple OSS Distributions 
5335*a1e26a70SApple OSS Distributions 	mapping = (IOMemoryMap *) __address;
5336*a1e26a70SApple OSS Distributions 	addressMap = mapping->fAddressMap;
5337*a1e26a70SApple OSS Distributions 	address    = mapping->fAddress;
5338*a1e26a70SApple OSS Distributions 	length     = mapping->fLength;
5339*a1e26a70SApple OSS Distributions 
5340*a1e26a70SApple OSS Distributions 	if (kIOMapOverwrite & mapping->fOptions) {
5341*a1e26a70SApple OSS Distributions 		err = KERN_SUCCESS;
5342*a1e26a70SApple OSS Distributions 	} else {
5343*a1e26a70SApple OSS Distributions 		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
5344*a1e26a70SApple OSS Distributions 			addressMap = IOPageableMapForAddress( address );
5345*a1e26a70SApple OSS Distributions 		}
5346*a1e26a70SApple OSS Distributions #if DEBUG
5347*a1e26a70SApple OSS Distributions 		if (kIOLogMapping & gIOKitDebug) {
5348*a1e26a70SApple OSS Distributions 			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
5349*a1e26a70SApple OSS Distributions 			    addressMap, address, length );
5350*a1e26a70SApple OSS Distributions 		}
5351*a1e26a70SApple OSS Distributions #endif
5352*a1e26a70SApple OSS Distributions 		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
5353*a1e26a70SApple OSS Distributions 		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
5354*a1e26a70SApple OSS Distributions 			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
5355*a1e26a70SApple OSS Distributions 		}
5356*a1e26a70SApple OSS Distributions 	}
5357*a1e26a70SApple OSS Distributions 
5358*a1e26a70SApple OSS Distributions #if IOTRACKING
5359*a1e26a70SApple OSS Distributions 	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
5360*a1e26a70SApple OSS Distributions #endif /* IOTRACKING */
5361*a1e26a70SApple OSS Distributions 
5362*a1e26a70SApple OSS Distributions 	return err;
5363*a1e26a70SApple OSS Distributions }
5364*a1e26a70SApple OSS Distributions 
5365*a1e26a70SApple OSS Distributions IOReturn
redirect(task_t safeTask,bool doRedirect)5366*a1e26a70SApple OSS Distributions IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
5367*a1e26a70SApple OSS Distributions {
5368*a1e26a70SApple OSS Distributions 	IOReturn            err = kIOReturnSuccess;
5369*a1e26a70SApple OSS Distributions 	IOMemoryMap *       mapping = NULL;
5370*a1e26a70SApple OSS Distributions 	OSSharedPtr<OSIterator>        iter;
5371*a1e26a70SApple OSS Distributions 
5372*a1e26a70SApple OSS Distributions 	LOCK;
5373*a1e26a70SApple OSS Distributions 
5374*a1e26a70SApple OSS Distributions 	if (doRedirect) {
5375*a1e26a70SApple OSS Distributions 		_flags |= kIOMemoryRedirected;
5376*a1e26a70SApple OSS Distributions 	} else {
5377*a1e26a70SApple OSS Distributions 		_flags &= ~kIOMemoryRedirected;
5378*a1e26a70SApple OSS Distributions 	}
5379*a1e26a70SApple OSS Distributions 
5380*a1e26a70SApple OSS Distributions 	do {
5381*a1e26a70SApple OSS Distributions 		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
5382*a1e26a70SApple OSS Distributions 			memory_object_t   pager;
5383*a1e26a70SApple OSS Distributions 
5384*a1e26a70SApple OSS Distributions 			if (reserved) {
5385*a1e26a70SApple OSS Distributions 				pager = (memory_object_t) reserved->dp.devicePager;
5386*a1e26a70SApple OSS Distributions 			} else {
5387*a1e26a70SApple OSS Distributions 				pager = MACH_PORT_NULL;
5388*a1e26a70SApple OSS Distributions 			}
5389*a1e26a70SApple OSS Distributions 
5390*a1e26a70SApple OSS Distributions 			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
5391*a1e26a70SApple OSS Distributions 				mapping->redirect( safeTask, doRedirect );
5392*a1e26a70SApple OSS Distributions 				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
5393*a1e26a70SApple OSS Distributions 					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
5394*a1e26a70SApple OSS Distributions 				}
5395*a1e26a70SApple OSS Distributions 			}
5396*a1e26a70SApple OSS Distributions 
5397*a1e26a70SApple OSS Distributions 			iter.reset();
5398*a1e26a70SApple OSS Distributions 		}
5399*a1e26a70SApple OSS Distributions 	} while (false);
5400*a1e26a70SApple OSS Distributions 
5401*a1e26a70SApple OSS Distributions 	if (!doRedirect) {
5402*a1e26a70SApple OSS Distributions 		WAKEUP;
5403*a1e26a70SApple OSS Distributions 	}
5404*a1e26a70SApple OSS Distributions 
5405*a1e26a70SApple OSS Distributions 	UNLOCK;
5406*a1e26a70SApple OSS Distributions 
5407*a1e26a70SApple OSS Distributions #ifndef __LP64__
5408*a1e26a70SApple OSS Distributions 	// temporary binary compatibility
5409*a1e26a70SApple OSS Distributions 	IOSubMemoryDescriptor * subMem;
5410*a1e26a70SApple OSS Distributions 	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
5411*a1e26a70SApple OSS Distributions 		err = subMem->redirect( safeTask, doRedirect );
5412*a1e26a70SApple OSS Distributions 	} else {
5413*a1e26a70SApple OSS Distributions 		err = kIOReturnSuccess;
5414*a1e26a70SApple OSS Distributions 	}
5415*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
5416*a1e26a70SApple OSS Distributions 
5417*a1e26a70SApple OSS Distributions 	return err;
5418*a1e26a70SApple OSS Distributions }
5419*a1e26a70SApple OSS Distributions 
5420*a1e26a70SApple OSS Distributions IOReturn
redirect(task_t safeTask,bool doRedirect)5421*a1e26a70SApple OSS Distributions IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
5422*a1e26a70SApple OSS Distributions {
5423*a1e26a70SApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
5424*a1e26a70SApple OSS Distributions 
5425*a1e26a70SApple OSS Distributions 	if (fSuperMap) {
5426*a1e26a70SApple OSS Distributions //        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
5427*a1e26a70SApple OSS Distributions 	} else {
5428*a1e26a70SApple OSS Distributions 		LOCK;
5429*a1e26a70SApple OSS Distributions 
5430*a1e26a70SApple OSS Distributions 		do{
5431*a1e26a70SApple OSS Distributions 			if (!fAddress) {
5432*a1e26a70SApple OSS Distributions 				break;
5433*a1e26a70SApple OSS Distributions 			}
5434*a1e26a70SApple OSS Distributions 			if (!fAddressMap) {
5435*a1e26a70SApple OSS Distributions 				break;
5436*a1e26a70SApple OSS Distributions 			}
5437*a1e26a70SApple OSS Distributions 
5438*a1e26a70SApple OSS Distributions 			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
5439*a1e26a70SApple OSS Distributions 			    && (0 == (fOptions & kIOMapStatic))) {
5440*a1e26a70SApple OSS Distributions 				IOUnmapPages( fAddressMap, fAddress, fLength );
5441*a1e26a70SApple OSS Distributions 				err = kIOReturnSuccess;
5442*a1e26a70SApple OSS Distributions #if DEBUG
5443*a1e26a70SApple OSS Distributions 				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
5444*a1e26a70SApple OSS Distributions #endif
5445*a1e26a70SApple OSS Distributions 			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
5446*a1e26a70SApple OSS Distributions 				IOOptionBits newMode;
5447*a1e26a70SApple OSS Distributions 				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
5448*a1e26a70SApple OSS Distributions 				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
5449*a1e26a70SApple OSS Distributions 			}
5450*a1e26a70SApple OSS Distributions 		}while (false);
5451*a1e26a70SApple OSS Distributions 		UNLOCK;
5452*a1e26a70SApple OSS Distributions 	}
5453*a1e26a70SApple OSS Distributions 
5454*a1e26a70SApple OSS Distributions 	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5455*a1e26a70SApple OSS Distributions 	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
5456*a1e26a70SApple OSS Distributions 	    && safeTask
5457*a1e26a70SApple OSS Distributions 	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
5458*a1e26a70SApple OSS Distributions 		fMemory->redirect(safeTask, doRedirect);
5459*a1e26a70SApple OSS Distributions 	}
5460*a1e26a70SApple OSS Distributions 
5461*a1e26a70SApple OSS Distributions 	return err;
5462*a1e26a70SApple OSS Distributions }
5463*a1e26a70SApple OSS Distributions 
5464*a1e26a70SApple OSS Distributions IOReturn
unmap(void)5465*a1e26a70SApple OSS Distributions IOMemoryMap::unmap( void )
5466*a1e26a70SApple OSS Distributions {
5467*a1e26a70SApple OSS Distributions 	IOReturn    err;
5468*a1e26a70SApple OSS Distributions 
5469*a1e26a70SApple OSS Distributions 	LOCK;
5470*a1e26a70SApple OSS Distributions 
5471*a1e26a70SApple OSS Distributions 	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5472*a1e26a70SApple OSS Distributions 	    && (0 == (kIOMapStatic & fOptions))) {
5473*a1e26a70SApple OSS Distributions 		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5474*a1e26a70SApple OSS Distributions 	} else {
5475*a1e26a70SApple OSS Distributions 		err = kIOReturnSuccess;
5476*a1e26a70SApple OSS Distributions 	}
5477*a1e26a70SApple OSS Distributions 
5478*a1e26a70SApple OSS Distributions 	if (fAddressMap) {
5479*a1e26a70SApple OSS Distributions 		vm_map_deallocate(fAddressMap);
5480*a1e26a70SApple OSS Distributions 		fAddressMap = NULL;
5481*a1e26a70SApple OSS Distributions 	}
5482*a1e26a70SApple OSS Distributions 
5483*a1e26a70SApple OSS Distributions 	fAddress = 0;
5484*a1e26a70SApple OSS Distributions 
5485*a1e26a70SApple OSS Distributions 	UNLOCK;
5486*a1e26a70SApple OSS Distributions 
5487*a1e26a70SApple OSS Distributions 	return err;
5488*a1e26a70SApple OSS Distributions }
5489*a1e26a70SApple OSS Distributions 
5490*a1e26a70SApple OSS Distributions void
taskDied(void)5491*a1e26a70SApple OSS Distributions IOMemoryMap::taskDied( void )
5492*a1e26a70SApple OSS Distributions {
5493*a1e26a70SApple OSS Distributions 	LOCK;
5494*a1e26a70SApple OSS Distributions 	if (fUserClientUnmap) {
5495*a1e26a70SApple OSS Distributions 		unmap();
5496*a1e26a70SApple OSS Distributions 	}
5497*a1e26a70SApple OSS Distributions #if IOTRACKING
5498*a1e26a70SApple OSS Distributions 	else {
5499*a1e26a70SApple OSS Distributions 		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
5500*a1e26a70SApple OSS Distributions 	}
5501*a1e26a70SApple OSS Distributions #endif /* IOTRACKING */
5502*a1e26a70SApple OSS Distributions 
5503*a1e26a70SApple OSS Distributions 	if (fAddressMap) {
5504*a1e26a70SApple OSS Distributions 		vm_map_deallocate(fAddressMap);
5505*a1e26a70SApple OSS Distributions 		fAddressMap = NULL;
5506*a1e26a70SApple OSS Distributions 	}
5507*a1e26a70SApple OSS Distributions 	fAddressTask = NULL;
5508*a1e26a70SApple OSS Distributions 	fAddress     = 0;
5509*a1e26a70SApple OSS Distributions 	UNLOCK;
5510*a1e26a70SApple OSS Distributions }
5511*a1e26a70SApple OSS Distributions 
5512*a1e26a70SApple OSS Distributions IOReturn
userClientUnmap(void)5513*a1e26a70SApple OSS Distributions IOMemoryMap::userClientUnmap( void )
5514*a1e26a70SApple OSS Distributions {
5515*a1e26a70SApple OSS Distributions 	fUserClientUnmap = true;
5516*a1e26a70SApple OSS Distributions 	return kIOReturnSuccess;
5517*a1e26a70SApple OSS Distributions }
5518*a1e26a70SApple OSS Distributions 
5519*a1e26a70SApple OSS Distributions // Overload the release mechanism.  All mappings must be a member
5520*a1e26a70SApple OSS Distributions // of a memory descriptors _mappings set.  This means that we
5521*a1e26a70SApple OSS Distributions // always have 2 references on a mapping.  When either of these mappings
5522*a1e26a70SApple OSS Distributions // are released we need to free ourselves.
5523*a1e26a70SApple OSS Distributions void
taggedRelease(const void * tag) const5524*a1e26a70SApple OSS Distributions IOMemoryMap::taggedRelease(const void *tag) const
5525*a1e26a70SApple OSS Distributions {
5526*a1e26a70SApple OSS Distributions 	LOCK;
5527*a1e26a70SApple OSS Distributions 	super::taggedRelease(tag, 2);
5528*a1e26a70SApple OSS Distributions 	UNLOCK;
5529*a1e26a70SApple OSS Distributions }
5530*a1e26a70SApple OSS Distributions 
5531*a1e26a70SApple OSS Distributions void
free()5532*a1e26a70SApple OSS Distributions IOMemoryMap::free()
5533*a1e26a70SApple OSS Distributions {
5534*a1e26a70SApple OSS Distributions 	unmap();
5535*a1e26a70SApple OSS Distributions 
5536*a1e26a70SApple OSS Distributions 	if (fMemory) {
5537*a1e26a70SApple OSS Distributions 		LOCK;
5538*a1e26a70SApple OSS Distributions 		fMemory->removeMapping(this);
5539*a1e26a70SApple OSS Distributions 		UNLOCK;
5540*a1e26a70SApple OSS Distributions 		fMemory.reset();
5541*a1e26a70SApple OSS Distributions 	}
5542*a1e26a70SApple OSS Distributions 
5543*a1e26a70SApple OSS Distributions 	if (fSuperMap) {
5544*a1e26a70SApple OSS Distributions 		fSuperMap.reset();
5545*a1e26a70SApple OSS Distributions 	}
5546*a1e26a70SApple OSS Distributions 
5547*a1e26a70SApple OSS Distributions 	if (fRedirUPL) {
5548*a1e26a70SApple OSS Distributions 		upl_commit(fRedirUPL, NULL, 0);
5549*a1e26a70SApple OSS Distributions 		upl_deallocate(fRedirUPL);
5550*a1e26a70SApple OSS Distributions 	}
5551*a1e26a70SApple OSS Distributions 
5552*a1e26a70SApple OSS Distributions 	super::free();
5553*a1e26a70SApple OSS Distributions }
5554*a1e26a70SApple OSS Distributions 
5555*a1e26a70SApple OSS Distributions IOByteCount
getLength()5556*a1e26a70SApple OSS Distributions IOMemoryMap::getLength()
5557*a1e26a70SApple OSS Distributions {
5558*a1e26a70SApple OSS Distributions 	return fLength;
5559*a1e26a70SApple OSS Distributions }
5560*a1e26a70SApple OSS Distributions 
5561*a1e26a70SApple OSS Distributions IOVirtualAddress
getVirtualAddress()5562*a1e26a70SApple OSS Distributions IOMemoryMap::getVirtualAddress()
5563*a1e26a70SApple OSS Distributions {
5564*a1e26a70SApple OSS Distributions #ifndef __LP64__
5565*a1e26a70SApple OSS Distributions 	if (fSuperMap) {
5566*a1e26a70SApple OSS Distributions 		fSuperMap->getVirtualAddress();
5567*a1e26a70SApple OSS Distributions 	} else if (fAddressMap
5568*a1e26a70SApple OSS Distributions 	    && vm_map_is_64bit(fAddressMap)
5569*a1e26a70SApple OSS Distributions 	    && (sizeof(IOVirtualAddress) < 8)) {
5570*a1e26a70SApple OSS Distributions 		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
5571*a1e26a70SApple OSS Distributions 	}
5572*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
5573*a1e26a70SApple OSS Distributions 
5574*a1e26a70SApple OSS Distributions 	return fAddress;
5575*a1e26a70SApple OSS Distributions }
5576*a1e26a70SApple OSS Distributions 
5577*a1e26a70SApple OSS Distributions #ifndef __LP64__
5578*a1e26a70SApple OSS Distributions mach_vm_address_t
getAddress()5579*a1e26a70SApple OSS Distributions IOMemoryMap::getAddress()
5580*a1e26a70SApple OSS Distributions {
5581*a1e26a70SApple OSS Distributions 	return fAddress;
5582*a1e26a70SApple OSS Distributions }
5583*a1e26a70SApple OSS Distributions 
5584*a1e26a70SApple OSS Distributions mach_vm_size_t
getSize()5585*a1e26a70SApple OSS Distributions IOMemoryMap::getSize()
5586*a1e26a70SApple OSS Distributions {
5587*a1e26a70SApple OSS Distributions 	return fLength;
5588*a1e26a70SApple OSS Distributions }
5589*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
5590*a1e26a70SApple OSS Distributions 
5591*a1e26a70SApple OSS Distributions 
5592*a1e26a70SApple OSS Distributions task_t
getAddressTask()5593*a1e26a70SApple OSS Distributions IOMemoryMap::getAddressTask()
5594*a1e26a70SApple OSS Distributions {
5595*a1e26a70SApple OSS Distributions 	if (fSuperMap) {
5596*a1e26a70SApple OSS Distributions 		return fSuperMap->getAddressTask();
5597*a1e26a70SApple OSS Distributions 	} else {
5598*a1e26a70SApple OSS Distributions 		return fAddressTask;
5599*a1e26a70SApple OSS Distributions 	}
5600*a1e26a70SApple OSS Distributions }
5601*a1e26a70SApple OSS Distributions 
5602*a1e26a70SApple OSS Distributions IOOptionBits
getMapOptions()5603*a1e26a70SApple OSS Distributions IOMemoryMap::getMapOptions()
5604*a1e26a70SApple OSS Distributions {
5605*a1e26a70SApple OSS Distributions 	return fOptions;
5606*a1e26a70SApple OSS Distributions }
5607*a1e26a70SApple OSS Distributions 
5608*a1e26a70SApple OSS Distributions IOMemoryDescriptor *
getMemoryDescriptor()5609*a1e26a70SApple OSS Distributions IOMemoryMap::getMemoryDescriptor()
5610*a1e26a70SApple OSS Distributions {
5611*a1e26a70SApple OSS Distributions 	return fMemory.get();
5612*a1e26a70SApple OSS Distributions }
5613*a1e26a70SApple OSS Distributions 
5614*a1e26a70SApple OSS Distributions IOMemoryMap *
copyCompatible(IOMemoryMap * newMapping)5615*a1e26a70SApple OSS Distributions IOMemoryMap::copyCompatible(
5616*a1e26a70SApple OSS Distributions 	IOMemoryMap * newMapping )
5617*a1e26a70SApple OSS Distributions {
5618*a1e26a70SApple OSS Distributions 	task_t              task      = newMapping->getAddressTask();
5619*a1e26a70SApple OSS Distributions 	mach_vm_address_t   toAddress = newMapping->fAddress;
5620*a1e26a70SApple OSS Distributions 	IOOptionBits        _options  = newMapping->fOptions;
5621*a1e26a70SApple OSS Distributions 	mach_vm_size_t      _offset   = newMapping->fOffset;
5622*a1e26a70SApple OSS Distributions 	mach_vm_size_t      _length   = newMapping->fLength;
5623*a1e26a70SApple OSS Distributions 
5624*a1e26a70SApple OSS Distributions 	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5625*a1e26a70SApple OSS Distributions 		return NULL;
5626*a1e26a70SApple OSS Distributions 	}
5627*a1e26a70SApple OSS Distributions 	if ((fOptions ^ _options) & kIOMapReadOnly) {
5628*a1e26a70SApple OSS Distributions 		return NULL;
5629*a1e26a70SApple OSS Distributions 	}
5630*a1e26a70SApple OSS Distributions 	if ((fOptions ^ _options) & kIOMapGuardedMask) {
5631*a1e26a70SApple OSS Distributions 		return NULL;
5632*a1e26a70SApple OSS Distributions 	}
5633*a1e26a70SApple OSS Distributions 	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5634*a1e26a70SApple OSS Distributions 	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
5635*a1e26a70SApple OSS Distributions 		return NULL;
5636*a1e26a70SApple OSS Distributions 	}
5637*a1e26a70SApple OSS Distributions 
5638*a1e26a70SApple OSS Distributions 	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5639*a1e26a70SApple OSS Distributions 		return NULL;
5640*a1e26a70SApple OSS Distributions 	}
5641*a1e26a70SApple OSS Distributions 
5642*a1e26a70SApple OSS Distributions 	if (_offset < fOffset) {
5643*a1e26a70SApple OSS Distributions 		return NULL;
5644*a1e26a70SApple OSS Distributions 	}
5645*a1e26a70SApple OSS Distributions 
5646*a1e26a70SApple OSS Distributions 	_offset -= fOffset;
5647*a1e26a70SApple OSS Distributions 
5648*a1e26a70SApple OSS Distributions 	if ((_offset + _length) > fLength) {
5649*a1e26a70SApple OSS Distributions 		return NULL;
5650*a1e26a70SApple OSS Distributions 	}
5651*a1e26a70SApple OSS Distributions 
5652*a1e26a70SApple OSS Distributions 	if ((fLength == _length) && (!_offset)) {
5653*a1e26a70SApple OSS Distributions 		retain();
5654*a1e26a70SApple OSS Distributions 		newMapping = this;
5655*a1e26a70SApple OSS Distributions 	} else {
5656*a1e26a70SApple OSS Distributions 		newMapping->fSuperMap.reset(this, OSRetain);
5657*a1e26a70SApple OSS Distributions 		newMapping->fOffset   = fOffset + _offset;
5658*a1e26a70SApple OSS Distributions 		newMapping->fAddress  = fAddress + _offset;
5659*a1e26a70SApple OSS Distributions 	}
5660*a1e26a70SApple OSS Distributions 
5661*a1e26a70SApple OSS Distributions 	return newMapping;
5662*a1e26a70SApple OSS Distributions }
5663*a1e26a70SApple OSS Distributions 
5664*a1e26a70SApple OSS Distributions IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5665*a1e26a70SApple OSS Distributions IOMemoryMap::wireRange(
5666*a1e26a70SApple OSS Distributions 	uint32_t                options,
5667*a1e26a70SApple OSS Distributions 	mach_vm_size_t          offset,
5668*a1e26a70SApple OSS Distributions 	mach_vm_size_t          length)
5669*a1e26a70SApple OSS Distributions {
5670*a1e26a70SApple OSS Distributions 	IOReturn kr;
5671*a1e26a70SApple OSS Distributions 	mach_vm_address_t start = trunc_page_64(fAddress + offset);
5672*a1e26a70SApple OSS Distributions 	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
5673*a1e26a70SApple OSS Distributions 	vm_prot_t prot;
5674*a1e26a70SApple OSS Distributions 
5675*a1e26a70SApple OSS Distributions 	prot = (kIODirectionOutIn & options);
5676*a1e26a70SApple OSS Distributions 	if (prot) {
5677*a1e26a70SApple OSS Distributions 		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5678*a1e26a70SApple OSS Distributions 	} else {
5679*a1e26a70SApple OSS Distributions 		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5680*a1e26a70SApple OSS Distributions 	}
5681*a1e26a70SApple OSS Distributions 
5682*a1e26a70SApple OSS Distributions 	return kr;
5683*a1e26a70SApple OSS Distributions }
5684*a1e26a70SApple OSS Distributions 
5685*a1e26a70SApple OSS Distributions 
5686*a1e26a70SApple OSS Distributions IOPhysicalAddress
5687*a1e26a70SApple OSS Distributions #ifdef __LP64__
getPhysicalSegment(IOByteCount _offset,IOPhysicalLength * _length,IOOptionBits _options)5688*a1e26a70SApple OSS Distributions IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
5689*a1e26a70SApple OSS Distributions #else /* !__LP64__ */
5690*a1e26a70SApple OSS Distributions IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
5691*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
5692*a1e26a70SApple OSS Distributions {
5693*a1e26a70SApple OSS Distributions 	IOPhysicalAddress   address;
5694*a1e26a70SApple OSS Distributions 
5695*a1e26a70SApple OSS Distributions 	LOCK;
5696*a1e26a70SApple OSS Distributions #ifdef __LP64__
5697*a1e26a70SApple OSS Distributions 	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
5698*a1e26a70SApple OSS Distributions #else /* !__LP64__ */
5699*a1e26a70SApple OSS Distributions 	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
5700*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
5701*a1e26a70SApple OSS Distributions 	UNLOCK;
5702*a1e26a70SApple OSS Distributions 
5703*a1e26a70SApple OSS Distributions 	return address;
5704*a1e26a70SApple OSS Distributions }
5705*a1e26a70SApple OSS Distributions 
5706*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5707*a1e26a70SApple OSS Distributions 
5708*a1e26a70SApple OSS Distributions #undef super
5709*a1e26a70SApple OSS Distributions #define super OSObject
5710*a1e26a70SApple OSS Distributions 
5711*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5712*a1e26a70SApple OSS Distributions 
5713*a1e26a70SApple OSS Distributions void
initialize(void)5714*a1e26a70SApple OSS Distributions IOMemoryDescriptor::initialize( void )
5715*a1e26a70SApple OSS Distributions {
5716*a1e26a70SApple OSS Distributions 	if (NULL == gIOMemoryLock) {
5717*a1e26a70SApple OSS Distributions 		gIOMemoryLock = IORecursiveLockAlloc();
5718*a1e26a70SApple OSS Distributions 	}
5719*a1e26a70SApple OSS Distributions 
5720*a1e26a70SApple OSS Distributions 	gIOLastPage = IOGetLastPageNumber();
5721*a1e26a70SApple OSS Distributions }
5722*a1e26a70SApple OSS Distributions 
5723*a1e26a70SApple OSS Distributions void
free(void)5724*a1e26a70SApple OSS Distributions IOMemoryDescriptor::free( void )
5725*a1e26a70SApple OSS Distributions {
5726*a1e26a70SApple OSS Distributions 	if (_mappings) {
5727*a1e26a70SApple OSS Distributions 		_mappings.reset();
5728*a1e26a70SApple OSS Distributions 	}
5729*a1e26a70SApple OSS Distributions 
5730*a1e26a70SApple OSS Distributions 	if (reserved) {
5731*a1e26a70SApple OSS Distributions 		cleanKernelReserved(reserved);
5732*a1e26a70SApple OSS Distributions 		IOFreeType(reserved, IOMemoryDescriptorReserved);
5733*a1e26a70SApple OSS Distributions 		reserved = NULL;
5734*a1e26a70SApple OSS Distributions 	}
5735*a1e26a70SApple OSS Distributions 	super::free();
5736*a1e26a70SApple OSS Distributions }
5737*a1e26a70SApple OSS Distributions 
5738*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5739*a1e26a70SApple OSS Distributions IOMemoryDescriptor::setMapping(
5740*a1e26a70SApple OSS Distributions 	task_t                  intoTask,
5741*a1e26a70SApple OSS Distributions 	IOVirtualAddress        mapAddress,
5742*a1e26a70SApple OSS Distributions 	IOOptionBits            options )
5743*a1e26a70SApple OSS Distributions {
5744*a1e26a70SApple OSS Distributions 	return createMappingInTask( intoTask, mapAddress,
5745*a1e26a70SApple OSS Distributions 	           options | kIOMapStatic,
5746*a1e26a70SApple OSS Distributions 	           0, getLength());
5747*a1e26a70SApple OSS Distributions }
5748*a1e26a70SApple OSS Distributions 
5749*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5750*a1e26a70SApple OSS Distributions IOMemoryDescriptor::map(
5751*a1e26a70SApple OSS Distributions 	IOOptionBits            options )
5752*a1e26a70SApple OSS Distributions {
5753*a1e26a70SApple OSS Distributions 	return createMappingInTask( kernel_task, 0,
5754*a1e26a70SApple OSS Distributions 	           options | kIOMapAnywhere,
5755*a1e26a70SApple OSS Distributions 	           0, getLength());
5756*a1e26a70SApple OSS Distributions }
5757*a1e26a70SApple OSS Distributions 
5758*a1e26a70SApple OSS Distributions #ifndef __LP64__
5759*a1e26a70SApple OSS Distributions OSSharedPtr<IOMemoryMap>
map(task_t intoTask,IOVirtualAddress atAddress,IOOptionBits options,IOByteCount offset,IOByteCount length)5760*a1e26a70SApple OSS Distributions IOMemoryDescriptor::map(
5761*a1e26a70SApple OSS Distributions 	task_t                  intoTask,
5762*a1e26a70SApple OSS Distributions 	IOVirtualAddress        atAddress,
5763*a1e26a70SApple OSS Distributions 	IOOptionBits            options,
5764*a1e26a70SApple OSS Distributions 	IOByteCount             offset,
5765*a1e26a70SApple OSS Distributions 	IOByteCount             length )
5766*a1e26a70SApple OSS Distributions {
5767*a1e26a70SApple OSS Distributions 	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
5768*a1e26a70SApple OSS Distributions 		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
5769*a1e26a70SApple OSS Distributions 		return NULL;
5770*a1e26a70SApple OSS Distributions 	}
5771*a1e26a70SApple OSS Distributions 
5772*a1e26a70SApple OSS Distributions 	return createMappingInTask(intoTask, atAddress,
5773*a1e26a70SApple OSS Distributions 	           options, offset, length);
5774*a1e26a70SApple OSS Distributions }
5775*a1e26a70SApple OSS Distributions #endif /* !__LP64__ */
5776*a1e26a70SApple OSS Distributions 
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t                  intoTask,
	mach_vm_address_t       atAddress,
	IOOptionBits            options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	// Create a mapping of [offset, offset + length) of this descriptor in
	// intoTask's address space. Returns a retained map on success, or a
	// NULL shared pointer on failure.
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	// Zero length means "map the entire descriptor".
	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

#if 136275805
	/*
	 * XXX: Redundantly check the mapping size here so that failure stack traces
	 *      are more useful. This has no functional value but is helpful because
	 *      telemetry traps can currently only capture the last five calls and
	 *      so we want to trap as shallow as possible in a select few cases
	 *      where we anticipate issues.
	 *
	 *      When telemetry collection is complete, this will be removed.
	 */
	if (__improbable(mapping && !vm_map_is_map_size_valid(
		    get_task_map(intoTask), length, /* no_soft_limit */ false))) {
		mapping->release();
		mapping = NULL;
	}
#endif /* 136275805 */

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		// With kIOMap64Bit, makeMapping consumes the reference to the
		// IOMemoryMap passed through the address argument and returns
		// the final (possibly shared, pre-existing) map, retained.
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
5836*a1e26a70SApple OSS Distributions 
#ifndef __LP64__ // there is only a 64 bit version for LP64
// Legacy 32-bit entry point; widens the offset and forwards to the
// mach_vm_size_t variant.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5846*a1e26a70SApple OSS Distributions 
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	// Repoint this established mapping at newBackingMemory (if non-NULL),
	// keeping the same client virtual address range. Returns
	// kIOReturnSuccess, or kIOReturnError if the remap fails.
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	// Only a mapping that actually has an address and a target map can be
	// redirected; otherwise this is a successful no-op.
	if (fAddress && fAddressMap) {
		do{
			// Remember whether the current backing descriptor is a
			// physical-range descriptor; those get their pages unmapped
			// below before the switch.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// First redirect call for a sole-owner memory entry: take an
			// IOPL with UPL_BLOCK_ACCESS to hold off access to the old
			// pages until the new backing is committed below.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					// Disabled path (dead code kept for reference).
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// Remap in place: kIOMapReference | kIOMapUnique reuses
					// this IOMemoryMap object; makeMapping must hand back
					// the very same map or the redirect failed.
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Release the access-blocking UPL taken above, re-enabling
				// access through the (now redirected) mapping.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				// Disabled path (dead code kept for reference).
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5907*a1e26a70SApple OSS Distributions 
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// With kIOMap64Bit set, __address carries an IOMemoryMap object (not a
	// virtual address). This routine consumes that reference and returns
	// the map to use — which may be a compatible pre-existing map rather
	// than the one passed in — or NULL on failure.
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result  = NULL;

	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Static mapping: adopt the passed-in map as-is; no VM work.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount       physLen;

//	    if (owner != this)		continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Physical descriptor: the mapped range must be one
				// contiguous physical segment; wrap it in a temporary
				// descriptor that doMap below will operate on.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
					        addMapping(result);
					        result->setMemoryDescriptor(this, offset);
					        return true;
					}
					return false;
				});
			}
			// Either a shareable map was found, or the caller demanded an
			// existing one (kIOMapReference); the passed-in map is no
			// longer needed unless it is itself the result.
			if (result || (options & kIOMapReference)) {
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		// No sub-descriptor was built above; map this descriptor itself.
		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
6005*a1e26a70SApple OSS Distributions 
6006*a1e26a70SApple OSS Distributions void
addMapping(IOMemoryMap * mapping)6007*a1e26a70SApple OSS Distributions IOMemoryDescriptor::addMapping(
6008*a1e26a70SApple OSS Distributions 	IOMemoryMap * mapping )
6009*a1e26a70SApple OSS Distributions {
6010*a1e26a70SApple OSS Distributions 	if (mapping) {
6011*a1e26a70SApple OSS Distributions 		if (NULL == _mappings) {
6012*a1e26a70SApple OSS Distributions 			_mappings = OSSet::withCapacity(1);
6013*a1e26a70SApple OSS Distributions 		}
6014*a1e26a70SApple OSS Distributions 		if (_mappings) {
6015*a1e26a70SApple OSS Distributions 			_mappings->setObject( mapping );
6016*a1e26a70SApple OSS Distributions 		}
6017*a1e26a70SApple OSS Distributions 	}
6018*a1e26a70SApple OSS Distributions }
6019*a1e26a70SApple OSS Distributions 
6020*a1e26a70SApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)6021*a1e26a70SApple OSS Distributions IOMemoryDescriptor::removeMapping(
6022*a1e26a70SApple OSS Distributions 	IOMemoryMap * mapping )
6023*a1e26a70SApple OSS Distributions {
6024*a1e26a70SApple OSS Distributions 	if (_mappings) {
6025*a1e26a70SApple OSS Distributions 		_mappings->removeObject( mapping);
6026*a1e26a70SApple OSS Distributions 	}
6027*a1e26a70SApple OSS Distributions }
6028*a1e26a70SApple OSS Distributions 
// Set the IOMapper option bits applied when this descriptor is mapped.
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}
6034*a1e26a70SApple OSS Distributions 
// Return the IOMapper option bits set via setMapperOptions().
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}
6040*a1e26a70SApple OSS Distributions 
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
//
// Each of these stubs always fails; IOMemoryDescriptor itself cannot be
// initialized through the legacy entry points.
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    length,
    IODirection  direction,
    task_t       task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            length,
	IODirection            direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    bool             asReference)
{
	return false;
}

// Obsolete; always returns NULL (no virtual segment available here).
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
6097*a1e26a70SApple OSS Distributions 
6098*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6099*a1e26a70SApple OSS Distributions 
bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	// Serialize the descriptor's ranges as an OSArray of dictionaries,
	// each with "address" and "length" keys. Returns false on any
	// allocation failure or if the range count changes mid-flight.
	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
	OSSharedPtr<OSObject>           values[2] = {NULL};
	OSSharedPtr<OSArray>            array;

	// Stack snapshot of one range; copied out under the lock.
	struct SerData {
		user_addr_t address;
		user_size_t length;
	};

	unsigned int index;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
	if (!vcopy) {
		return false;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (vcopy.size() == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < vcopy.size(); index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index, _task);
			vcopy[index].address = addr;
			vcopy[index].length  = len;
		}
	} else {
		// The descriptor changed out from under us.  Give up.
		UNLOCK;
		return false;
	}
	UNLOCK;

	// Build the dictionaries from the snapshot, outside the lock.
	for (index = 0; index < vcopy.size(); index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			return false;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			return false;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			return false;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	return array->serialize(s);
}
6174*a1e26a70SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6175*a1e26a70SApple OSS Distributions 
// Reserved vtable padding slots for binary compatibility; the used/unused
// split differs between LP64 and legacy 32-bit builds.
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
    struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6206*a1e26a70SApple OSS Distributions 
6207*a1e26a70SApple OSS Distributions /* ex-inline function implementation */
// Physical address of the first segment (offset 0); convenience wrapper
// around getPhysicalSegment().
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
6213*a1e26a70SApple OSS Distributions 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

// Factory: allocate a mixed-data object with the given initial capacity.
// Returns nullptr if allocation or initialization fails.
OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	OSSharedPtr<_IOMemoryDescriptorMixedData> instance =
	    OSMakeShared<_IOMemoryDescriptorMixedData>();
	if (!instance || instance->initWithCapacity(capacity)) {
		return instance;
	}
	return nullptr;
}
6225*a1e26a70SApple OSS Distributions 
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	// (Re)initialize with at least `capacity` bytes of backing storage.
	// May be called on an already-initialized object: an existing buffer
	// is released if it is too small or a zero capacity is requested.
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	if (!_data && capacity) {
		// Zero-filled variable-size allocation from the shared
		// KT_IOMD_MIXED_DATA kalloc type defined above.
		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6250*a1e26a70SApple OSS Distributions 
// OSObject destructor hook: release the backing buffer, then the object.
void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}
6257*a1e26a70SApple OSS Distributions 
// Release the backing buffer and reset all bookkeeping to the empty state.
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6265*a1e26a70SApple OSS Distributions 
6266*a1e26a70SApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6267*a1e26a70SApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6268*a1e26a70SApple OSS Distributions {
6269*a1e26a70SApple OSS Distributions 	const auto oldLength = getLength();
6270*a1e26a70SApple OSS Distributions 	size_t newLength;
6271*a1e26a70SApple OSS Distributions 	if (os_add_overflow(oldLength, length, &newLength)) {
6272*a1e26a70SApple OSS Distributions 		return false;
6273*a1e26a70SApple OSS Distributions 	}
6274*a1e26a70SApple OSS Distributions 
6275*a1e26a70SApple OSS Distributions 	if (!setLength(newLength)) {
6276*a1e26a70SApple OSS Distributions 		return false;
6277*a1e26a70SApple OSS Distributions 	}
6278*a1e26a70SApple OSS Distributions 
6279*a1e26a70SApple OSS Distributions 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6280*a1e26a70SApple OSS Distributions 	if (bytes) {
6281*a1e26a70SApple OSS Distributions 		bcopy(bytes, dest, length);
6282*a1e26a70SApple OSS Distributions 	}
6283*a1e26a70SApple OSS Distributions 
6284*a1e26a70SApple OSS Distributions 	return true;
6285*a1e26a70SApple OSS Distributions }
6286*a1e26a70SApple OSS Distributions 
bool
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	// Set the logical length, growing the backing buffer when it is
	// absent or too small. Shrinking never reallocates; capacity is kept.
	// Returns false only if reallocation fails (existing data intact).
	if (!_data || (length > _capacity)) {
		void *newData;

		// Zero-fills any newly added tail; preserves existing contents.
		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
		    NULL);
		if (!newData) {
			return false;
		}

		_data = newData;
		_capacity = length;
	}

	_length = length;
	return true;
}
6307*a1e26a70SApple OSS Distributions 
6308*a1e26a70SApple OSS Distributions const void *
getBytes() const6309*a1e26a70SApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6310*a1e26a70SApple OSS Distributions {
6311*a1e26a70SApple OSS Distributions 	return _length ? _data : nullptr;
6312*a1e26a70SApple OSS Distributions }
6313*a1e26a70SApple OSS Distributions 
6314*a1e26a70SApple OSS Distributions size_t
getLength() const6315*a1e26a70SApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6316*a1e26a70SApple OSS Distributions {
6317*a1e26a70SApple OSS Distributions 	return _data ? _length : 0;
6318*a1e26a70SApple OSS Distributions }
6319