xref: /xnu-11215.81.4/iokit/Kernel/IOMemoryDescriptor.cpp (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1*d4514f0bSApple OSS Distributions /*
2*d4514f0bSApple OSS Distributions  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*d4514f0bSApple OSS Distributions  *
4*d4514f0bSApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*d4514f0bSApple OSS Distributions  *
6*d4514f0bSApple OSS Distributions  * This file contains Original Code and/or Modifications of Original Code
7*d4514f0bSApple OSS Distributions  * as defined in and that are subject to the Apple Public Source License
8*d4514f0bSApple OSS Distributions  * Version 2.0 (the 'License'). You may not use this file except in
9*d4514f0bSApple OSS Distributions  * compliance with the License. The rights granted to you under the License
10*d4514f0bSApple OSS Distributions  * may not be used to create, or enable the creation or redistribution of,
11*d4514f0bSApple OSS Distributions  * unlawful or unlicensed copies of an Apple operating system, or to
12*d4514f0bSApple OSS Distributions  * circumvent, violate, or enable the circumvention or violation of, any
13*d4514f0bSApple OSS Distributions  * terms of an Apple operating system software license agreement.
14*d4514f0bSApple OSS Distributions  *
15*d4514f0bSApple OSS Distributions  * Please obtain a copy of the License at
16*d4514f0bSApple OSS Distributions  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*d4514f0bSApple OSS Distributions  *
18*d4514f0bSApple OSS Distributions  * The Original Code and all software distributed under the License are
19*d4514f0bSApple OSS Distributions  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*d4514f0bSApple OSS Distributions  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*d4514f0bSApple OSS Distributions  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*d4514f0bSApple OSS Distributions  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*d4514f0bSApple OSS Distributions  * Please see the License for the specific language governing rights and
24*d4514f0bSApple OSS Distributions  * limitations under the License.
25*d4514f0bSApple OSS Distributions  *
26*d4514f0bSApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*d4514f0bSApple OSS Distributions  */
28*d4514f0bSApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*d4514f0bSApple OSS Distributions 
30*d4514f0bSApple OSS Distributions #include <sys/cdefs.h>
31*d4514f0bSApple OSS Distributions 
32*d4514f0bSApple OSS Distributions #include <IOKit/assert.h>
33*d4514f0bSApple OSS Distributions #include <IOKit/system.h>
34*d4514f0bSApple OSS Distributions #include <IOKit/IOLib.h>
35*d4514f0bSApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*d4514f0bSApple OSS Distributions #include <IOKit/IOMapper.h>
37*d4514f0bSApple OSS Distributions #include <IOKit/IODMACommand.h>
38*d4514f0bSApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*d4514f0bSApple OSS Distributions 
40*d4514f0bSApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*d4514f0bSApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*d4514f0bSApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*d4514f0bSApple OSS Distributions 
44*d4514f0bSApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*d4514f0bSApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*d4514f0bSApple OSS Distributions #include <libkern/OSDebug.h>
47*d4514f0bSApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*d4514f0bSApple OSS Distributions 
49*d4514f0bSApple OSS Distributions #include "IOKitKernelInternal.h"
50*d4514f0bSApple OSS Distributions 
51*d4514f0bSApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*d4514f0bSApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*d4514f0bSApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*d4514f0bSApple OSS Distributions #include <libkern/c++/OSArray.h>
55*d4514f0bSApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*d4514f0bSApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*d4514f0bSApple OSS Distributions #include <os/overflow.h>
58*d4514f0bSApple OSS Distributions #include <os/cpp_util.h>
59*d4514f0bSApple OSS Distributions #include <os/base_private.h>
60*d4514f0bSApple OSS Distributions 
61*d4514f0bSApple OSS Distributions #include <sys/uio.h>
62*d4514f0bSApple OSS Distributions 
63*d4514f0bSApple OSS Distributions __BEGIN_DECLS
64*d4514f0bSApple OSS Distributions #include <vm/pmap.h>
65*d4514f0bSApple OSS Distributions #include <vm/vm_pageout_xnu.h>
66*d4514f0bSApple OSS Distributions #include <mach/memory_object_types.h>
67*d4514f0bSApple OSS Distributions #include <device/device_port.h>
68*d4514f0bSApple OSS Distributions 
69*d4514f0bSApple OSS Distributions #include <mach/vm_prot.h>
70*d4514f0bSApple OSS Distributions #include <mach/mach_vm.h>
71*d4514f0bSApple OSS Distributions #include <mach/memory_entry.h>
72*d4514f0bSApple OSS Distributions #include <mach/mach_host.h>
73*d4514f0bSApple OSS Distributions #include <vm/vm_fault_xnu.h>
74*d4514f0bSApple OSS Distributions #include <vm/vm_protos.h>
75*d4514f0bSApple OSS Distributions #include <vm/vm_memory_entry.h>
76*d4514f0bSApple OSS Distributions #include <vm/vm_kern_xnu.h>
77*d4514f0bSApple OSS Distributions #include <vm/vm_iokit.h>
78*d4514f0bSApple OSS Distributions #include <vm/vm_map_xnu.h>
79*d4514f0bSApple OSS Distributions 
80*d4514f0bSApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
81*d4514f0bSApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
82*d4514f0bSApple OSS Distributions 
83*d4514f0bSApple OSS Distributions __END_DECLS
84*d4514f0bSApple OSS Distributions 
// Sentinel IOMapper pointer meaning "wait for / use the system mapper";
// resolved to the real mapper lazily (see gIOSystemMapper below).
#define kIOMapperWaitSystem     ((IOMapper *) 1)

// Cached pointer to the platform's system-wide DMA mapper, or NULL until
// one has been resolved.
static IOMapper * gIOSystemMapper = NULL;

// Highest physical page number known to IOKit.  NOTE(review): the producer
// of this value is elsewhere in the file/subsystem — confirm before
// relying on this description.
ppnum_t           gIOLastPage;

enum {
	// Guard region size (64 KB) used for large guarded mappings.
	kIOMapGuardSizeLarge = 65536
};
94*d4514f0bSApple OSS Distributions 
95*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
96*d4514f0bSApple OSS Distributions 
// IOMemoryDescriptor is abstract: it gets meta-class machinery and
// structors but cannot be instantiated directly.
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

// Concrete general-purpose subclass; ZC_ZFREE_CLEARMEM zeroes its zone
// allocations on free.
OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
103*d4514f0bSApple OSS Distributions 
104*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
105*d4514f0bSApple OSS Distributions 
// Recursive lock serializing descriptor state shared across this file
// (e.g. the device pager back-reference consulted in device_data_action).
static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
// SLEEP/WAKEUP use `this` as the wait event, so each descriptor has its
// own channel; usable only inside member functions.
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug tracing for this file; flip the `#if 0` to enable kprintf output.
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
119*d4514f0bSApple OSS Distributions 
120*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
121*d4514f0bSApple OSS Distributions 
122*d4514f0bSApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
123*d4514f0bSApple OSS Distributions // Function
124*d4514f0bSApple OSS Distributions 
// Per-IOPL flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001, // presumably on-device memory — confirm at use sites
	kIOPLExternUPL = 0x00000002, // UPL supplied externally rather than created here
};

// Initialization payload for building a descriptor from an existing
// persistent memory reference (fMemRef) taken on descriptor fMD.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference               * fMemRef;
};
134*d4514f0bSApple OSS Distributions 
// Bookkeeping for one UPL (universal page list) belonging to a prepared
// general memory descriptor; stored after the page list in ioGMDData.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags (see ioPLBlockFlags)
};
143*d4514f0bSApple OSS Distributions 
enum { kMaxWireTags = 6 };

// Variable-sized bookkeeping blob for an IOGeneralMemoryDescriptor:
// DMA-mapping state, then fPageCnt upl_page_info_t entries, then — at the
// aligned address immediately after the page list — an array of ioPLBlock.
// The accessor macros below (getDataP/getIOPLList/getNumIOPL/getPageList/
// computeDataSize) encode this layout.
struct ioGMDData {
	IOMapper *  fMapper;
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;
#endif /* IOTRACKING */
	unsigned int      fPageCnt;          // entries in fPageList
	uint8_t           fDMAMapNumAddressBits;
	unsigned char     fCompletionError:1;
	unsigned char     fMappedBaseValid:1;
	unsigned char     _resv:4;
	unsigned char     fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	// The ioPLBlock array follows fPageList at a runtime-dependent offset
	// (it depends on fPageCnt), so it is only documented here:
	//ioPLBlock fBlocks[1];
};
171*d4514f0bSApple OSS Distributions 
172*d4514f0bSApple OSS Distributions #pragma GCC visibility push(hidden)
173*d4514f0bSApple OSS Distributions 
// Growable byte buffer (kernel-internal, hidden visibility) used to hold
// the mixed ioGMDData / page-list / ioPLBlock layout described above.
// Method implementations live elsewhere in this file.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Factory: allocate a buffer reserving `capacity` bytes up front.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// NOTE(review): semantics inferred from names — append grows the
	// buffer as needed and returns false on failure; confirm against the
	// implementations.
	bool appendBytes(const void * bytes, size_t length);
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();

	void *  _data = nullptr;    // backing storage
	size_t  _length = 0;        // bytes in use
	size_t  _capacity = 0;      // bytes allocated
};
196*d4514f0bSApple OSS Distributions 
197*d4514f0bSApple OSS Distributions #pragma GCC visibility pop
198*d4514f0bSApple OSS Distributions 
// Accessors into the mixed-data blob: an ioGMDData header followed by the
// page list and the ioPLBlock array (see ioGMDData above).
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
// The ioPLBlock array starts right after the fPageCnt page-list entries.
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
// Number of ioPLBlocks = bytes remaining past the page list / block size.
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
// Total blob size for p page-list entries and u ioPLBlocks.
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

// Convenience mask: descriptor is host-only and/or remote.
enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
208*d4514f0bSApple OSS Distributions 
209*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
210*d4514f0bSApple OSS Distributions 
211*d4514f0bSApple OSS Distributions extern "C" {
212*d4514f0bSApple OSS Distributions kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)213*d4514f0bSApple OSS Distributions device_data_action(
214*d4514f0bSApple OSS Distributions 	uintptr_t               device_handle,
215*d4514f0bSApple OSS Distributions 	ipc_port_t              device_pager,
216*d4514f0bSApple OSS Distributions 	vm_prot_t               protection,
217*d4514f0bSApple OSS Distributions 	vm_object_offset_t      offset,
218*d4514f0bSApple OSS Distributions 	vm_size_t               size)
219*d4514f0bSApple OSS Distributions {
220*d4514f0bSApple OSS Distributions 	kern_return_t        kr;
221*d4514f0bSApple OSS Distributions 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
222*d4514f0bSApple OSS Distributions 	OSSharedPtr<IOMemoryDescriptor> memDesc;
223*d4514f0bSApple OSS Distributions 
224*d4514f0bSApple OSS Distributions 	LOCK;
225*d4514f0bSApple OSS Distributions 	if (ref->dp.memory) {
226*d4514f0bSApple OSS Distributions 		memDesc.reset(ref->dp.memory, OSRetain);
227*d4514f0bSApple OSS Distributions 		kr = memDesc->handleFault(device_pager, offset, size);
228*d4514f0bSApple OSS Distributions 		memDesc.reset();
229*d4514f0bSApple OSS Distributions 	} else {
230*d4514f0bSApple OSS Distributions 		kr = KERN_ABORTED;
231*d4514f0bSApple OSS Distributions 	}
232*d4514f0bSApple OSS Distributions 	UNLOCK;
233*d4514f0bSApple OSS Distributions 
234*d4514f0bSApple OSS Distributions 	return kr;
235*d4514f0bSApple OSS Distributions }
236*d4514f0bSApple OSS Distributions 
237*d4514f0bSApple OSS Distributions kern_return_t
device_close(uintptr_t device_handle)238*d4514f0bSApple OSS Distributions device_close(
239*d4514f0bSApple OSS Distributions 	uintptr_t     device_handle)
240*d4514f0bSApple OSS Distributions {
241*d4514f0bSApple OSS Distributions 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
242*d4514f0bSApple OSS Distributions 
243*d4514f0bSApple OSS Distributions 	IOFreeType( ref, IOMemoryDescriptorReserved );
244*d4514f0bSApple OSS Distributions 
245*d4514f0bSApple OSS Distributions 	return kIOReturnSuccess;
246*d4514f0bSApple OSS Distributions }
247*d4514f0bSApple OSS Distributions };      // end extern "C"
248*d4514f0bSApple OSS Distributions 
249*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
250*d4514f0bSApple OSS Distributions 
251*d4514f0bSApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
252*d4514f0bSApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
253*d4514f0bSApple OSS Distributions // checked for as a NULL reference is illegal.
254*d4514f0bSApple OSS Distributions static inline void
getAddrLenForInd(mach_vm_address_t & addr,mach_vm_size_t & len,UInt32 type,IOGeneralMemoryDescriptor::Ranges r,UInt32 ind,task_t task __unused)255*d4514f0bSApple OSS Distributions getAddrLenForInd(
256*d4514f0bSApple OSS Distributions 	mach_vm_address_t                &addr,
257*d4514f0bSApple OSS Distributions 	mach_vm_size_t                   &len, // Output variables
258*d4514f0bSApple OSS Distributions 	UInt32                            type,
259*d4514f0bSApple OSS Distributions 	IOGeneralMemoryDescriptor::Ranges r,
260*d4514f0bSApple OSS Distributions 	UInt32                            ind,
261*d4514f0bSApple OSS Distributions 	task_t                            task __unused)
262*d4514f0bSApple OSS Distributions {
263*d4514f0bSApple OSS Distributions 	assert(kIOMemoryTypeUIO == type
264*d4514f0bSApple OSS Distributions 	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
265*d4514f0bSApple OSS Distributions 	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
266*d4514f0bSApple OSS Distributions 	if (kIOMemoryTypeUIO == type) {
267*d4514f0bSApple OSS Distributions 		user_size_t us;
268*d4514f0bSApple OSS Distributions 		user_addr_t ad;
269*d4514f0bSApple OSS Distributions 		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
270*d4514f0bSApple OSS Distributions 	}
271*d4514f0bSApple OSS Distributions #ifndef __LP64__
272*d4514f0bSApple OSS Distributions 	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
273*d4514f0bSApple OSS Distributions 		IOAddressRange cur = r.v64[ind];
274*d4514f0bSApple OSS Distributions 		addr = cur.address;
275*d4514f0bSApple OSS Distributions 		len  = cur.length;
276*d4514f0bSApple OSS Distributions 	}
277*d4514f0bSApple OSS Distributions #endif /* !__LP64__ */
278*d4514f0bSApple OSS Distributions 	else {
279*d4514f0bSApple OSS Distributions 		IOVirtualRange cur = r.v[ind];
280*d4514f0bSApple OSS Distributions 		addr = cur.address;
281*d4514f0bSApple OSS Distributions 		len  = cur.length;
282*d4514f0bSApple OSS Distributions 	}
283*d4514f0bSApple OSS Distributions #if CONFIG_PROB_GZALLOC
284*d4514f0bSApple OSS Distributions 	if (task == kernel_task) {
285*d4514f0bSApple OSS Distributions 		addr = pgz_decode(addr, len);
286*d4514f0bSApple OSS Distributions 	}
287*d4514f0bSApple OSS Distributions #endif /* CONFIG_PROB_GZALLOC */
288*d4514f0bSApple OSS Distributions }
289*d4514f0bSApple OSS Distributions 
290*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
291*d4514f0bSApple OSS Distributions 
292*d4514f0bSApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)293*d4514f0bSApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
294*d4514f0bSApple OSS Distributions {
295*d4514f0bSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
296*d4514f0bSApple OSS Distributions 
297*d4514f0bSApple OSS Distributions 	*control = VM_PURGABLE_SET_STATE;
298*d4514f0bSApple OSS Distributions 
299*d4514f0bSApple OSS Distributions 	enum { kIOMemoryPurgeableControlMask = 15 };
300*d4514f0bSApple OSS Distributions 
301*d4514f0bSApple OSS Distributions 	switch (kIOMemoryPurgeableControlMask & newState) {
302*d4514f0bSApple OSS Distributions 	case kIOMemoryPurgeableKeepCurrent:
303*d4514f0bSApple OSS Distributions 		*control = VM_PURGABLE_GET_STATE;
304*d4514f0bSApple OSS Distributions 		break;
305*d4514f0bSApple OSS Distributions 
306*d4514f0bSApple OSS Distributions 	case kIOMemoryPurgeableNonVolatile:
307*d4514f0bSApple OSS Distributions 		*state = VM_PURGABLE_NONVOLATILE;
308*d4514f0bSApple OSS Distributions 		break;
309*d4514f0bSApple OSS Distributions 	case kIOMemoryPurgeableVolatile:
310*d4514f0bSApple OSS Distributions 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
311*d4514f0bSApple OSS Distributions 		break;
312*d4514f0bSApple OSS Distributions 	case kIOMemoryPurgeableEmpty:
313*d4514f0bSApple OSS Distributions 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
314*d4514f0bSApple OSS Distributions 		break;
315*d4514f0bSApple OSS Distributions 	default:
316*d4514f0bSApple OSS Distributions 		err = kIOReturnBadArgument;
317*d4514f0bSApple OSS Distributions 		break;
318*d4514f0bSApple OSS Distributions 	}
319*d4514f0bSApple OSS Distributions 
320*d4514f0bSApple OSS Distributions 	if (*control == VM_PURGABLE_SET_STATE) {
321*d4514f0bSApple OSS Distributions 		// let VM know this call is from the kernel and is allowed to alter
322*d4514f0bSApple OSS Distributions 		// the volatility of the memory entry even if it was created with
323*d4514f0bSApple OSS Distributions 		// MAP_MEM_PURGABLE_KERNEL_ONLY
324*d4514f0bSApple OSS Distributions 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
325*d4514f0bSApple OSS Distributions 	}
326*d4514f0bSApple OSS Distributions 
327*d4514f0bSApple OSS Distributions 	return err;
328*d4514f0bSApple OSS Distributions }
329*d4514f0bSApple OSS Distributions 
330*d4514f0bSApple OSS Distributions static IOReturn
purgeableStateBits(int * state)331*d4514f0bSApple OSS Distributions purgeableStateBits(int * state)
332*d4514f0bSApple OSS Distributions {
333*d4514f0bSApple OSS Distributions 	IOReturn err = kIOReturnSuccess;
334*d4514f0bSApple OSS Distributions 
335*d4514f0bSApple OSS Distributions 	switch (VM_PURGABLE_STATE_MASK & *state) {
336*d4514f0bSApple OSS Distributions 	case VM_PURGABLE_NONVOLATILE:
337*d4514f0bSApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
338*d4514f0bSApple OSS Distributions 		break;
339*d4514f0bSApple OSS Distributions 	case VM_PURGABLE_VOLATILE:
340*d4514f0bSApple OSS Distributions 		*state = kIOMemoryPurgeableVolatile;
341*d4514f0bSApple OSS Distributions 		break;
342*d4514f0bSApple OSS Distributions 	case VM_PURGABLE_EMPTY:
343*d4514f0bSApple OSS Distributions 		*state = kIOMemoryPurgeableEmpty;
344*d4514f0bSApple OSS Distributions 		break;
345*d4514f0bSApple OSS Distributions 	default:
346*d4514f0bSApple OSS Distributions 		*state = kIOMemoryPurgeableNonVolatile;
347*d4514f0bSApple OSS Distributions 		err = kIOReturnNotReady;
348*d4514f0bSApple OSS Distributions 		break;
349*d4514f0bSApple OSS Distributions 	}
350*d4514f0bSApple OSS Distributions 	return err;
351*d4514f0bSApple OSS Distributions }
352*d4514f0bSApple OSS Distributions 
// One row per IOKit cache mode: the VM_WIMG_* value used for pager flags
// and the MAP_MEM_* object type used for memory entries.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* pager flags
	unsigned int object_type;   // MAP_MEM_* memory-entry type
} iokit_memtype_entry;

// Indexed by the kIO*Cache cache-mode constants (designated initializers
// keep the mapping explicit); must cover every supported cache mode.
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
370*d4514f0bSApple OSS Distributions 
371*d4514f0bSApple OSS Distributions static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)372*d4514f0bSApple OSS Distributions vmProtForCacheMode(IOOptionBits cacheMode)
373*d4514f0bSApple OSS Distributions {
374*d4514f0bSApple OSS Distributions 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
375*d4514f0bSApple OSS Distributions 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
376*d4514f0bSApple OSS Distributions 		cacheMode = kIODefaultCache;
377*d4514f0bSApple OSS Distributions 	}
378*d4514f0bSApple OSS Distributions 	vm_prot_t prot = 0;
379*d4514f0bSApple OSS Distributions 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
380*d4514f0bSApple OSS Distributions 	return prot;
381*d4514f0bSApple OSS Distributions }
382*d4514f0bSApple OSS Distributions 
383*d4514f0bSApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)384*d4514f0bSApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
385*d4514f0bSApple OSS Distributions {
386*d4514f0bSApple OSS Distributions 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
387*d4514f0bSApple OSS Distributions 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
388*d4514f0bSApple OSS Distributions 		cacheMode = kIODefaultCache;
389*d4514f0bSApple OSS Distributions 	}
390*d4514f0bSApple OSS Distributions 	if (cacheMode == kIODefaultCache) {
391*d4514f0bSApple OSS Distributions 		return -1U;
392*d4514f0bSApple OSS Distributions 	}
393*d4514f0bSApple OSS Distributions 	return iomd_mem_types[cacheMode].wimg;
394*d4514f0bSApple OSS Distributions }
395*d4514f0bSApple OSS Distributions 
396*d4514f0bSApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)397*d4514f0bSApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
398*d4514f0bSApple OSS Distributions {
399*d4514f0bSApple OSS Distributions 	pagerFlags &= VM_WIMG_MASK;
400*d4514f0bSApple OSS Distributions 	IOOptionBits cacheMode = kIODefaultCache;
401*d4514f0bSApple OSS Distributions 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
402*d4514f0bSApple OSS Distributions 		if (iomd_mem_types[i].wimg == pagerFlags) {
403*d4514f0bSApple OSS Distributions 			cacheMode = i;
404*d4514f0bSApple OSS Distributions 			break;
405*d4514f0bSApple OSS Distributions 		}
406*d4514f0bSApple OSS Distributions 	}
407*d4514f0bSApple OSS Distributions 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
408*d4514f0bSApple OSS Distributions }
409*d4514f0bSApple OSS Distributions 
410*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
411*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
412*d4514f0bSApple OSS Distributions 
// One VM memory-entry port covering a slice of a memory reference.
struct IOMemoryEntry {
	ipc_port_t entry;   // send right to the memory entry
	int64_t    offset;
	uint64_t   size;
	uint64_t   start;
};

// Refcounted bundle of memory-entry ports describing a descriptor's
// ranges.  The entries[] array is allocated inline with the header (see
// memoryReferenceAlloc): `capacity` slots allocated, `count` in use.
struct IOMemoryReference {
	volatile SInt32             refCount;
	vm_prot_t                   prot;
	uint32_t                    capacity;   // allocated entries[] slots
	uint32_t                    count;      // valid entries
	struct IOMemoryReference  * mapRef;     // optional nested reference, freed recursively
	IOMemoryEntry               entries[0];
};

// Option bits for creating memory references.
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Count of live IOMemoryReference objects (incremented in
// memoryReferenceAlloc, decremented in memoryReferenceFree).
SInt32 gIOMemoryReferenceCount;
436*d4514f0bSApple OSS Distributions 
437*d4514f0bSApple OSS Distributions IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)438*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
439*d4514f0bSApple OSS Distributions {
440*d4514f0bSApple OSS Distributions 	IOMemoryReference * ref;
441*d4514f0bSApple OSS Distributions 	size_t              oldCapacity;
442*d4514f0bSApple OSS Distributions 
443*d4514f0bSApple OSS Distributions 	if (realloc) {
444*d4514f0bSApple OSS Distributions 		oldCapacity = realloc->capacity;
445*d4514f0bSApple OSS Distributions 	} else {
446*d4514f0bSApple OSS Distributions 		oldCapacity = 0;
447*d4514f0bSApple OSS Distributions 	}
448*d4514f0bSApple OSS Distributions 
449*d4514f0bSApple OSS Distributions 	// Use the kalloc API instead of manually handling the reallocation
450*d4514f0bSApple OSS Distributions 	ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
451*d4514f0bSApple OSS Distributions 	    oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
452*d4514f0bSApple OSS Distributions 	if (ref) {
453*d4514f0bSApple OSS Distributions 		if (oldCapacity == 0) {
454*d4514f0bSApple OSS Distributions 			ref->refCount = 1;
455*d4514f0bSApple OSS Distributions 			OSIncrementAtomic(&gIOMemoryReferenceCount);
456*d4514f0bSApple OSS Distributions 		}
457*d4514f0bSApple OSS Distributions 		ref->capacity = capacity;
458*d4514f0bSApple OSS Distributions 	}
459*d4514f0bSApple OSS Distributions 	return ref;
460*d4514f0bSApple OSS Distributions }
461*d4514f0bSApple OSS Distributions 
462*d4514f0bSApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)463*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
464*d4514f0bSApple OSS Distributions {
465*d4514f0bSApple OSS Distributions 	IOMemoryEntry * entries;
466*d4514f0bSApple OSS Distributions 
467*d4514f0bSApple OSS Distributions 	if (ref->mapRef) {
468*d4514f0bSApple OSS Distributions 		memoryReferenceFree(ref->mapRef);
469*d4514f0bSApple OSS Distributions 		ref->mapRef = NULL;
470*d4514f0bSApple OSS Distributions 	}
471*d4514f0bSApple OSS Distributions 
472*d4514f0bSApple OSS Distributions 	entries = ref->entries + ref->count;
473*d4514f0bSApple OSS Distributions 	while (entries > &ref->entries[0]) {
474*d4514f0bSApple OSS Distributions 		entries--;
475*d4514f0bSApple OSS Distributions 		ipc_port_release_send(entries->entry);
476*d4514f0bSApple OSS Distributions 	}
477*d4514f0bSApple OSS Distributions 	kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
478*d4514f0bSApple OSS Distributions 
479*d4514f0bSApple OSS Distributions 	OSDecrementAtomic(&gIOMemoryReferenceCount);
480*d4514f0bSApple OSS Distributions }
481*d4514f0bSApple OSS Distributions 
482*d4514f0bSApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)483*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
484*d4514f0bSApple OSS Distributions {
485*d4514f0bSApple OSS Distributions 	if (1 == OSDecrementAtomic(&ref->refCount)) {
486*d4514f0bSApple OSS Distributions 		memoryReferenceFree(ref);
487*d4514f0bSApple OSS Distributions 	}
488*d4514f0bSApple OSS Distributions }
489*d4514f0bSApple OSS Distributions 
490*d4514f0bSApple OSS Distributions 
/*
 * memoryReferenceCreate
 *
 * Build an IOMemoryReference for this descriptor: a growable array of Mach
 * named entries (send rights) covering the descriptor's ranges, which
 * memoryReferenceMap() can later map into a task.
 *
 * options:   kIOMemoryReferenceWrite / kIOMemoryReferenceCOW add access bits;
 *            kIOMemoryReferenceReuse allows reusing _memRef's entries when
 *            the newly made entries match them exactly.
 * reference: out parameter - receives the new (or reused) reference, or NULL
 *            on failure.
 *
 * Returns KERN_SUCCESS/kIOReturnSuccess, kIOReturnNoMemory, kIOReturnVMError,
 * or an error from mach_make_memory_entry_internal().
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	// Initial entry-array capacity and growth increment.
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t        err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;
	IOMemoryEntry *      cloneEntries = NULL;
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
	int                  misaligned_start = 0, misaligned_end = 0;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	// Prime (nextAddr, nextLen) with the first range to cover.
	offset = 0;
	rangeIdx = 0;
	remain = _length;
	if (_task) {
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain  = nextLen;
		}
	} else {
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	// Writable unless the descriptor is strictly direction-out.
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW   & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	// Reuse path: compare entries we create against the existing _memRef;
	// MAP_MEM_NAMED_REUSE is cleared as soon as any entry differs.
	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//    + "network" ledger tag
					//    + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//    + no ledger tag
					//    + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			// NAMED_CREATE allocates fresh pageable memory: NULL map.
			prot |= VM_PROT_WRITE;
			map = NULL;
		} else {
			// Existing task memory: make entries against the task's map.
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		while (remain) {
			srcAddr  = nextAddr;
			srcLen   = nextLen;
			nextAddr = 0;
			nextLen  = 0;
			// coalesce addr range: merge following ranges that are
			// virtually contiguous with this one into a single span
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			// With MAP_MEM_USE_DATA_ADDR the entry covers exact byte
			// addresses; otherwise expand to whole (kernel) pages.
			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr   = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr   = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// Carve [entryAddr, endAddr) into as many named entries as
			// the VM hands back (actualSize may be less than requested).
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						// ran out of existing entries to match
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					// data-addr entries may round up; clamp back
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				// Detect sub-page (mixed page size) overmap at either
				// end of the entry — presumably relevant only when the
				// task map's page size differs from the kernel's.
				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					// Interior entries must tile exactly; bail out
					// rather than build an unmappable reference.
					if (misaligned_start || misaligned_end) {
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				// Grow the entry array in place (contents preserved).
				// NOTE(review): result is not NULL-checked — presumably
				// the Z_WAITOK reallocation cannot fail here; confirm.
				if (count >= ref->capacity) {
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry  = entry;
				entries->size   = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				if (MAP_MEM_NAMED_REUSE & prot) {
					// still matching _memRef exactly?
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		// Single device-pager-backed entry covers the whole descriptor.
		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr  = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry  = entry;
				entries->size   = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot  = prot;

	// Copy-on-write descriptors get a second, COW reference hung off
	// mapRef; memoryReferenceMap() prefers mapRef when present.
	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		// All entries matched _memRef: discard the duplicate and hand
		// back the existing reference with an extra refcount.
		if (MAP_MEM_NAMED_REUSE & prot) {
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
811*d4514f0bSApple OSS Distributions 
812*d4514f0bSApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)813*d4514f0bSApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
814*d4514f0bSApple OSS Distributions {
815*d4514f0bSApple OSS Distributions 	switch (kIOMapGuardedMask & options) {
816*d4514f0bSApple OSS Distributions 	default:
817*d4514f0bSApple OSS Distributions 	case kIOMapGuardedSmall:
818*d4514f0bSApple OSS Distributions 		return vm_map_page_size(map);
819*d4514f0bSApple OSS Distributions 	case kIOMapGuardedLarge:
820*d4514f0bSApple OSS Distributions 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
821*d4514f0bSApple OSS Distributions 		return kIOMapGuardSizeLarge;
822*d4514f0bSApple OSS Distributions 	}
823*d4514f0bSApple OSS Distributions 	;
824*d4514f0bSApple OSS Distributions }
825*d4514f0bSApple OSS Distributions 
826*d4514f0bSApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)827*d4514f0bSApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
828*d4514f0bSApple OSS Distributions     vm_map_offset_t addr, mach_vm_size_t size)
829*d4514f0bSApple OSS Distributions {
830*d4514f0bSApple OSS Distributions 	kern_return_t   kr;
831*d4514f0bSApple OSS Distributions 	vm_map_offset_t actualAddr;
832*d4514f0bSApple OSS Distributions 	mach_vm_size_t  actualSize;
833*d4514f0bSApple OSS Distributions 
834*d4514f0bSApple OSS Distributions 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
835*d4514f0bSApple OSS Distributions 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
836*d4514f0bSApple OSS Distributions 
837*d4514f0bSApple OSS Distributions 	if (kIOMapGuardedMask & options) {
838*d4514f0bSApple OSS Distributions 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
839*d4514f0bSApple OSS Distributions 		actualAddr -= guardSize;
840*d4514f0bSApple OSS Distributions 		actualSize += 2 * guardSize;
841*d4514f0bSApple OSS Distributions 	}
842*d4514f0bSApple OSS Distributions 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
843*d4514f0bSApple OSS Distributions 
844*d4514f0bSApple OSS Distributions 	return kr;
845*d4514f0bSApple OSS Distributions }
846*d4514f0bSApple OSS Distributions 
/*
 * IOMemoryDescriptorMapAlloc
 *
 * Allocation callback: maps a range of ref->size into 'map' per
 * ref->options, recording the resulting address in ref->mapped and the map
 * in ref->map.
 *
 * For guarded mappings (kIOMapGuardedMask) the allocation is enlarged by
 * one guard region on each side; both guards are set VM_PROT_NONE and
 * ref->mapped is advanced past the leading guard so the caller sees only
 * the usable range. Guarded requests require kIOMapAnywhere.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn                        err;
	vm_map_offset_t                 addr;
	mach_vm_size_t                  size;
	mach_vm_size_t                  guardSize;
	vm_map_kernel_flags_t           vmk_flags;

	// ref->mapped carries the requested address for fixed mappings.
	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		// Guards only make sense when the VM chooses the address.
		if (!(kIOMapAnywhere & ref->options)) {
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		size += 2 * guardSize;
	}
	if (kIOMapAnywhere & ref->options) {
		vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
	} else {
		vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
	}
	// Attribute the allocation to the caller's VM tag.
	vmk_flags.vm_tag = ref->tag;

	/*
	 * Mapping memory into the kernel_map using IOMDs use the data range.
	 * Memory being mapped should not contain kernel pointers.
	 */
	if (map == kernel_map) {
		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
	}

	err = mach_vm_map_kernel(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    vmk_flags,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,                       /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// First page of the trailing guard region.
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			// Make both guard regions inaccessible.
			err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			// Report the usable range, past the leading guard.
			ref->mapped += guardSize;
		}
	}

	return err;
}
913*d4514f0bSApple OSS Distributions 
914*d4514f0bSApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)915*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
916*d4514f0bSApple OSS Distributions 	IOMemoryReference * ref,
917*d4514f0bSApple OSS Distributions 	vm_map_t            map,
918*d4514f0bSApple OSS Distributions 	mach_vm_size_t      inoffset,
919*d4514f0bSApple OSS Distributions 	mach_vm_size_t      size,
920*d4514f0bSApple OSS Distributions 	IOOptionBits        options,
921*d4514f0bSApple OSS Distributions 	mach_vm_address_t * inaddr)
922*d4514f0bSApple OSS Distributions {
923*d4514f0bSApple OSS Distributions 	IOReturn        err;
924*d4514f0bSApple OSS Distributions 	int64_t         offset = inoffset;
925*d4514f0bSApple OSS Distributions 	uint32_t        rangeIdx, entryIdx;
926*d4514f0bSApple OSS Distributions 	vm_map_offset_t addr, mapAddr;
927*d4514f0bSApple OSS Distributions 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
928*d4514f0bSApple OSS Distributions 
929*d4514f0bSApple OSS Distributions 	mach_vm_address_t nextAddr;
930*d4514f0bSApple OSS Distributions 	mach_vm_size_t    nextLen;
931*d4514f0bSApple OSS Distributions 	IOByteCount       physLen;
932*d4514f0bSApple OSS Distributions 	IOMemoryEntry   * entry;
933*d4514f0bSApple OSS Distributions 	vm_prot_t         prot, memEntryCacheMode;
934*d4514f0bSApple OSS Distributions 	IOOptionBits      type;
935*d4514f0bSApple OSS Distributions 	IOOptionBits      cacheMode;
936*d4514f0bSApple OSS Distributions 	vm_tag_t          tag;
937*d4514f0bSApple OSS Distributions 	// for the kIOMapPrefault option.
938*d4514f0bSApple OSS Distributions 	upl_page_info_t * pageList = NULL;
939*d4514f0bSApple OSS Distributions 	UInt              currentPageIndex = 0;
940*d4514f0bSApple OSS Distributions 	bool              didAlloc;
941*d4514f0bSApple OSS Distributions 
942*d4514f0bSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
943*d4514f0bSApple OSS Distributions 
944*d4514f0bSApple OSS Distributions 	if (ref->mapRef) {
945*d4514f0bSApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
946*d4514f0bSApple OSS Distributions 		return err;
947*d4514f0bSApple OSS Distributions 	}
948*d4514f0bSApple OSS Distributions 
949*d4514f0bSApple OSS Distributions 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
950*d4514f0bSApple OSS Distributions 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
951*d4514f0bSApple OSS Distributions 		return err;
952*d4514f0bSApple OSS Distributions 	}
953*d4514f0bSApple OSS Distributions 
954*d4514f0bSApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
955*d4514f0bSApple OSS Distributions 
956*d4514f0bSApple OSS Distributions 	prot = VM_PROT_READ;
957*d4514f0bSApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
958*d4514f0bSApple OSS Distributions 		prot |= VM_PROT_WRITE;
959*d4514f0bSApple OSS Distributions 	}
960*d4514f0bSApple OSS Distributions 	prot &= ref->prot;
961*d4514f0bSApple OSS Distributions 
962*d4514f0bSApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
963*d4514f0bSApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
964*d4514f0bSApple OSS Distributions 		// VM system requires write access to update named entry cache mode
965*d4514f0bSApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
966*d4514f0bSApple OSS Distributions 	}
967*d4514f0bSApple OSS Distributions 
968*d4514f0bSApple OSS Distributions 	tag = (typeof(tag))getVMTag(map);
969*d4514f0bSApple OSS Distributions 
970*d4514f0bSApple OSS Distributions 	if (_task) {
971*d4514f0bSApple OSS Distributions 		// Find first range for offset
972*d4514f0bSApple OSS Distributions 		if (!_rangesCount) {
973*d4514f0bSApple OSS Distributions 			return kIOReturnBadArgument;
974*d4514f0bSApple OSS Distributions 		}
975*d4514f0bSApple OSS Distributions 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
976*d4514f0bSApple OSS Distributions 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
977*d4514f0bSApple OSS Distributions 			if (remain < nextLen) {
978*d4514f0bSApple OSS Distributions 				break;
979*d4514f0bSApple OSS Distributions 			}
980*d4514f0bSApple OSS Distributions 			remain -= nextLen;
981*d4514f0bSApple OSS Distributions 		}
982*d4514f0bSApple OSS Distributions 	} else {
983*d4514f0bSApple OSS Distributions 		rangeIdx = 0;
984*d4514f0bSApple OSS Distributions 		remain   = 0;
985*d4514f0bSApple OSS Distributions 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
986*d4514f0bSApple OSS Distributions 		nextLen  = size;
987*d4514f0bSApple OSS Distributions 	}
988*d4514f0bSApple OSS Distributions 
989*d4514f0bSApple OSS Distributions 	assert(remain < nextLen);
990*d4514f0bSApple OSS Distributions 	if (remain >= nextLen) {
991*d4514f0bSApple OSS Distributions 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
992*d4514f0bSApple OSS Distributions 		return kIOReturnBadArgument;
993*d4514f0bSApple OSS Distributions 	}
994*d4514f0bSApple OSS Distributions 
995*d4514f0bSApple OSS Distributions 	nextAddr  += remain;
996*d4514f0bSApple OSS Distributions 	nextLen   -= remain;
997*d4514f0bSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
998*d4514f0bSApple OSS Distributions 	pageOffset = (vm_map_page_mask(map) & nextAddr);
999*d4514f0bSApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
1000*d4514f0bSApple OSS Distributions 	pageOffset = (page_mask & nextAddr);
1001*d4514f0bSApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
1002*d4514f0bSApple OSS Distributions 	addr       = 0;
1003*d4514f0bSApple OSS Distributions 	didAlloc   = false;
1004*d4514f0bSApple OSS Distributions 
1005*d4514f0bSApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
1006*d4514f0bSApple OSS Distributions 		addr = *inaddr;
1007*d4514f0bSApple OSS Distributions 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
1008*d4514f0bSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1009*d4514f0bSApple OSS Distributions 		}
1010*d4514f0bSApple OSS Distributions 		addr -= pageOffset;
1011*d4514f0bSApple OSS Distributions 	}
1012*d4514f0bSApple OSS Distributions 
1013*d4514f0bSApple OSS Distributions 	// find first entry for offset
1014*d4514f0bSApple OSS Distributions 	for (entryIdx = 0;
1015*d4514f0bSApple OSS Distributions 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1016*d4514f0bSApple OSS Distributions 	    entryIdx++) {
1017*d4514f0bSApple OSS Distributions 	}
1018*d4514f0bSApple OSS Distributions 	entryIdx--;
1019*d4514f0bSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1020*d4514f0bSApple OSS Distributions 
1021*d4514f0bSApple OSS Distributions 	// allocate VM
1022*d4514f0bSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1023*d4514f0bSApple OSS Distributions 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1024*d4514f0bSApple OSS Distributions #else
1025*d4514f0bSApple OSS Distributions 	size = round_page_64(size + pageOffset);
1026*d4514f0bSApple OSS Distributions #endif
1027*d4514f0bSApple OSS Distributions 	if (kIOMapOverwrite & options) {
1028*d4514f0bSApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1029*d4514f0bSApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1030*d4514f0bSApple OSS Distributions 		}
1031*d4514f0bSApple OSS Distributions 		err = KERN_SUCCESS;
1032*d4514f0bSApple OSS Distributions 	} else {
1033*d4514f0bSApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1034*d4514f0bSApple OSS Distributions 		ref.map     = map;
1035*d4514f0bSApple OSS Distributions 		ref.tag     = tag;
1036*d4514f0bSApple OSS Distributions 		ref.options = options;
1037*d4514f0bSApple OSS Distributions 		ref.size    = size;
1038*d4514f0bSApple OSS Distributions 		ref.prot    = prot;
1039*d4514f0bSApple OSS Distributions 		if (options & kIOMapAnywhere) {
1040*d4514f0bSApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1041*d4514f0bSApple OSS Distributions 			ref.mapped = 0;
1042*d4514f0bSApple OSS Distributions 		} else {
1043*d4514f0bSApple OSS Distributions 			ref.mapped = addr;
1044*d4514f0bSApple OSS Distributions 		}
1045*d4514f0bSApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1046*d4514f0bSApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1047*d4514f0bSApple OSS Distributions 		} else {
1048*d4514f0bSApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1049*d4514f0bSApple OSS Distributions 		}
1050*d4514f0bSApple OSS Distributions 		if (KERN_SUCCESS == err) {
1051*d4514f0bSApple OSS Distributions 			addr     = ref.mapped;
1052*d4514f0bSApple OSS Distributions 			map      = ref.map;
1053*d4514f0bSApple OSS Distributions 			didAlloc = true;
1054*d4514f0bSApple OSS Distributions 		}
1055*d4514f0bSApple OSS Distributions 	}
1056*d4514f0bSApple OSS Distributions 
1057*d4514f0bSApple OSS Distributions 	/*
1058*d4514f0bSApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1059*d4514f0bSApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1060*d4514f0bSApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1061*d4514f0bSApple OSS Distributions 	 * operations.
1062*d4514f0bSApple OSS Distributions 	 */
1063*d4514f0bSApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1064*d4514f0bSApple OSS Distributions 		options &= ~kIOMapPrefault;
1065*d4514f0bSApple OSS Distributions 	}
1066*d4514f0bSApple OSS Distributions 
1067*d4514f0bSApple OSS Distributions 	/*
1068*d4514f0bSApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1069*d4514f0bSApple OSS Distributions 	 * memory type, and the underlying data.
1070*d4514f0bSApple OSS Distributions 	 */
1071*d4514f0bSApple OSS Distributions 	if (options & kIOMapPrefault) {
1072*d4514f0bSApple OSS Distributions 		/*
1073*d4514f0bSApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1074*d4514f0bSApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1075*d4514f0bSApple OSS Distributions 		 */
1076*d4514f0bSApple OSS Distributions 		assert(_wireCount != 0);
1077*d4514f0bSApple OSS Distributions 		assert(_memoryEntries != NULL);
1078*d4514f0bSApple OSS Distributions 		if ((_wireCount == 0) ||
1079*d4514f0bSApple OSS Distributions 		    (_memoryEntries == NULL)) {
1080*d4514f0bSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1081*d4514f0bSApple OSS Distributions 			return kIOReturnBadArgument;
1082*d4514f0bSApple OSS Distributions 		}
1083*d4514f0bSApple OSS Distributions 
1084*d4514f0bSApple OSS Distributions 		// Get the page list.
1085*d4514f0bSApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1086*d4514f0bSApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1087*d4514f0bSApple OSS Distributions 		pageList = getPageList(dataP);
1088*d4514f0bSApple OSS Distributions 
1089*d4514f0bSApple OSS Distributions 		// Get the number of IOPLs.
1090*d4514f0bSApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1091*d4514f0bSApple OSS Distributions 
1092*d4514f0bSApple OSS Distributions 		/*
1093*d4514f0bSApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1094*d4514f0bSApple OSS Distributions 		 * the offset. The search will go past it, so we'll need to go back to the
1095*d4514f0bSApple OSS Distributions 		 * right range at the end.
1096*d4514f0bSApple OSS Distributions 		 */
1097*d4514f0bSApple OSS Distributions 		UInt ioplIndex = 0;
1098*d4514f0bSApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1099*d4514f0bSApple OSS Distributions 			ioplIndex++;
1100*d4514f0bSApple OSS Distributions 		}
1101*d4514f0bSApple OSS Distributions 		ioplIndex--;
1102*d4514f0bSApple OSS Distributions 
1103*d4514f0bSApple OSS Distributions 		// Retrieve the IOPL info block.
1104*d4514f0bSApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1105*d4514f0bSApple OSS Distributions 
1106*d4514f0bSApple OSS Distributions 		/*
1107*d4514f0bSApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1108*d4514f0bSApple OSS Distributions 		 * array.
1109*d4514f0bSApple OSS Distributions 		 */
1110*d4514f0bSApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1111*d4514f0bSApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1112*d4514f0bSApple OSS Distributions 		} else {
1113*d4514f0bSApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1114*d4514f0bSApple OSS Distributions 		}
1115*d4514f0bSApple OSS Distributions 
1116*d4514f0bSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1117*d4514f0bSApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1118*d4514f0bSApple OSS Distributions 
1119*d4514f0bSApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1120*d4514f0bSApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1121*d4514f0bSApple OSS Distributions 	}
1122*d4514f0bSApple OSS Distributions 
1123*d4514f0bSApple OSS Distributions 	// enter mappings
1124*d4514f0bSApple OSS Distributions 	remain  = size;
1125*d4514f0bSApple OSS Distributions 	mapAddr = addr;
1126*d4514f0bSApple OSS Distributions 	addr    += pageOffset;
1127*d4514f0bSApple OSS Distributions 
1128*d4514f0bSApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1129*d4514f0bSApple OSS Distributions 		entryOffset = offset - entry->offset;
1130*d4514f0bSApple OSS Distributions 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1131*d4514f0bSApple OSS Distributions 			err = kIOReturnNotAligned;
1132*d4514f0bSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1133*d4514f0bSApple OSS Distributions 			break;
1134*d4514f0bSApple OSS Distributions 		}
1135*d4514f0bSApple OSS Distributions 
1136*d4514f0bSApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1137*d4514f0bSApple OSS Distributions 			vm_size_t unused = 0;
1138*d4514f0bSApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1139*d4514f0bSApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1140*d4514f0bSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1141*d4514f0bSApple OSS Distributions 		}
1142*d4514f0bSApple OSS Distributions 
1143*d4514f0bSApple OSS Distributions 		entryOffset -= pageOffset;
1144*d4514f0bSApple OSS Distributions 		if (entryOffset >= entry->size) {
1145*d4514f0bSApple OSS Distributions 			panic("entryOffset");
1146*d4514f0bSApple OSS Distributions 		}
1147*d4514f0bSApple OSS Distributions 		chunk = entry->size - entryOffset;
1148*d4514f0bSApple OSS Distributions 		if (chunk) {
1149*d4514f0bSApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags = {
1150*d4514f0bSApple OSS Distributions 				.vmf_fixed = true,
1151*d4514f0bSApple OSS Distributions 				.vmf_overwrite = true,
1152*d4514f0bSApple OSS Distributions 				.vm_tag = tag,
1153*d4514f0bSApple OSS Distributions 				.vmkf_iokit_acct = true,
1154*d4514f0bSApple OSS Distributions 			};
1155*d4514f0bSApple OSS Distributions 
1156*d4514f0bSApple OSS Distributions 			if (chunk > remain) {
1157*d4514f0bSApple OSS Distributions 				chunk = remain;
1158*d4514f0bSApple OSS Distributions 			}
1159*d4514f0bSApple OSS Distributions 			if (options & kIOMapPrefault) {
1160*d4514f0bSApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1161*d4514f0bSApple OSS Distributions 
1162*d4514f0bSApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1163*d4514f0bSApple OSS Distributions 				    &mapAddr,
1164*d4514f0bSApple OSS Distributions 				    chunk, 0 /* mask */,
1165*d4514f0bSApple OSS Distributions 				    vmk_flags,
1166*d4514f0bSApple OSS Distributions 				    entry->entry,
1167*d4514f0bSApple OSS Distributions 				    entryOffset,
1168*d4514f0bSApple OSS Distributions 				    prot,                        // cur
1169*d4514f0bSApple OSS Distributions 				    prot,                        // max
1170*d4514f0bSApple OSS Distributions 				    &pageList[currentPageIndex],
1171*d4514f0bSApple OSS Distributions 				    nb_pages);
1172*d4514f0bSApple OSS Distributions 
1173*d4514f0bSApple OSS Distributions 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1174*d4514f0bSApple OSS Distributions 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1175*d4514f0bSApple OSS Distributions 				}
1176*d4514f0bSApple OSS Distributions 				// Compute the next index in the page list.
1177*d4514f0bSApple OSS Distributions 				currentPageIndex += nb_pages;
1178*d4514f0bSApple OSS Distributions 				assert(currentPageIndex <= _pages);
1179*d4514f0bSApple OSS Distributions 			} else {
1180*d4514f0bSApple OSS Distributions 				err = mach_vm_map_kernel(map,
1181*d4514f0bSApple OSS Distributions 				    &mapAddr,
1182*d4514f0bSApple OSS Distributions 				    chunk, 0 /* mask */,
1183*d4514f0bSApple OSS Distributions 				    vmk_flags,
1184*d4514f0bSApple OSS Distributions 				    entry->entry,
1185*d4514f0bSApple OSS Distributions 				    entryOffset,
1186*d4514f0bSApple OSS Distributions 				    false,               // copy
1187*d4514f0bSApple OSS Distributions 				    prot,               // cur
1188*d4514f0bSApple OSS Distributions 				    prot,               // max
1189*d4514f0bSApple OSS Distributions 				    VM_INHERIT_NONE);
1190*d4514f0bSApple OSS Distributions 			}
1191*d4514f0bSApple OSS Distributions 			if (KERN_SUCCESS != err) {
1192*d4514f0bSApple OSS Distributions 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1193*d4514f0bSApple OSS Distributions 				break;
1194*d4514f0bSApple OSS Distributions 			}
1195*d4514f0bSApple OSS Distributions 			remain -= chunk;
1196*d4514f0bSApple OSS Distributions 			if (!remain) {
1197*d4514f0bSApple OSS Distributions 				break;
1198*d4514f0bSApple OSS Distributions 			}
1199*d4514f0bSApple OSS Distributions 			mapAddr  += chunk;
1200*d4514f0bSApple OSS Distributions 			offset   += chunk - pageOffset;
1201*d4514f0bSApple OSS Distributions 		}
1202*d4514f0bSApple OSS Distributions 		pageOffset = 0;
1203*d4514f0bSApple OSS Distributions 		entry++;
1204*d4514f0bSApple OSS Distributions 		entryIdx++;
1205*d4514f0bSApple OSS Distributions 		if (entryIdx >= ref->count) {
1206*d4514f0bSApple OSS Distributions 			err = kIOReturnOverrun;
1207*d4514f0bSApple OSS Distributions 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1208*d4514f0bSApple OSS Distributions 			break;
1209*d4514f0bSApple OSS Distributions 		}
1210*d4514f0bSApple OSS Distributions 	}
1211*d4514f0bSApple OSS Distributions 
1212*d4514f0bSApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1213*d4514f0bSApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1214*d4514f0bSApple OSS Distributions 		addr = 0;
1215*d4514f0bSApple OSS Distributions 	}
1216*d4514f0bSApple OSS Distributions 	*inaddr = addr;
1217*d4514f0bSApple OSS Distributions 
1218*d4514f0bSApple OSS Distributions 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1219*d4514f0bSApple OSS Distributions 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1220*d4514f0bSApple OSS Distributions 	}
1221*d4514f0bSApple OSS Distributions 	return err;
1222*d4514f0bSApple OSS Distributions }
1223*d4514f0bSApple OSS Distributions 
1224*d4514f0bSApple OSS Distributions #define LOGUNALIGN 0
1225*d4514f0bSApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1226*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1227*d4514f0bSApple OSS Distributions 	IOMemoryReference * ref,
1228*d4514f0bSApple OSS Distributions 	vm_map_t            map,
1229*d4514f0bSApple OSS Distributions 	mach_vm_size_t      inoffset,
1230*d4514f0bSApple OSS Distributions 	mach_vm_size_t      size,
1231*d4514f0bSApple OSS Distributions 	IOOptionBits        options,
1232*d4514f0bSApple OSS Distributions 	mach_vm_address_t * inaddr)
1233*d4514f0bSApple OSS Distributions {
1234*d4514f0bSApple OSS Distributions 	IOReturn            err;
1235*d4514f0bSApple OSS Distributions 	int64_t             offset = inoffset;
1236*d4514f0bSApple OSS Distributions 	uint32_t            entryIdx, firstEntryIdx;
1237*d4514f0bSApple OSS Distributions 	vm_map_offset_t     addr, mapAddr, mapAddrOut;
1238*d4514f0bSApple OSS Distributions 	vm_map_offset_t     entryOffset, remain, chunk;
1239*d4514f0bSApple OSS Distributions 
1240*d4514f0bSApple OSS Distributions 	IOMemoryEntry    * entry;
1241*d4514f0bSApple OSS Distributions 	vm_prot_t          prot, memEntryCacheMode;
1242*d4514f0bSApple OSS Distributions 	IOOptionBits       type;
1243*d4514f0bSApple OSS Distributions 	IOOptionBits       cacheMode;
1244*d4514f0bSApple OSS Distributions 	vm_tag_t           tag;
1245*d4514f0bSApple OSS Distributions 	// for the kIOMapPrefault option.
1246*d4514f0bSApple OSS Distributions 	upl_page_info_t  * pageList = NULL;
1247*d4514f0bSApple OSS Distributions 	UInt               currentPageIndex = 0;
1248*d4514f0bSApple OSS Distributions 	bool               didAlloc;
1249*d4514f0bSApple OSS Distributions 
1250*d4514f0bSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1251*d4514f0bSApple OSS Distributions 
1252*d4514f0bSApple OSS Distributions 	if (ref->mapRef) {
1253*d4514f0bSApple OSS Distributions 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1254*d4514f0bSApple OSS Distributions 		return err;
1255*d4514f0bSApple OSS Distributions 	}
1256*d4514f0bSApple OSS Distributions 
1257*d4514f0bSApple OSS Distributions #if LOGUNALIGN
1258*d4514f0bSApple OSS Distributions 	printf("MAP offset %qx, %qx\n", inoffset, size);
1259*d4514f0bSApple OSS Distributions #endif
1260*d4514f0bSApple OSS Distributions 
1261*d4514f0bSApple OSS Distributions 	type = _flags & kIOMemoryTypeMask;
1262*d4514f0bSApple OSS Distributions 
1263*d4514f0bSApple OSS Distributions 	prot = VM_PROT_READ;
1264*d4514f0bSApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
1265*d4514f0bSApple OSS Distributions 		prot |= VM_PROT_WRITE;
1266*d4514f0bSApple OSS Distributions 	}
1267*d4514f0bSApple OSS Distributions 	prot &= ref->prot;
1268*d4514f0bSApple OSS Distributions 
1269*d4514f0bSApple OSS Distributions 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1270*d4514f0bSApple OSS Distributions 	if (kIODefaultCache != cacheMode) {
1271*d4514f0bSApple OSS Distributions 		// VM system requires write access to update named entry cache mode
1272*d4514f0bSApple OSS Distributions 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1273*d4514f0bSApple OSS Distributions 	}
1274*d4514f0bSApple OSS Distributions 
1275*d4514f0bSApple OSS Distributions 	tag = (vm_tag_t) getVMTag(map);
1276*d4514f0bSApple OSS Distributions 
1277*d4514f0bSApple OSS Distributions 	addr       = 0;
1278*d4514f0bSApple OSS Distributions 	didAlloc   = false;
1279*d4514f0bSApple OSS Distributions 
1280*d4514f0bSApple OSS Distributions 	if (!(options & kIOMapAnywhere)) {
1281*d4514f0bSApple OSS Distributions 		addr = *inaddr;
1282*d4514f0bSApple OSS Distributions 	}
1283*d4514f0bSApple OSS Distributions 
1284*d4514f0bSApple OSS Distributions 	// find first entry for offset
1285*d4514f0bSApple OSS Distributions 	for (firstEntryIdx = 0;
1286*d4514f0bSApple OSS Distributions 	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1287*d4514f0bSApple OSS Distributions 	    firstEntryIdx++) {
1288*d4514f0bSApple OSS Distributions 	}
1289*d4514f0bSApple OSS Distributions 	firstEntryIdx--;
1290*d4514f0bSApple OSS Distributions 
1291*d4514f0bSApple OSS Distributions 	// calculate required VM space
1292*d4514f0bSApple OSS Distributions 
1293*d4514f0bSApple OSS Distributions 	entryIdx = firstEntryIdx;
1294*d4514f0bSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1295*d4514f0bSApple OSS Distributions 
1296*d4514f0bSApple OSS Distributions 	remain  = size;
1297*d4514f0bSApple OSS Distributions 	int64_t iteroffset = offset;
1298*d4514f0bSApple OSS Distributions 	uint64_t mapSize = 0;
1299*d4514f0bSApple OSS Distributions 	while (remain) {
1300*d4514f0bSApple OSS Distributions 		entryOffset = iteroffset - entry->offset;
1301*d4514f0bSApple OSS Distributions 		if (entryOffset >= entry->size) {
1302*d4514f0bSApple OSS Distributions 			panic("entryOffset");
1303*d4514f0bSApple OSS Distributions 		}
1304*d4514f0bSApple OSS Distributions 
1305*d4514f0bSApple OSS Distributions #if LOGUNALIGN
1306*d4514f0bSApple OSS Distributions 		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1307*d4514f0bSApple OSS Distributions 		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1308*d4514f0bSApple OSS Distributions #endif
1309*d4514f0bSApple OSS Distributions 
1310*d4514f0bSApple OSS Distributions 		chunk = entry->size - entryOffset;
1311*d4514f0bSApple OSS Distributions 		if (chunk) {
1312*d4514f0bSApple OSS Distributions 			if (chunk > remain) {
1313*d4514f0bSApple OSS Distributions 				chunk = remain;
1314*d4514f0bSApple OSS Distributions 			}
1315*d4514f0bSApple OSS Distributions 			mach_vm_size_t entrySize;
1316*d4514f0bSApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1317*d4514f0bSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1318*d4514f0bSApple OSS Distributions 			mapSize += entrySize;
1319*d4514f0bSApple OSS Distributions 
1320*d4514f0bSApple OSS Distributions 			remain -= chunk;
1321*d4514f0bSApple OSS Distributions 			if (!remain) {
1322*d4514f0bSApple OSS Distributions 				break;
1323*d4514f0bSApple OSS Distributions 			}
1324*d4514f0bSApple OSS Distributions 			iteroffset   += chunk; // - pageOffset;
1325*d4514f0bSApple OSS Distributions 		}
1326*d4514f0bSApple OSS Distributions 		entry++;
1327*d4514f0bSApple OSS Distributions 		entryIdx++;
1328*d4514f0bSApple OSS Distributions 		if (entryIdx >= ref->count) {
1329*d4514f0bSApple OSS Distributions 			panic("overrun");
1330*d4514f0bSApple OSS Distributions 			err = kIOReturnOverrun;
1331*d4514f0bSApple OSS Distributions 			break;
1332*d4514f0bSApple OSS Distributions 		}
1333*d4514f0bSApple OSS Distributions 	}
1334*d4514f0bSApple OSS Distributions 
1335*d4514f0bSApple OSS Distributions 	if (kIOMapOverwrite & options) {
1336*d4514f0bSApple OSS Distributions 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1337*d4514f0bSApple OSS Distributions 			map = IOPageableMapForAddress(addr);
1338*d4514f0bSApple OSS Distributions 		}
1339*d4514f0bSApple OSS Distributions 		err = KERN_SUCCESS;
1340*d4514f0bSApple OSS Distributions 	} else {
1341*d4514f0bSApple OSS Distributions 		IOMemoryDescriptorMapAllocRef ref;
1342*d4514f0bSApple OSS Distributions 		ref.map     = map;
1343*d4514f0bSApple OSS Distributions 		ref.tag     = tag;
1344*d4514f0bSApple OSS Distributions 		ref.options = options;
1345*d4514f0bSApple OSS Distributions 		ref.size    = mapSize;
1346*d4514f0bSApple OSS Distributions 		ref.prot    = prot;
1347*d4514f0bSApple OSS Distributions 		if (options & kIOMapAnywhere) {
1348*d4514f0bSApple OSS Distributions 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1349*d4514f0bSApple OSS Distributions 			ref.mapped = 0;
1350*d4514f0bSApple OSS Distributions 		} else {
1351*d4514f0bSApple OSS Distributions 			ref.mapped = addr;
1352*d4514f0bSApple OSS Distributions 		}
1353*d4514f0bSApple OSS Distributions 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1354*d4514f0bSApple OSS Distributions 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1355*d4514f0bSApple OSS Distributions 		} else {
1356*d4514f0bSApple OSS Distributions 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1357*d4514f0bSApple OSS Distributions 		}
1358*d4514f0bSApple OSS Distributions 
1359*d4514f0bSApple OSS Distributions 		if (KERN_SUCCESS == err) {
1360*d4514f0bSApple OSS Distributions 			addr     = ref.mapped;
1361*d4514f0bSApple OSS Distributions 			map      = ref.map;
1362*d4514f0bSApple OSS Distributions 			didAlloc = true;
1363*d4514f0bSApple OSS Distributions 		}
1364*d4514f0bSApple OSS Distributions #if LOGUNALIGN
1365*d4514f0bSApple OSS Distributions 		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1366*d4514f0bSApple OSS Distributions #endif
1367*d4514f0bSApple OSS Distributions 	}
1368*d4514f0bSApple OSS Distributions 
1369*d4514f0bSApple OSS Distributions 	/*
1370*d4514f0bSApple OSS Distributions 	 * If the memory is associated with a device pager but doesn't have a UPL,
1371*d4514f0bSApple OSS Distributions 	 * it will be immediately faulted in through the pager via populateDevicePager().
1372*d4514f0bSApple OSS Distributions 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1373*d4514f0bSApple OSS Distributions 	 * operations.
1374*d4514f0bSApple OSS Distributions 	 */
1375*d4514f0bSApple OSS Distributions 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1376*d4514f0bSApple OSS Distributions 		options &= ~kIOMapPrefault;
1377*d4514f0bSApple OSS Distributions 	}
1378*d4514f0bSApple OSS Distributions 
1379*d4514f0bSApple OSS Distributions 	/*
1380*d4514f0bSApple OSS Distributions 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1381*d4514f0bSApple OSS Distributions 	 * memory type, and the underlying data.
1382*d4514f0bSApple OSS Distributions 	 */
1383*d4514f0bSApple OSS Distributions 	if (options & kIOMapPrefault) {
1384*d4514f0bSApple OSS Distributions 		/*
1385*d4514f0bSApple OSS Distributions 		 * The memory must have been wired by calling ::prepare(), otherwise
1386*d4514f0bSApple OSS Distributions 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1387*d4514f0bSApple OSS Distributions 		 */
1388*d4514f0bSApple OSS Distributions 		assert(_wireCount != 0);
1389*d4514f0bSApple OSS Distributions 		assert(_memoryEntries != NULL);
1390*d4514f0bSApple OSS Distributions 		if ((_wireCount == 0) ||
1391*d4514f0bSApple OSS Distributions 		    (_memoryEntries == NULL)) {
1392*d4514f0bSApple OSS Distributions 			return kIOReturnBadArgument;
1393*d4514f0bSApple OSS Distributions 		}
1394*d4514f0bSApple OSS Distributions 
1395*d4514f0bSApple OSS Distributions 		// Get the page list.
1396*d4514f0bSApple OSS Distributions 		ioGMDData* dataP = getDataP(_memoryEntries);
1397*d4514f0bSApple OSS Distributions 		ioPLBlock const* ioplList = getIOPLList(dataP);
1398*d4514f0bSApple OSS Distributions 		pageList = getPageList(dataP);
1399*d4514f0bSApple OSS Distributions 
1400*d4514f0bSApple OSS Distributions 		// Get the number of IOPLs.
1401*d4514f0bSApple OSS Distributions 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1402*d4514f0bSApple OSS Distributions 
1403*d4514f0bSApple OSS Distributions 		/*
1404*d4514f0bSApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1405*d4514f0bSApple OSS Distributions 		 * the offset. The search will go past it, so we'll need to go back to the
1406*d4514f0bSApple OSS Distributions 		 * right range at the end.
1407*d4514f0bSApple OSS Distributions 		 */
1408*d4514f0bSApple OSS Distributions 		UInt ioplIndex = 0;
1409*d4514f0bSApple OSS Distributions 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1410*d4514f0bSApple OSS Distributions 			ioplIndex++;
1411*d4514f0bSApple OSS Distributions 		}
1412*d4514f0bSApple OSS Distributions 		ioplIndex--;
1413*d4514f0bSApple OSS Distributions 
1414*d4514f0bSApple OSS Distributions 		// Retrieve the IOPL info block.
1415*d4514f0bSApple OSS Distributions 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1416*d4514f0bSApple OSS Distributions 
1417*d4514f0bSApple OSS Distributions 		/*
1418*d4514f0bSApple OSS Distributions 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1419*d4514f0bSApple OSS Distributions 		 * array.
1420*d4514f0bSApple OSS Distributions 		 */
1421*d4514f0bSApple OSS Distributions 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1422*d4514f0bSApple OSS Distributions 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1423*d4514f0bSApple OSS Distributions 		} else {
1424*d4514f0bSApple OSS Distributions 			pageList = &pageList[ioplInfo.fPageInfo];
1425*d4514f0bSApple OSS Distributions 		}
1426*d4514f0bSApple OSS Distributions 
1427*d4514f0bSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1428*d4514f0bSApple OSS Distributions 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1429*d4514f0bSApple OSS Distributions 
1430*d4514f0bSApple OSS Distributions 		// Retrieve the index of the first page corresponding to the offset.
1431*d4514f0bSApple OSS Distributions 		currentPageIndex = atop_32(offsetInIOPL);
1432*d4514f0bSApple OSS Distributions 	}
1433*d4514f0bSApple OSS Distributions 
1434*d4514f0bSApple OSS Distributions 	// enter mappings
1435*d4514f0bSApple OSS Distributions 	remain   = size;
1436*d4514f0bSApple OSS Distributions 	mapAddr  = addr;
1437*d4514f0bSApple OSS Distributions 	entryIdx = firstEntryIdx;
1438*d4514f0bSApple OSS Distributions 	entry = &ref->entries[entryIdx];
1439*d4514f0bSApple OSS Distributions 
1440*d4514f0bSApple OSS Distributions 	while (remain && (KERN_SUCCESS == err)) {
1441*d4514f0bSApple OSS Distributions #if LOGUNALIGN
1442*d4514f0bSApple OSS Distributions 		printf("offset %qx, %qx\n", offset, entry->offset);
1443*d4514f0bSApple OSS Distributions #endif
1444*d4514f0bSApple OSS Distributions 		if (kIODefaultCache != cacheMode) {
1445*d4514f0bSApple OSS Distributions 			vm_size_t unused = 0;
1446*d4514f0bSApple OSS Distributions 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1447*d4514f0bSApple OSS Distributions 			    memEntryCacheMode, NULL, entry->entry);
1448*d4514f0bSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1449*d4514f0bSApple OSS Distributions 		}
1450*d4514f0bSApple OSS Distributions 		entryOffset = offset - entry->offset;
1451*d4514f0bSApple OSS Distributions 		if (entryOffset >= entry->size) {
1452*d4514f0bSApple OSS Distributions 			panic("entryOffset");
1453*d4514f0bSApple OSS Distributions 		}
1454*d4514f0bSApple OSS Distributions 		chunk = entry->size - entryOffset;
1455*d4514f0bSApple OSS Distributions #if LOGUNALIGN
1456*d4514f0bSApple OSS Distributions 		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1457*d4514f0bSApple OSS Distributions #endif
1458*d4514f0bSApple OSS Distributions 		if (chunk) {
1459*d4514f0bSApple OSS Distributions 			vm_map_kernel_flags_t vmk_flags = {
1460*d4514f0bSApple OSS Distributions 				.vmf_fixed = true,
1461*d4514f0bSApple OSS Distributions 				.vmf_overwrite = true,
1462*d4514f0bSApple OSS Distributions 				.vmf_return_data_addr = true,
1463*d4514f0bSApple OSS Distributions 				.vm_tag = tag,
1464*d4514f0bSApple OSS Distributions 				.vmkf_iokit_acct = true,
1465*d4514f0bSApple OSS Distributions 			};
1466*d4514f0bSApple OSS Distributions 
1467*d4514f0bSApple OSS Distributions 			if (chunk > remain) {
1468*d4514f0bSApple OSS Distributions 				chunk = remain;
1469*d4514f0bSApple OSS Distributions 			}
1470*d4514f0bSApple OSS Distributions 			mapAddrOut = mapAddr;
1471*d4514f0bSApple OSS Distributions 			if (options & kIOMapPrefault) {
1472*d4514f0bSApple OSS Distributions 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1473*d4514f0bSApple OSS Distributions 
1474*d4514f0bSApple OSS Distributions 				err = vm_map_enter_mem_object_prefault(map,
1475*d4514f0bSApple OSS Distributions 				    &mapAddrOut,
1476*d4514f0bSApple OSS Distributions 				    chunk, 0 /* mask */,
1477*d4514f0bSApple OSS Distributions 				    vmk_flags,
1478*d4514f0bSApple OSS Distributions 				    entry->entry,
1479*d4514f0bSApple OSS Distributions 				    entryOffset,
1480*d4514f0bSApple OSS Distributions 				    prot,                        // cur
1481*d4514f0bSApple OSS Distributions 				    prot,                        // max
1482*d4514f0bSApple OSS Distributions 				    &pageList[currentPageIndex],
1483*d4514f0bSApple OSS Distributions 				    nb_pages);
1484*d4514f0bSApple OSS Distributions 
1485*d4514f0bSApple OSS Distributions 				// Compute the next index in the page list.
1486*d4514f0bSApple OSS Distributions 				currentPageIndex += nb_pages;
1487*d4514f0bSApple OSS Distributions 				assert(currentPageIndex <= _pages);
1488*d4514f0bSApple OSS Distributions 			} else {
1489*d4514f0bSApple OSS Distributions #if LOGUNALIGN
1490*d4514f0bSApple OSS Distributions 				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1491*d4514f0bSApple OSS Distributions #endif
1492*d4514f0bSApple OSS Distributions 				err = mach_vm_map_kernel(map,
1493*d4514f0bSApple OSS Distributions 				    &mapAddrOut,
1494*d4514f0bSApple OSS Distributions 				    chunk, 0 /* mask */,
1495*d4514f0bSApple OSS Distributions 				    vmk_flags,
1496*d4514f0bSApple OSS Distributions 				    entry->entry,
1497*d4514f0bSApple OSS Distributions 				    entryOffset,
1498*d4514f0bSApple OSS Distributions 				    false,               // copy
1499*d4514f0bSApple OSS Distributions 				    prot,               // cur
1500*d4514f0bSApple OSS Distributions 				    prot,               // max
1501*d4514f0bSApple OSS Distributions 				    VM_INHERIT_NONE);
1502*d4514f0bSApple OSS Distributions 			}
1503*d4514f0bSApple OSS Distributions 			if (KERN_SUCCESS != err) {
1504*d4514f0bSApple OSS Distributions 				panic("map enter err %x", err);
1505*d4514f0bSApple OSS Distributions 				break;
1506*d4514f0bSApple OSS Distributions 			}
1507*d4514f0bSApple OSS Distributions #if LOGUNALIGN
1508*d4514f0bSApple OSS Distributions 			printf("mapAddr o %qx\n", mapAddrOut);
1509*d4514f0bSApple OSS Distributions #endif
1510*d4514f0bSApple OSS Distributions 			if (entryIdx == firstEntryIdx) {
1511*d4514f0bSApple OSS Distributions 				addr = mapAddrOut;
1512*d4514f0bSApple OSS Distributions 			}
1513*d4514f0bSApple OSS Distributions 			remain -= chunk;
1514*d4514f0bSApple OSS Distributions 			if (!remain) {
1515*d4514f0bSApple OSS Distributions 				break;
1516*d4514f0bSApple OSS Distributions 			}
1517*d4514f0bSApple OSS Distributions 			mach_vm_size_t entrySize;
1518*d4514f0bSApple OSS Distributions 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1519*d4514f0bSApple OSS Distributions 			assert(KERN_SUCCESS == err);
1520*d4514f0bSApple OSS Distributions 			mapAddr += entrySize;
1521*d4514f0bSApple OSS Distributions 			offset  += chunk;
1522*d4514f0bSApple OSS Distributions 		}
1523*d4514f0bSApple OSS Distributions 
1524*d4514f0bSApple OSS Distributions 		entry++;
1525*d4514f0bSApple OSS Distributions 		entryIdx++;
1526*d4514f0bSApple OSS Distributions 		if (entryIdx >= ref->count) {
1527*d4514f0bSApple OSS Distributions 			err = kIOReturnOverrun;
1528*d4514f0bSApple OSS Distributions 			break;
1529*d4514f0bSApple OSS Distributions 		}
1530*d4514f0bSApple OSS Distributions 	}
1531*d4514f0bSApple OSS Distributions 
1532*d4514f0bSApple OSS Distributions 	if (KERN_SUCCESS != err) {
1533*d4514f0bSApple OSS Distributions 		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1534*d4514f0bSApple OSS Distributions 	}
1535*d4514f0bSApple OSS Distributions 
1536*d4514f0bSApple OSS Distributions 	if ((KERN_SUCCESS != err) && didAlloc) {
1537*d4514f0bSApple OSS Distributions 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1538*d4514f0bSApple OSS Distributions 		addr = 0;
1539*d4514f0bSApple OSS Distributions 	}
1540*d4514f0bSApple OSS Distributions 	*inaddr = addr;
1541*d4514f0bSApple OSS Distributions 
1542*d4514f0bSApple OSS Distributions 	return err;
1543*d4514f0bSApple OSS Distributions }
1544*d4514f0bSApple OSS Distributions 
1545*d4514f0bSApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1546*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1547*d4514f0bSApple OSS Distributions 	IOMemoryReference * ref,
1548*d4514f0bSApple OSS Distributions 	uint64_t          * offset)
1549*d4514f0bSApple OSS Distributions {
1550*d4514f0bSApple OSS Distributions 	kern_return_t kr;
1551*d4514f0bSApple OSS Distributions 	vm_object_offset_t data_offset = 0;
1552*d4514f0bSApple OSS Distributions 	uint64_t total;
1553*d4514f0bSApple OSS Distributions 	uint32_t idx;
1554*d4514f0bSApple OSS Distributions 
1555*d4514f0bSApple OSS Distributions 	assert(ref->count);
1556*d4514f0bSApple OSS Distributions 	if (offset) {
1557*d4514f0bSApple OSS Distributions 		*offset = (uint64_t) data_offset;
1558*d4514f0bSApple OSS Distributions 	}
1559*d4514f0bSApple OSS Distributions 	total = 0;
1560*d4514f0bSApple OSS Distributions 	for (idx = 0; idx < ref->count; idx++) {
1561*d4514f0bSApple OSS Distributions 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1562*d4514f0bSApple OSS Distributions 		    &data_offset);
1563*d4514f0bSApple OSS Distributions 		if (KERN_SUCCESS != kr) {
1564*d4514f0bSApple OSS Distributions 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1565*d4514f0bSApple OSS Distributions 		} else if (0 != data_offset) {
1566*d4514f0bSApple OSS Distributions 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1567*d4514f0bSApple OSS Distributions 		}
1568*d4514f0bSApple OSS Distributions 		if (offset && !idx) {
1569*d4514f0bSApple OSS Distributions 			*offset = (uint64_t) data_offset;
1570*d4514f0bSApple OSS Distributions 		}
1571*d4514f0bSApple OSS Distributions 		total += round_page(data_offset + ref->entries[idx].size);
1572*d4514f0bSApple OSS Distributions 	}
1573*d4514f0bSApple OSS Distributions 
1574*d4514f0bSApple OSS Distributions 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1575*d4514f0bSApple OSS Distributions 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1576*d4514f0bSApple OSS Distributions 
1577*d4514f0bSApple OSS Distributions 	return total;
1578*d4514f0bSApple OSS Distributions }
1579*d4514f0bSApple OSS Distributions 
1580*d4514f0bSApple OSS Distributions 
1581*d4514f0bSApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1582*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1583*d4514f0bSApple OSS Distributions 	IOMemoryReference * ref,
1584*d4514f0bSApple OSS Distributions 	IOByteCount       * residentPageCount,
1585*d4514f0bSApple OSS Distributions 	IOByteCount       * dirtyPageCount)
1586*d4514f0bSApple OSS Distributions {
1587*d4514f0bSApple OSS Distributions 	IOReturn        err;
1588*d4514f0bSApple OSS Distributions 	IOMemoryEntry * entries;
1589*d4514f0bSApple OSS Distributions 	unsigned int resident, dirty;
1590*d4514f0bSApple OSS Distributions 	unsigned int totalResident, totalDirty;
1591*d4514f0bSApple OSS Distributions 
1592*d4514f0bSApple OSS Distributions 	totalResident = totalDirty = 0;
1593*d4514f0bSApple OSS Distributions 	err = kIOReturnSuccess;
1594*d4514f0bSApple OSS Distributions 	entries = ref->entries + ref->count;
1595*d4514f0bSApple OSS Distributions 	while (entries > &ref->entries[0]) {
1596*d4514f0bSApple OSS Distributions 		entries--;
1597*d4514f0bSApple OSS Distributions 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1598*d4514f0bSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1599*d4514f0bSApple OSS Distributions 			break;
1600*d4514f0bSApple OSS Distributions 		}
1601*d4514f0bSApple OSS Distributions 		totalResident += resident;
1602*d4514f0bSApple OSS Distributions 		totalDirty    += dirty;
1603*d4514f0bSApple OSS Distributions 	}
1604*d4514f0bSApple OSS Distributions 
1605*d4514f0bSApple OSS Distributions 	if (residentPageCount) {
1606*d4514f0bSApple OSS Distributions 		*residentPageCount = totalResident;
1607*d4514f0bSApple OSS Distributions 	}
1608*d4514f0bSApple OSS Distributions 	if (dirtyPageCount) {
1609*d4514f0bSApple OSS Distributions 		*dirtyPageCount    = totalDirty;
1610*d4514f0bSApple OSS Distributions 	}
1611*d4514f0bSApple OSS Distributions 	return err;
1612*d4514f0bSApple OSS Distributions }
1613*d4514f0bSApple OSS Distributions 
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits        newState,
	IOOptionBits      * oldState)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	vm_purgable_t   control;
	int             totalState, state;

	// Apply the requested purgeable state to every memory entry of the
	// reference, and aggregate the per-entry previous states into a single
	// summary returned through oldState. Aggregation priority is:
	// Empty > Volatile > NonVolatile (once a higher state is seen, lower
	// states from later entries cannot downgrade it).
	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	// Walk entries from last to first; stop on the first failure.
	while (entries > &ref->entries[0]) {
		entries--;

		// Translate the IOKit state into VM control/state values.
		err = purgeableControlBits(newState, &control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// Apply to this entry; 'state' is replaced with the entry's old state.
		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// Translate the VM state back into an IOKit kIOMemoryPurgeable* value.
		err = purgeableStateBits(&state);
		if (KERN_SUCCESS != err) {
			break;
		}

		// Merge this entry's old state into the aggregate.
		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}

	if (oldState) {
		*oldState = totalState;
	}
	return err;
}
1662*d4514f0bSApple OSS Distributions 
1663*d4514f0bSApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1664*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1665*d4514f0bSApple OSS Distributions 	IOMemoryReference * ref,
1666*d4514f0bSApple OSS Distributions 	task_t              newOwner,
1667*d4514f0bSApple OSS Distributions 	int                 newLedgerTag,
1668*d4514f0bSApple OSS Distributions 	IOOptionBits        newLedgerOptions)
1669*d4514f0bSApple OSS Distributions {
1670*d4514f0bSApple OSS Distributions 	IOReturn        err, totalErr;
1671*d4514f0bSApple OSS Distributions 	IOMemoryEntry * entries;
1672*d4514f0bSApple OSS Distributions 
1673*d4514f0bSApple OSS Distributions 	totalErr = kIOReturnSuccess;
1674*d4514f0bSApple OSS Distributions 	entries = ref->entries + ref->count;
1675*d4514f0bSApple OSS Distributions 	while (entries > &ref->entries[0]) {
1676*d4514f0bSApple OSS Distributions 		entries--;
1677*d4514f0bSApple OSS Distributions 
1678*d4514f0bSApple OSS Distributions 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1679*d4514f0bSApple OSS Distributions 		if (KERN_SUCCESS != err) {
1680*d4514f0bSApple OSS Distributions 			totalErr = err;
1681*d4514f0bSApple OSS Distributions 		}
1682*d4514f0bSApple OSS Distributions 	}
1683*d4514f0bSApple OSS Distributions 
1684*d4514f0bSApple OSS Distributions 	return totalErr;
1685*d4514f0bSApple OSS Distributions }
1686*d4514f0bSApple OSS Distributions 
1687*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1688*d4514f0bSApple OSS Distributions 
1689*d4514f0bSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1690*d4514f0bSApple OSS Distributions IOMemoryDescriptor::withAddress(void *      address,
1691*d4514f0bSApple OSS Distributions     IOByteCount   length,
1692*d4514f0bSApple OSS Distributions     IODirection direction)
1693*d4514f0bSApple OSS Distributions {
1694*d4514f0bSApple OSS Distributions 	return IOMemoryDescriptor::
1695*d4514f0bSApple OSS Distributions 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1696*d4514f0bSApple OSS Distributions }
1697*d4514f0bSApple OSS Distributions 
#ifndef __LP64__
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
    IOByteCount  length,
    IODirection  direction,
    task_t       task)
{
	// Legacy 32-bit entry point: construct a general descriptor and
	// initialize it with the single virtual range.
	auto md = OSMakeShared<IOGeneralMemoryDescriptor>();

	if (!md || !md->initWithAddress(address, length, direction, task)) {
		return nullptr;
	}
	return os::move(md);
}
#endif /* !__LP64__ */
1714*d4514f0bSApple OSS Distributions 
1715*d4514f0bSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1716*d4514f0bSApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1717*d4514f0bSApple OSS Distributions 	IOPhysicalAddress       address,
1718*d4514f0bSApple OSS Distributions 	IOByteCount             length,
1719*d4514f0bSApple OSS Distributions 	IODirection             direction )
1720*d4514f0bSApple OSS Distributions {
1721*d4514f0bSApple OSS Distributions 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1722*d4514f0bSApple OSS Distributions }
1723*d4514f0bSApple OSS Distributions 
#ifndef __LP64__
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    task_t           task,
    bool             asReference)
{
	// Legacy 32-bit entry point: build a general descriptor over the
	// caller-supplied virtual ranges.
	auto md = OSMakeShared<IOGeneralMemoryDescriptor>();

	if (!md || !md->initWithRanges(ranges, withCount, direction, task, asReference)) {
		return nullptr;
	}
	return os::move(md);
}
#endif /* !__LP64__ */
1741*d4514f0bSApple OSS Distributions 
1742*d4514f0bSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1743*d4514f0bSApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1744*d4514f0bSApple OSS Distributions     mach_vm_size_t length,
1745*d4514f0bSApple OSS Distributions     IOOptionBits   options,
1746*d4514f0bSApple OSS Distributions     task_t         task)
1747*d4514f0bSApple OSS Distributions {
1748*d4514f0bSApple OSS Distributions 	IOAddressRange range = { address, length };
1749*d4514f0bSApple OSS Distributions 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1750*d4514f0bSApple OSS Distributions }
1751*d4514f0bSApple OSS Distributions 
1752*d4514f0bSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1753*d4514f0bSApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1754*d4514f0bSApple OSS Distributions     UInt32           rangeCount,
1755*d4514f0bSApple OSS Distributions     IOOptionBits     options,
1756*d4514f0bSApple OSS Distributions     task_t           task)
1757*d4514f0bSApple OSS Distributions {
1758*d4514f0bSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1759*d4514f0bSApple OSS Distributions 	if (that) {
1760*d4514f0bSApple OSS Distributions 		if (task) {
1761*d4514f0bSApple OSS Distributions 			options |= kIOMemoryTypeVirtual64;
1762*d4514f0bSApple OSS Distributions 		} else {
1763*d4514f0bSApple OSS Distributions 			options |= kIOMemoryTypePhysical64;
1764*d4514f0bSApple OSS Distributions 		}
1765*d4514f0bSApple OSS Distributions 
1766*d4514f0bSApple OSS Distributions 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1767*d4514f0bSApple OSS Distributions 			return os::move(that);
1768*d4514f0bSApple OSS Distributions 		}
1769*d4514f0bSApple OSS Distributions 	}
1770*d4514f0bSApple OSS Distributions 
1771*d4514f0bSApple OSS Distributions 	return nullptr;
1772*d4514f0bSApple OSS Distributions }
1773*d4514f0bSApple OSS Distributions 
1774*d4514f0bSApple OSS Distributions 
1775*d4514f0bSApple OSS Distributions /*
1776*d4514f0bSApple OSS Distributions  * withOptions:
1777*d4514f0bSApple OSS Distributions  *
1778*d4514f0bSApple OSS Distributions  * Create a new IOMemoryDescriptor. The buffer is made up of several
1779*d4514f0bSApple OSS Distributions  * virtual address ranges, from a given task.
1780*d4514f0bSApple OSS Distributions  *
1781*d4514f0bSApple OSS Distributions  * Passing the ranges as a reference will avoid an extra allocation.
1782*d4514f0bSApple OSS Distributions  */
1783*d4514f0bSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1784*d4514f0bSApple OSS Distributions IOMemoryDescriptor::withOptions(void *          buffers,
1785*d4514f0bSApple OSS Distributions     UInt32          count,
1786*d4514f0bSApple OSS Distributions     UInt32          offset,
1787*d4514f0bSApple OSS Distributions     task_t          task,
1788*d4514f0bSApple OSS Distributions     IOOptionBits    opts,
1789*d4514f0bSApple OSS Distributions     IOMapper *      mapper)
1790*d4514f0bSApple OSS Distributions {
1791*d4514f0bSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1792*d4514f0bSApple OSS Distributions 
1793*d4514f0bSApple OSS Distributions 	if (self
1794*d4514f0bSApple OSS Distributions 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1795*d4514f0bSApple OSS Distributions 		return nullptr;
1796*d4514f0bSApple OSS Distributions 	}
1797*d4514f0bSApple OSS Distributions 
1798*d4514f0bSApple OSS Distributions 	return os::move(self);
1799*d4514f0bSApple OSS Distributions }
1800*d4514f0bSApple OSS Distributions 
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	// Base-class stub: IOMemoryDescriptor itself cannot be initialized from
	// options; concrete subclasses (e.g. IOGeneralMemoryDescriptor) override
	// this. Always fails here.
	return false;
}
1811*d4514f0bSApple OSS Distributions 
1812*d4514f0bSApple OSS Distributions #ifndef __LP64__
1813*d4514f0bSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1814*d4514f0bSApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1815*d4514f0bSApple OSS Distributions     UInt32          withCount,
1816*d4514f0bSApple OSS Distributions     IODirection     direction,
1817*d4514f0bSApple OSS Distributions     bool            asReference)
1818*d4514f0bSApple OSS Distributions {
1819*d4514f0bSApple OSS Distributions 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1820*d4514f0bSApple OSS Distributions 	if (that) {
1821*d4514f0bSApple OSS Distributions 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1822*d4514f0bSApple OSS Distributions 			return os::move(that);
1823*d4514f0bSApple OSS Distributions 		}
1824*d4514f0bSApple OSS Distributions 	}
1825*d4514f0bSApple OSS Distributions 	return nullptr;
1826*d4514f0bSApple OSS Distributions }
1827*d4514f0bSApple OSS Distributions 
1828*d4514f0bSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1829*d4514f0bSApple OSS Distributions IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
1830*d4514f0bSApple OSS Distributions     IOByteCount             offset,
1831*d4514f0bSApple OSS Distributions     IOByteCount             length,
1832*d4514f0bSApple OSS Distributions     IODirection             direction)
1833*d4514f0bSApple OSS Distributions {
1834*d4514f0bSApple OSS Distributions 	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1835*d4514f0bSApple OSS Distributions }
1836*d4514f0bSApple OSS Distributions #endif /* !__LP64__ */
1837*d4514f0bSApple OSS Distributions 
1838*d4514f0bSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1839*d4514f0bSApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1840*d4514f0bSApple OSS Distributions {
1841*d4514f0bSApple OSS Distributions 	IOGeneralMemoryDescriptor *origGenMD =
1842*d4514f0bSApple OSS Distributions 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1843*d4514f0bSApple OSS Distributions 
1844*d4514f0bSApple OSS Distributions 	if (origGenMD) {
1845*d4514f0bSApple OSS Distributions 		return IOGeneralMemoryDescriptor::
1846*d4514f0bSApple OSS Distributions 		       withPersistentMemoryDescriptor(origGenMD);
1847*d4514f0bSApple OSS Distributions 	} else {
1848*d4514f0bSApple OSS Distributions 		return nullptr;
1849*d4514f0bSApple OSS Distributions 	}
1850*d4514f0bSApple OSS Distributions }
1851*d4514f0bSApple OSS Distributions 
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	// Take a (possibly reused) reference on the original's backing memory.
	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// If the original descriptor's own reference was reused, no new
	// descriptor is needed: return the original retained, and drop the
	// extra reference count just taken on memRef.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	// Otherwise wrap the new memory reference in a fresh descriptor
	// initialized as a persistent MD over (originalMD, memRef).
	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1877*d4514f0bSApple OSS Distributions 
1878*d4514f0bSApple OSS Distributions #ifndef __LP64__
1879*d4514f0bSApple OSS Distributions bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1880*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1881*d4514f0bSApple OSS Distributions     IOByteCount   withLength,
1882*d4514f0bSApple OSS Distributions     IODirection withDirection)
1883*d4514f0bSApple OSS Distributions {
1884*d4514f0bSApple OSS Distributions 	_singleRange.v.address = (vm_offset_t) address;
1885*d4514f0bSApple OSS Distributions 	_singleRange.v.length  = withLength;
1886*d4514f0bSApple OSS Distributions 
1887*d4514f0bSApple OSS Distributions 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1888*d4514f0bSApple OSS Distributions }
1889*d4514f0bSApple OSS Distributions 
1890*d4514f0bSApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1891*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1892*d4514f0bSApple OSS Distributions     IOByteCount    withLength,
1893*d4514f0bSApple OSS Distributions     IODirection  withDirection,
1894*d4514f0bSApple OSS Distributions     task_t       withTask)
1895*d4514f0bSApple OSS Distributions {
1896*d4514f0bSApple OSS Distributions 	_singleRange.v.address = address;
1897*d4514f0bSApple OSS Distributions 	_singleRange.v.length  = withLength;
1898*d4514f0bSApple OSS Distributions 
1899*d4514f0bSApple OSS Distributions 	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1900*d4514f0bSApple OSS Distributions }
1901*d4514f0bSApple OSS Distributions 
1902*d4514f0bSApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1903*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1904*d4514f0bSApple OSS Distributions 	IOPhysicalAddress      address,
1905*d4514f0bSApple OSS Distributions 	IOByteCount            withLength,
1906*d4514f0bSApple OSS Distributions 	IODirection            withDirection )
1907*d4514f0bSApple OSS Distributions {
1908*d4514f0bSApple OSS Distributions 	_singleRange.p.address = address;
1909*d4514f0bSApple OSS Distributions 	_singleRange.p.length  = withLength;
1910*d4514f0bSApple OSS Distributions 
1911*d4514f0bSApple OSS Distributions 	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1912*d4514f0bSApple OSS Distributions }
1913*d4514f0bSApple OSS Distributions 
1914*d4514f0bSApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1915*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1916*d4514f0bSApple OSS Distributions 	IOPhysicalRange * ranges,
1917*d4514f0bSApple OSS Distributions 	UInt32            count,
1918*d4514f0bSApple OSS Distributions 	IODirection       direction,
1919*d4514f0bSApple OSS Distributions 	bool              reference)
1920*d4514f0bSApple OSS Distributions {
1921*d4514f0bSApple OSS Distributions 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1922*d4514f0bSApple OSS Distributions 
1923*d4514f0bSApple OSS Distributions 	if (reference) {
1924*d4514f0bSApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1925*d4514f0bSApple OSS Distributions 	}
1926*d4514f0bSApple OSS Distributions 
1927*d4514f0bSApple OSS Distributions 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1928*d4514f0bSApple OSS Distributions }
1929*d4514f0bSApple OSS Distributions 
1930*d4514f0bSApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1931*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1932*d4514f0bSApple OSS Distributions 	IOVirtualRange * ranges,
1933*d4514f0bSApple OSS Distributions 	UInt32           count,
1934*d4514f0bSApple OSS Distributions 	IODirection      direction,
1935*d4514f0bSApple OSS Distributions 	task_t           task,
1936*d4514f0bSApple OSS Distributions 	bool             reference)
1937*d4514f0bSApple OSS Distributions {
1938*d4514f0bSApple OSS Distributions 	IOOptionBits mdOpts = direction;
1939*d4514f0bSApple OSS Distributions 
1940*d4514f0bSApple OSS Distributions 	if (reference) {
1941*d4514f0bSApple OSS Distributions 		mdOpts |= kIOMemoryAsReference;
1942*d4514f0bSApple OSS Distributions 	}
1943*d4514f0bSApple OSS Distributions 
1944*d4514f0bSApple OSS Distributions 	if (task) {
1945*d4514f0bSApple OSS Distributions 		mdOpts |= kIOMemoryTypeVirtual;
1946*d4514f0bSApple OSS Distributions 
1947*d4514f0bSApple OSS Distributions 		// Auto-prepare if this is a kernel memory descriptor as very few
1948*d4514f0bSApple OSS Distributions 		// clients bother to prepare() kernel memory.
1949*d4514f0bSApple OSS Distributions 		// But it was not enforced so what are you going to do?
1950*d4514f0bSApple OSS Distributions 		if (task == kernel_task) {
1951*d4514f0bSApple OSS Distributions 			mdOpts |= kIOMemoryAutoPrepare;
1952*d4514f0bSApple OSS Distributions 		}
1953*d4514f0bSApple OSS Distributions 	} else {
1954*d4514f0bSApple OSS Distributions 		mdOpts |= kIOMemoryTypePhysical;
1955*d4514f0bSApple OSS Distributions 	}
1956*d4514f0bSApple OSS Distributions 
1957*d4514f0bSApple OSS Distributions 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1958*d4514f0bSApple OSS Distributions }
1959*d4514f0bSApple OSS Distributions #endif /* !__LP64__ */
1960*d4514f0bSApple OSS Distributions 
1961*d4514f0bSApple OSS Distributions /*
1962*d4514f0bSApple OSS Distributions  * initWithOptions:
1963*d4514f0bSApple OSS Distributions  *
 *  Primary initialiser for an IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1965*d4514f0bSApple OSS Distributions  * from a given task, several physical ranges, an UPL from the ubc
1966*d4514f0bSApple OSS Distributions  * system or a uio (may be 64bit) from the BSD subsystem.
1967*d4514f0bSApple OSS Distributions  *
1968*d4514f0bSApple OSS Distributions  * Passing the ranges as a reference will avoid an extra allocation.
1969*d4514f0bSApple OSS Distributions  *
1970*d4514f0bSApple OSS Distributions  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1971*d4514f0bSApple OSS Distributions  * existing instance -- note this behavior is not commonly supported in other
1972*d4514f0bSApple OSS Distributions  * I/O Kit classes, although it is supported here.
1973*d4514f0bSApple OSS Distributions  */
1974*d4514f0bSApple OSS Distributions 
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	// The memory type (virtual / physical / UPL / UIO / persistent) is
	// encoded in the low option bits and steers everything below.
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// 32-bit IOVirtualRange cannot describe memory in a 64-bit task;
	// reject (with a backtrace) rather than silently truncate addresses.
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		// Re-run initialisation with the original descriptor's settings;
		// kIOMemoryAsReference is dropped so the ranges get copied below.
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Validate the task/type pairing: virtual-style descriptors require a
	// task, physical and UPL descriptors must not have one.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized  instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		// NOTE: this inner 'type' deliberately shadows the parameter-derived
		// one above -- cleanup must key off the descriptor's OLD flags.
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			// Drop any outstanding wirings before releasing the ranges.
			while (_wireCount) {
				complete();
			}
		}
		// Free the previous range storage unless the caller owned it.
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// Preserve the redirected state across re-initialisation; only a
		// non-redirected descriptor may drop its memory reference/mappings.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		// kIOMapperSystem is a sentinel meaning "use the system mapper".
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options               &= ~(kIOMemoryPreparedReadOnly);
	_flags                 = options;
	_task                  = task;

#ifndef __LP64__
	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-instance bookkeeping (may be stale on re-initialisation).
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Allocate or free the prepare lock to match the thread-safety option.
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// External UPL case: 'buffers' is a upl_t, 'count' is its length in
		// bytes, 'offset' is the byte offset into the UPL.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		// A UPL must be unidirectional; map the direction onto DMA access.
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length    = count;
		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		// Take a reference on the caller's UPL; dropped when this MD is freed.
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice conveniently equal to 1
		iopl.fFlags  = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				// Deep-copy the uio so the descriptor owns its own copy.
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// A single 64-bit range that fits below 4GB can be narrowed
				// to the 32-bit type and kept in the inline single range,
				// avoiding an allocation.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (kIOMemoryTypeVirtual64 == type) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				// _ranges is a union; .v aliases the same storage as .v64.
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				if (count == 1) {
					// Single range: use the inline storage, no allocation.
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
		_rangesCount = count;

		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		// Walk every range to accumulate total byte length and page count,
		// bailing out of the loop on any overflow or lookup failure
		// (detected after the loop by ind < count).
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind, _task);
			if (_task) {
				// Task-backed range: ask the VM for its physical footprint.
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				// No task: compute the page span directly from the addresses.
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				// Track the highest physical page (used for bounce/mapping
				// decisions); only while it still fits in a 32-bit ppnum_t.
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		// An early break above, or a total length that doesn't fit in
		// IOByteCount, means the arguments were invalid.
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false;                                   /* overflow */
		}
		_length      = totalLength;
		_pages       = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed


		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// Sanity: can't describe more pages than exist in the system.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Tag the memory for accounting if no tag was set yet.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			// Persistent descriptors need a named memory entry up front
			// (unless one was already adopted from a persistent MD above).
			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2347*d4514f0bSApple OSS Distributions 
2348*d4514f0bSApple OSS Distributions /*
2349*d4514f0bSApple OSS Distributions  * free
2350*d4514f0bSApple OSS Distributions  *
2351*d4514f0bSApple OSS Distributions  * Free resources.
2352*d4514f0bSApple OSS Distributions  */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Sever the device pager's back reference to this descriptor first,
	// under the lock, so a concurrent pager operation can't reach a
	// half-destroyed object.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical MDs are permanently wired; just tear down any DMA mapping.
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance every outstanding prepare() before releasing resources.
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free the range storage unless the caller owned it (as-reference).
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2416*d4514f0bSApple OSS Distributions 
2417*d4514f0bSApple OSS Distributions #ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	// Deprecated pre-LP64 entry point (only compiled #ifndef __LP64__);
	// unconditionally fatal if ever reached.
	panic("IOGMD::unmapFromKernel deprecated");
}
2423*d4514f0bSApple OSS Distributions 
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	// Deprecated pre-LP64 entry point (only compiled #ifndef __LP64__);
	// unconditionally fatal if ever reached.
	panic("IOGMD::mapIntoKernel deprecated");
}
2429*d4514f0bSApple OSS Distributions #endif /* !__LP64__ */
2430*d4514f0bSApple OSS Distributions 
2431*d4514f0bSApple OSS Distributions /*
2432*d4514f0bSApple OSS Distributions  * getDirection:
2433*d4514f0bSApple OSS Distributions  *
2434*d4514f0bSApple OSS Distributions  * Get the direction of the transfer.
2435*d4514f0bSApple OSS Distributions  */
2436*d4514f0bSApple OSS Distributions IODirection
getDirection() const2437*d4514f0bSApple OSS Distributions IOMemoryDescriptor::getDirection() const
2438*d4514f0bSApple OSS Distributions {
2439*d4514f0bSApple OSS Distributions #ifndef __LP64__
2440*d4514f0bSApple OSS Distributions 	if (_direction) {
2441*d4514f0bSApple OSS Distributions 		return _direction;
2442*d4514f0bSApple OSS Distributions 	}
2443*d4514f0bSApple OSS Distributions #endif /* !__LP64__ */
2444*d4514f0bSApple OSS Distributions 	return (IODirection) (_flags & kIOMemoryDirectionMask);
2445*d4514f0bSApple OSS Distributions }
2446*d4514f0bSApple OSS Distributions 
2447*d4514f0bSApple OSS Distributions /*
2448*d4514f0bSApple OSS Distributions  * getLength:
2449*d4514f0bSApple OSS Distributions  *
2450*d4514f0bSApple OSS Distributions  * Get the length of the transfer (over all ranges).
2451*d4514f0bSApple OSS Distributions  */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total byte count of the transfer, over all ranges.
	return _length;
}
2457*d4514f0bSApple OSS Distributions 
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	// Attach a client-defined tag to this descriptor (read back via getTag()).
	_tag = tag;
}
2463*d4514f0bSApple OSS Distributions 
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	// Return the client-defined tag installed via setTag().
	return _tag;
}
2469*d4514f0bSApple OSS Distributions 
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	// Expose the raw option/state flag word (kIOMemory* bits).
	return _flags;
}
2475*d4514f0bSApple OSS Distributions 
2476*d4514f0bSApple OSS Distributions OSObject *
copyContext(void) const2477*d4514f0bSApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2478*d4514f0bSApple OSS Distributions {
2479*d4514f0bSApple OSS Distributions 	if (reserved) {
2480*d4514f0bSApple OSS Distributions 		OSObject * context = reserved->contextObject;
2481*d4514f0bSApple OSS Distributions 		if (context) {
2482*d4514f0bSApple OSS Distributions 			context->retain();
2483*d4514f0bSApple OSS Distributions 		}
2484*d4514f0bSApple OSS Distributions 		return context;
2485*d4514f0bSApple OSS Distributions 	} else {
2486*d4514f0bSApple OSS Distributions 		return NULL;
2487*d4514f0bSApple OSS Distributions 	}
2488*d4514f0bSApple OSS Distributions }
2489*d4514f0bSApple OSS Distributions 
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	// Replace the descriptor's context object: releases any previously
	// installed object, then retains and installs 'obj' (which may be NULL
	// to simply clear the context).
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// NOTE: local shadows the member; may be NULL if allocation failed.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		OSObject * oldObject = reserved->contextObject;
		// CAS detaches the old object so that only one racing caller
		// performs the release.
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			obj->retain();
			// Plain store: presumably concurrent setContext callers are not
			// expected past the detach above — TODO confirm.
			reserved->contextObject = obj;
		}
	}
}
2510*d4514f0bSApple OSS Distributions 
2511*d4514f0bSApple OSS Distributions #ifndef __LP64__
2512*d4514f0bSApple OSS Distributions #pragma clang diagnostic push
2513*d4514f0bSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2514*d4514f0bSApple OSS Distributions 
2515*d4514f0bSApple OSS Distributions // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
2516*d4514f0bSApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2517*d4514f0bSApple OSS Distributions IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
2518*d4514f0bSApple OSS Distributions {
2519*d4514f0bSApple OSS Distributions 	addr64_t physAddr = 0;
2520*d4514f0bSApple OSS Distributions 
2521*d4514f0bSApple OSS Distributions 	if (prepare() == kIOReturnSuccess) {
2522*d4514f0bSApple OSS Distributions 		physAddr = getPhysicalSegment64( offset, length );
2523*d4514f0bSApple OSS Distributions 		complete();
2524*d4514f0bSApple OSS Distributions 	}
2525*d4514f0bSApple OSS Distributions 
2526*d4514f0bSApple OSS Distributions 	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2527*d4514f0bSApple OSS Distributions }
2528*d4514f0bSApple OSS Distributions 
2529*d4514f0bSApple OSS Distributions #pragma clang diagnostic pop
2530*d4514f0bSApple OSS Distributions 
2531*d4514f0bSApple OSS Distributions #endif /* !__LP64__ */
2532*d4514f0bSApple OSS Distributions 
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	// Copy 'length' bytes starting at 'offset' out of this descriptor's
	// physical pages into the kernel buffer 'bytes'. Returns the number
	// of bytes actually copied (0 on range errors or remote memory).
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;


	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	// Reading at exactly the end is a benign empty read.
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-host-local) memory cannot be copied here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Clip the request to what is actually available past 'offset'.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t        srcAddr64;
		IOByteCount     srcLen;

		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv takes a 32-bit length; cap each pass accordingly.
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// Physical source (cppvPsrc), kernel-mapped sink (cppvKmap).
		copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// A short copy here means getPhysicalSegment failed mid-range.
	assert(!remaining);

	return length - remaining;
}
2596*d4514f0bSApple OSS Distributions 
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	// Copy 'length' bytes from the kernel buffer 'bytes' into this
	// descriptor's physical pages starting at 'inoffset'. A NULL 'bytes'
	// zero-fills the range instead. Returns the number of bytes written
	// (0 on range errors, read-only preparation, or remote memory).
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	// Refuse writes to memory that was wired down read-only.
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	// Writing at exactly the end is a benign empty write.
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-host-local) memory cannot be copied here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Clip the request to what is actually available past 'offset'.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv takes a 32-bit length; cap each pass accordingly.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (!srcAddr) {
			// NULL source buffer: zero the physical destination directly.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			// Physical sink (cppvPsnk), flush sink (cppvFsnk).
			copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			srcAddr   += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// A short copy here means getPhysicalSegment failed mid-range.
	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	// The zero-fill path did not go through copypv, so flush explicitly
	// for incoherent I/O.
	if (!srcAddr) {
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2676*d4514f0bSApple OSS Distributions 
2677*d4514f0bSApple OSS Distributions #ifndef __LP64__
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	// Deprecated pre-LP64 entry point (only compiled #ifndef __LP64__);
	// unconditionally fatal if ever reached.
	panic("IOGMD::setPosition deprecated");
}
2683*d4514f0bSApple OSS Distributions #endif /* !__LP64__ */
2684*d4514f0bSApple OSS Distributions 
// Process-wide monotonic ID generators, advanced with OSIncrementAtomic64.
// Preparation IDs start at 2^32; descriptor IDs start just past the
// kIODescriptorIDInvalid sentinel so a freshly assigned ID never equals it.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2687*d4514f0bSApple OSS Distributions 
2688*d4514f0bSApple OSS Distributions uint64_t
getPreparationID(void)2689*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::getPreparationID( void )
2690*d4514f0bSApple OSS Distributions {
2691*d4514f0bSApple OSS Distributions 	ioGMDData *dataP;
2692*d4514f0bSApple OSS Distributions 
2693*d4514f0bSApple OSS Distributions 	if (!_wireCount) {
2694*d4514f0bSApple OSS Distributions 		return kIOPreparationIDUnprepared;
2695*d4514f0bSApple OSS Distributions 	}
2696*d4514f0bSApple OSS Distributions 
2697*d4514f0bSApple OSS Distributions 	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2698*d4514f0bSApple OSS Distributions 	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2699*d4514f0bSApple OSS Distributions 		IOMemoryDescriptor::setPreparationID();
2700*d4514f0bSApple OSS Distributions 		return IOMemoryDescriptor::getPreparationID();
2701*d4514f0bSApple OSS Distributions 	}
2702*d4514f0bSApple OSS Distributions 
2703*d4514f0bSApple OSS Distributions 	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2704*d4514f0bSApple OSS Distributions 		return kIOPreparationIDUnprepared;
2705*d4514f0bSApple OSS Distributions 	}
2706*d4514f0bSApple OSS Distributions 
2707*d4514f0bSApple OSS Distributions 	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2708*d4514f0bSApple OSS Distributions 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2709*d4514f0bSApple OSS Distributions 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2710*d4514f0bSApple OSS Distributions 	}
2711*d4514f0bSApple OSS Distributions 	return dataP->fPreparationID;
2712*d4514f0bSApple OSS Distributions }
2713*d4514f0bSApple OSS Distributions 
2714*d4514f0bSApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2715*d4514f0bSApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2716*d4514f0bSApple OSS Distributions {
2717*d4514f0bSApple OSS Distributions 	if (reserved->creator) {
2718*d4514f0bSApple OSS Distributions 		task_deallocate(reserved->creator);
2719*d4514f0bSApple OSS Distributions 		reserved->creator = NULL;
2720*d4514f0bSApple OSS Distributions 	}
2721*d4514f0bSApple OSS Distributions 
2722*d4514f0bSApple OSS Distributions 	if (reserved->contextObject) {
2723*d4514f0bSApple OSS Distributions 		reserved->contextObject->release();
2724*d4514f0bSApple OSS Distributions 		reserved->contextObject = NULL;
2725*d4514f0bSApple OSS Distributions 	}
2726*d4514f0bSApple OSS Distributions }
2727*d4514f0bSApple OSS Distributions 
2728*d4514f0bSApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2729*d4514f0bSApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2730*d4514f0bSApple OSS Distributions {
2731*d4514f0bSApple OSS Distributions 	if (!reserved) {
2732*d4514f0bSApple OSS Distributions 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2733*d4514f0bSApple OSS Distributions 	}
2734*d4514f0bSApple OSS Distributions 	return reserved;
2735*d4514f0bSApple OSS Distributions }
2736*d4514f0bSApple OSS Distributions 
2737*d4514f0bSApple OSS Distributions void
setPreparationID(void)2738*d4514f0bSApple OSS Distributions IOMemoryDescriptor::setPreparationID( void )
2739*d4514f0bSApple OSS Distributions {
2740*d4514f0bSApple OSS Distributions 	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2741*d4514f0bSApple OSS Distributions 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2742*d4514f0bSApple OSS Distributions 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2743*d4514f0bSApple OSS Distributions 	}
2744*d4514f0bSApple OSS Distributions }
2745*d4514f0bSApple OSS Distributions 
2746*d4514f0bSApple OSS Distributions uint64_t
getPreparationID(void)2747*d4514f0bSApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2748*d4514f0bSApple OSS Distributions {
2749*d4514f0bSApple OSS Distributions 	if (reserved) {
2750*d4514f0bSApple OSS Distributions 		return reserved->preparationID;
2751*d4514f0bSApple OSS Distributions 	} else {
2752*d4514f0bSApple OSS Distributions 		return kIOPreparationIDUnsupported;
2753*d4514f0bSApple OSS Distributions 	}
2754*d4514f0bSApple OSS Distributions }
2755*d4514f0bSApple OSS Distributions 
2756*d4514f0bSApple OSS Distributions void
setDescriptorID(void)2757*d4514f0bSApple OSS Distributions IOMemoryDescriptor::setDescriptorID( void )
2758*d4514f0bSApple OSS Distributions {
2759*d4514f0bSApple OSS Distributions 	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2760*d4514f0bSApple OSS Distributions 		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2761*d4514f0bSApple OSS Distributions 		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2762*d4514f0bSApple OSS Distributions 	}
2763*d4514f0bSApple OSS Distributions }
2764*d4514f0bSApple OSS Distributions 
2765*d4514f0bSApple OSS Distributions uint64_t
getDescriptorID(void)2766*d4514f0bSApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2767*d4514f0bSApple OSS Distributions {
2768*d4514f0bSApple OSS Distributions 	setDescriptorID();
2769*d4514f0bSApple OSS Distributions 
2770*d4514f0bSApple OSS Distributions 	if (reserved) {
2771*d4514f0bSApple OSS Distributions 		return reserved->descriptorID;
2772*d4514f0bSApple OSS Distributions 	} else {
2773*d4514f0bSApple OSS Distributions 		return kIODescriptorIDInvalid;
2774*d4514f0bSApple OSS Distributions 	}
2775*d4514f0bSApple OSS Distributions }
2776*d4514f0bSApple OSS Distributions 
2777*d4514f0bSApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2778*d4514f0bSApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2779*d4514f0bSApple OSS Distributions {
2780*d4514f0bSApple OSS Distributions 	if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2781*d4514f0bSApple OSS Distributions 		return kIOReturnSuccess;
2782*d4514f0bSApple OSS Distributions 	}
2783*d4514f0bSApple OSS Distributions 
2784*d4514f0bSApple OSS Distributions 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2785*d4514f0bSApple OSS Distributions 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2786*d4514f0bSApple OSS Distributions 		return kIOReturnBadArgument;
2787*d4514f0bSApple OSS Distributions 	}
2788*d4514f0bSApple OSS Distributions 
2789*d4514f0bSApple OSS Distributions 	uint64_t descriptorID = getDescriptorID();
2790*d4514f0bSApple OSS Distributions 	assert(descriptorID != kIODescriptorIDInvalid);
2791*d4514f0bSApple OSS Distributions 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2792*d4514f0bSApple OSS Distributions 		return kIOReturnBadArgument;
2793*d4514f0bSApple OSS Distributions 	}
2794*d4514f0bSApple OSS Distributions 
2795*d4514f0bSApple OSS Distributions 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2796*d4514f0bSApple OSS Distributions 
2797*d4514f0bSApple OSS Distributions #if __LP64__
2798*d4514f0bSApple OSS Distributions 	static const uint8_t num_segments_page = 8;
2799*d4514f0bSApple OSS Distributions #else
2800*d4514f0bSApple OSS Distributions 	static const uint8_t num_segments_page = 4;
2801*d4514f0bSApple OSS Distributions #endif
2802*d4514f0bSApple OSS Distributions 	static const uint8_t num_segments_long = 2;
2803*d4514f0bSApple OSS Distributions 
2804*d4514f0bSApple OSS Distributions 	IOPhysicalAddress segments_page[num_segments_page];
2805*d4514f0bSApple OSS Distributions 	IOPhysicalRange   segments_long[num_segments_long];
2806*d4514f0bSApple OSS Distributions 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2807*d4514f0bSApple OSS Distributions 	memset(segments_long, 0, sizeof(segments_long));
2808*d4514f0bSApple OSS Distributions 
2809*d4514f0bSApple OSS Distributions 	uint8_t segment_page_idx = 0;
2810*d4514f0bSApple OSS Distributions 	uint8_t segment_long_idx = 0;
2811*d4514f0bSApple OSS Distributions 
2812*d4514f0bSApple OSS Distributions 	IOPhysicalRange physical_segment;
2813*d4514f0bSApple OSS Distributions 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2814*d4514f0bSApple OSS Distributions 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2815*d4514f0bSApple OSS Distributions 
2816*d4514f0bSApple OSS Distributions 		if (physical_segment.length == 0) {
2817*d4514f0bSApple OSS Distributions 			break;
2818*d4514f0bSApple OSS Distributions 		}
2819*d4514f0bSApple OSS Distributions 
2820*d4514f0bSApple OSS Distributions 		/**
2821*d4514f0bSApple OSS Distributions 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2822*d4514f0bSApple OSS Distributions 		 * buffer memory, pack segment events according to the following.
2823*d4514f0bSApple OSS Distributions 		 *
2824*d4514f0bSApple OSS Distributions 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2825*d4514f0bSApple OSS Distributions 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2826*d4514f0bSApple OSS Distributions 		 *
2827*d4514f0bSApple OSS Distributions 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2828*d4514f0bSApple OSS Distributions 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2829*d4514f0bSApple OSS Distributions 		 * - unmapped pages will have a ppn of MAX_INT_32
2830*d4514f0bSApple OSS Distributions 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2831*d4514f0bSApple OSS Distributions 		 * - address_0, length_0, address_0, length_1
2832*d4514f0bSApple OSS Distributions 		 * - unmapped pages will have an address of 0
2833*d4514f0bSApple OSS Distributions 		 *
2834*d4514f0bSApple OSS Distributions 		 * During each iteration do the following depending on the length of the mapping:
2835*d4514f0bSApple OSS Distributions 		 * 1. add the current segment to the appropriate queue of pending segments
2836*d4514f0bSApple OSS Distributions 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2837*d4514f0bSApple OSS Distributions 		 * 1a. if FALSE emit and reset all events in the previous queue
2838*d4514f0bSApple OSS Distributions 		 * 2. check if we have filled up the current queue of pending events
2839*d4514f0bSApple OSS Distributions 		 * 2a. if TRUE emit and reset all events in the pending queue
2840*d4514f0bSApple OSS Distributions 		 * 3. after completing all iterations emit events in the current queue
2841*d4514f0bSApple OSS Distributions 		 */
2842*d4514f0bSApple OSS Distributions 
2843*d4514f0bSApple OSS Distributions 		bool emit_page = false;
2844*d4514f0bSApple OSS Distributions 		bool emit_long = false;
2845*d4514f0bSApple OSS Distributions 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2846*d4514f0bSApple OSS Distributions 			segments_page[segment_page_idx] = physical_segment.address;
2847*d4514f0bSApple OSS Distributions 			segment_page_idx++;
2848*d4514f0bSApple OSS Distributions 
2849*d4514f0bSApple OSS Distributions 			emit_long = segment_long_idx != 0;
2850*d4514f0bSApple OSS Distributions 			emit_page = segment_page_idx == num_segments_page;
2851*d4514f0bSApple OSS Distributions 
2852*d4514f0bSApple OSS Distributions 			if (os_unlikely(emit_long)) {
2853*d4514f0bSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2854*d4514f0bSApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2855*d4514f0bSApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2856*d4514f0bSApple OSS Distributions 			}
2857*d4514f0bSApple OSS Distributions 
2858*d4514f0bSApple OSS Distributions 			if (os_unlikely(emit_page)) {
2859*d4514f0bSApple OSS Distributions #if __LP64__
2860*d4514f0bSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2861*d4514f0bSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2862*d4514f0bSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2863*d4514f0bSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2864*d4514f0bSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2865*d4514f0bSApple OSS Distributions #else
2866*d4514f0bSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2867*d4514f0bSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2868*d4514f0bSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2869*d4514f0bSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2870*d4514f0bSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2871*d4514f0bSApple OSS Distributions #endif
2872*d4514f0bSApple OSS Distributions 			}
2873*d4514f0bSApple OSS Distributions 		} else {
2874*d4514f0bSApple OSS Distributions 			segments_long[segment_long_idx] = physical_segment;
2875*d4514f0bSApple OSS Distributions 			segment_long_idx++;
2876*d4514f0bSApple OSS Distributions 
2877*d4514f0bSApple OSS Distributions 			emit_page = segment_page_idx != 0;
2878*d4514f0bSApple OSS Distributions 			emit_long = segment_long_idx == num_segments_long;
2879*d4514f0bSApple OSS Distributions 
2880*d4514f0bSApple OSS Distributions 			if (os_unlikely(emit_page)) {
2881*d4514f0bSApple OSS Distributions #if __LP64__
2882*d4514f0bSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2883*d4514f0bSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2884*d4514f0bSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2885*d4514f0bSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2886*d4514f0bSApple OSS Distributions 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2887*d4514f0bSApple OSS Distributions #else
2888*d4514f0bSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2889*d4514f0bSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[1]),
2890*d4514f0bSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[2]),
2891*d4514f0bSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[3]),
2892*d4514f0bSApple OSS Distributions 				    (ppnum_t) atop_32(segments_page[4]));
2893*d4514f0bSApple OSS Distributions #endif
2894*d4514f0bSApple OSS Distributions 			}
2895*d4514f0bSApple OSS Distributions 
2896*d4514f0bSApple OSS Distributions 			if (emit_long) {
2897*d4514f0bSApple OSS Distributions 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2898*d4514f0bSApple OSS Distributions 				    segments_long[0].address, segments_long[0].length,
2899*d4514f0bSApple OSS Distributions 				    segments_long[1].address, segments_long[1].length);
2900*d4514f0bSApple OSS Distributions 			}
2901*d4514f0bSApple OSS Distributions 		}
2902*d4514f0bSApple OSS Distributions 
2903*d4514f0bSApple OSS Distributions 		if (os_unlikely(emit_page)) {
2904*d4514f0bSApple OSS Distributions 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2905*d4514f0bSApple OSS Distributions 			segment_page_idx = 0;
2906*d4514f0bSApple OSS Distributions 		}
2907*d4514f0bSApple OSS Distributions 
2908*d4514f0bSApple OSS Distributions 		if (os_unlikely(emit_long)) {
2909*d4514f0bSApple OSS Distributions 			memset(segments_long, 0, sizeof(segments_long));
2910*d4514f0bSApple OSS Distributions 			segment_long_idx = 0;
2911*d4514f0bSApple OSS Distributions 		}
2912*d4514f0bSApple OSS Distributions 	}
2913*d4514f0bSApple OSS Distributions 
2914*d4514f0bSApple OSS Distributions 	if (segment_page_idx != 0) {
2915*d4514f0bSApple OSS Distributions 		assert(segment_long_idx == 0);
2916*d4514f0bSApple OSS Distributions #if __LP64__
2917*d4514f0bSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2918*d4514f0bSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2919*d4514f0bSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2920*d4514f0bSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2921*d4514f0bSApple OSS Distributions 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2922*d4514f0bSApple OSS Distributions #else
2923*d4514f0bSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2924*d4514f0bSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[1]),
2925*d4514f0bSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[2]),
2926*d4514f0bSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[3]),
2927*d4514f0bSApple OSS Distributions 		    (ppnum_t) atop_32(segments_page[4]));
2928*d4514f0bSApple OSS Distributions #endif
2929*d4514f0bSApple OSS Distributions 	} else if (segment_long_idx != 0) {
2930*d4514f0bSApple OSS Distributions 		assert(segment_page_idx == 0);
2931*d4514f0bSApple OSS Distributions 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2932*d4514f0bSApple OSS Distributions 		    segments_long[0].address, segments_long[0].length,
2933*d4514f0bSApple OSS Distributions 		    segments_long[1].address, segments_long[1].length);
2934*d4514f0bSApple OSS Distributions 	}
2935*d4514f0bSApple OSS Distributions 
2936*d4514f0bSApple OSS Distributions 	return kIOReturnSuccess;
2937*d4514f0bSApple OSS Distributions }
2938*d4514f0bSApple OSS Distributions 
2939*d4514f0bSApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2940*d4514f0bSApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2941*d4514f0bSApple OSS Distributions {
2942*d4514f0bSApple OSS Distributions 	_kernelTag = (vm_tag_t) kernelTag;
2943*d4514f0bSApple OSS Distributions 	_userTag   = (vm_tag_t) userTag;
2944*d4514f0bSApple OSS Distributions }
2945*d4514f0bSApple OSS Distributions 
2946*d4514f0bSApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2947*d4514f0bSApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2948*d4514f0bSApple OSS Distributions {
2949*d4514f0bSApple OSS Distributions 	if (vm_kernel_map_is_kernel(map)) {
2950*d4514f0bSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2951*d4514f0bSApple OSS Distributions 			return (uint32_t) _kernelTag;
2952*d4514f0bSApple OSS Distributions 		}
2953*d4514f0bSApple OSS Distributions 	} else {
2954*d4514f0bSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE != _userTag) {
2955*d4514f0bSApple OSS Distributions 			return (uint32_t) _userTag;
2956*d4514f0bSApple OSS Distributions 		}
2957*d4514f0bSApple OSS Distributions 	}
2958*d4514f0bSApple OSS Distributions 	return IOMemoryTag(map);
2959*d4514f0bSApple OSS Distributions }
2960*d4514f0bSApple OSS Distributions 
/*
 * Back-end dispatcher used by IODMACommand to query and manipulate this
 * descriptor's DMA state.  'op' carries an operation code in
 * kIOMDDMACommandOperationMask plus optional flag bits above the mask
 * (extracted into 'params').  'vData'/'dataSize' point to an op-specific
 * argument structure; kIOReturnUnderrun is returned when the buffer is
 * too small for the expected structure.
 *
 * Supported ops: kIOMDDMAMap, kIOMDDMAUnmap, kIOMDAddDMAMapSpec,
 * kIOMDGetCharacteristics, kIOMDDMAActive, kIOMDWalkSegments; anything
 * else yields kIOReturnBadArgument.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOReturn err = kIOReturnSuccess;
	DMACommandOps params;
	// Cast away const once: several ops update cached mapping state
	// (_memoryEntries, _dmaReferences, ...) on this logically-mutable object.
	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
	ioGMDData *dataP;

	// Split flag bits (outside the operation mask) from the base opcode.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	// kIOMDDMAMap: establish (or hand out a cached) IOMMU mapping covering
	// data->fOffset/fLength for data->fMapper.
	if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// Lazily allocate the ioGMDData bookkeeping block.
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries && data->fMapper) {
			bool remap, keepMap;
			dataP = getDataP(_memoryEntries);

			// Tighten the cached mapping constraints to the strictest
			// requirements seen so far (fewest address bits, largest alignment).
			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
			}
			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
			}

			// The descriptor retains ownership of the mapping only when it is a
			// system-mapper mapping covering the entire descriptor.
			keepMap = (data->fMapper == gIOSystemMapper);
			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

			// Serialize system-mapper mapping state against prepare()/complete().
			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockLock(_prepareLock);
			}

			// A fresh mapping is required when we are not keeping ownership, or
			// when the cached mapping violates the address-width or alignment
			// constraints accumulated above.
			remap = (!keepMap);
			remap |= (dataP->fDMAMapNumAddressBits < 64)
			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
			remap |= (dataP->fDMAMapAlignment > page_size);

			if (remap || !dataP->fMappedBaseValid) {
				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
					// Cache the new whole-descriptor mapping; zero
					// fAllocLength so the caller won't free it.
					dataP->fMappedBase      = data->fAlloc;
					dataP->fMappedBaseValid = true;
					dataP->fMappedLength    = data->fAllocLength;
					data->fAllocLength      = 0;    // IOMD owns the alloc now
				}
			} else {
				// Reuse the cached mapping; record the use for accounting.
				data->fAlloc = dataP->fMappedBase;
				data->fAllocLength = 0;         // give out IOMD map
				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
			}

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockUnlock(_prepareLock);
			}
		}
		return err;
	}
	// kIOMDDMAUnmap: release a mapping previously handed out by kIOMDDMAMap.
	if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// Nothing to unmap for a zero-page descriptor.
		if (_pages) {
			err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
		}

		return kIOReturnSuccess;
	}

	// kIOMDAddDMAMapSpec: merge additional mapping constraints into the
	// cached spec without creating a mapping.
	if (kIOMDAddDMAMapSpec == op) {
		if (dataSize < sizeof(IODMAMapSpecification)) {
			return kIOReturnUnderrun;
		}

		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries) {
			dataP = getDataP(_memoryEntries);
			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->numAddressBits;
			}
			if (data->alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->alignment;
			}
		}
		return kIOReturnSuccess;
	}

	// kIOMDGetCharacteristics: report length, range count, page count,
	// direction, and prepare-state details.
	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = _length;
		data->fSGCount = _rangesCount;
		data->fPages = _pages;
		data->fDirection = getDirection();
		if (!_wireCount) {
			data->fIsPrepared = false;
		} else {
			data->fIsPrepared = true;
			data->fHighestPage = _highestPage;
			if (_memoryEntries) {
				dataP = getDataP(_memoryEntries);
				ioPLBlock *ioplList = getIOPLList(dataP);
				UInt count = getNumIOPL(_memoryEntries, dataP);
				// With a single IOPL the page alignment can be reported
				// exactly from its first-page offset.
				if (count == 1) {
					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
				}
			}
		}

		return kIOReturnSuccess;
	} else if (kIOMDDMAActive == op) {
		// kIOMDDMAActive: 'params' nonzero => take a DMA reference,
		// zero => drop one.  Underflow is a fatal programming error.
		if (params) {
			int16_t prior;
			prior = OSAddAtomic16(1, &md->_dmaReferences);
			// First active reference: invalidate the cached map name.
			if (!prior) {
				md->_mapName = NULL;
			}
		} else {
			if (md->_dmaReferences) {
				OSAddAtomic16(-1, &md->_dmaReferences);
			} else {
				panic("_dmaReferences underflow");
			}
		}
	} else if (kIOMDWalkSegments != op) {
		return kIOReturnBadArgument;
	}

	// Get the next segment (kIOMDWalkSegments falls through to here).
	// InternalState persists across calls inside the caller-provided buffer,
	// caching where the previous walk left off (fOffset2Index/fIndex).
	struct InternalState {
		IOMDDMAWalkSegmentArgs fIO;
		mach_vm_size_t fOffset2Index;
		mach_vm_size_t fNextOffset;
		UInt fIndex;
	} *isP;

	// Find the next segment
	if (dataSize < sizeof(*isP)) {
		return kIOReturnUnderrun;
	}

	isP = (InternalState *) vData;
	uint64_t offset = isP->fIO.fOffset;
	uint8_t mapped = isP->fIO.fMapped;
	uint64_t mappedBase;

	// Remote (non-local) memory cannot be walked in mapped (IOVM) space.
	if (mapped && (kIOMemoryRemote & _flags)) {
		return kIOReturnNotAttached;
	}

	// If a mapped walk is requested but no system-mapper mapping exists yet,
	// create one now covering the whole descriptor.
	if (IOMapper::gSystem && mapped
	    && (!(kIOMemoryHostOnly & _flags))
	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		dataP = getDataP(_memoryEntries);
		if (dataP->fMapper) {
			IODMAMapSpecification mapSpec;
			bzero(&mapSpec, sizeof(mapSpec));
			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
			mapSpec.alignment = dataP->fDMAMapAlignment;
			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
			if (kIOReturnSuccess != err) {
				return err;
			}
			dataP->fMappedBaseValid = true;
		}
	}

	// Resolve the IOVM base for mapped walks; downgrade to a physical walk
	// when no valid mapping is available.
	if (mapped) {
		if (IOMapper::gSystem
		    && (!(kIOMemoryHostOnly & _flags))
		    && _memoryEntries
		    && (dataP = getDataP(_memoryEntries))
		    && dataP->fMappedBaseValid) {
			mappedBase = dataP->fMappedBase;
		} else {
			mapped = 0;
		}
	}

	if (offset >= _length) {
		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
	}

	// Validate the previous offset
	UInt ind;
	mach_vm_size_t off2Ind = isP->fOffset2Index;
	// Resume from the cached index when this walk continues the previous
	// one; otherwise rescan from the start of the ranges.
	if (!params
	    && offset
	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
		ind = isP->fIndex;
	} else {
		ind = off2Ind = 0; // Start from beginning
	}
	mach_vm_size_t length;
	UInt64 address;

	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
		// Physical address based memory descriptor
		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}

		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#ifndef __LP64__
	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
		// Physical address based memory descriptor
		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}
		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#endif /* !__LP64__ */
	else {
		// Wired (IOPL-backed) memory: locate the segment via the iopl list
		// and page list.  do/while(false) so 'continue' acts as an early exit.
		do {
			if (!_wireCount) {
				panic("IOGMD: not wired for the IODMACommand");
			}

			assert(_memoryEntries);

			dataP = getDataP(_memoryEntries);
			const ioPLBlock *ioplList = getIOPLList(dataP);
			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
			upl_page_info_t *pageList = getPageList(dataP);

			assert(numIOPLs > 0);

			// Scan through iopl info blocks looking for block containing offset
			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
				ind++;
			}

			// Go back to actual range as search goes past it
			ioPLBlock ioplInfo = ioplList[ind - 1];
			off2Ind = ioplInfo.fIOMDOffset;

			if (ind < numIOPLs) {
				length = ioplList[ind].fIOMDOffset;
			} else {
				length = _length;
			}
			length -= offset;       // Remainder within iopl

			// Subtract offset till this iopl in total list
			offset -= off2Ind;

			// If a mapped address is requested and this is a pre-mapped IOPL
			// then just need to compute an offset relative to the mapped base.
			if (mapped) {
				offset += (ioplInfo.fPageOffset & PAGE_MASK);
				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
				continue; // Done leave do/while(false) now
			}

			// The offset is rebased into the current iopl.
			// Now add the iopl 1st page offset.
			offset += ioplInfo.fPageOffset;

			// For external UPLs the fPageInfo field points directly to
			// the upl's upl_page_info_t array.
			if (ioplInfo.fFlags & kIOPLExternUPL) {
				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
			} else {
				pageList = &pageList[ioplInfo.fPageInfo];
			}

			// Check for direct device non-paged memory
			if (ioplInfo.fFlags & kIOPLOnDevice) {
				address = ptoa_64(pageList->phys_addr) + offset;
				continue; // Done leave do/while(false) now
			}

			// Now we need compute the index into the pageList
			UInt pageInd = atop_32(offset);
			offset &= PAGE_MASK;

			// Compute the starting address of this segment
			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
			if (!pageAddr) {
				panic("!pageList phys_addr");
			}

			address = ptoa_64(pageAddr) + offset;

			// length is currently set to the length of the remainder of the iopl.
			// We need to check that the remainder of the iopl is contiguous.
			// This is indicated by pageList[ind].phys_addr being sequential.
			IOByteCount contigLength = PAGE_SIZE - offset;
			while (contigLength < length
			    && ++pageAddr == pageList[++pageInd].phys_addr) {
				contigLength += PAGE_SIZE;
			}

			if (contigLength < length) {
				length = contigLength;
			}


			assert(address);
			assert(length);
		} while (false);
	}

	// Update return values and state
	isP->fIO.fIOVMAddr = address;
	isP->fIO.fLength   = length;
	isP->fIndex        = ind;
	isP->fOffset2Index = off2Ind;
	isP->fNextOffset   = isP->fIO.fOffset + length;

	return kIOReturnSuccess;
}
3345*d4514f0bSApple OSS Distributions 
// Return the address of the segment containing `offset`, and the length of
// the contiguous run starting there, honoring the option bits:
//   _kIOMemorySourceSegment - return the caller-supplied (source) range
//   kIOMemoryMapperNone     - return an unmapped physical address
// Returns 0 (and a zero *lengthOfSegment) when offset is out of bounds or no
// segment can be produced.
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	// Default to a zero-length result until a segment is found.
	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		// Coalesce any immediately-adjacent ranges into one segment.
		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Normal path: walk segments via the DMA machinery.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				// Translate through the mapper, then extend the run one
				// page at a time while the translation stays contiguous.
				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// A NULL address means no segment; report zero length to match.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3452*d4514f0bSApple OSS Distributions 
3453*d4514f0bSApple OSS Distributions #ifndef __LP64__
3454*d4514f0bSApple OSS Distributions #pragma clang diagnostic push
3455*d4514f0bSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3456*d4514f0bSApple OSS Distributions 
3457*d4514f0bSApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3458*d4514f0bSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3459*d4514f0bSApple OSS Distributions {
3460*d4514f0bSApple OSS Distributions 	addr64_t address = 0;
3461*d4514f0bSApple OSS Distributions 
3462*d4514f0bSApple OSS Distributions 	if (options & _kIOMemorySourceSegment) {
3463*d4514f0bSApple OSS Distributions 		address = getSourceSegment(offset, lengthOfSegment);
3464*d4514f0bSApple OSS Distributions 	} else if (options & kIOMemoryMapperNone) {
3465*d4514f0bSApple OSS Distributions 		address = getPhysicalSegment64(offset, lengthOfSegment);
3466*d4514f0bSApple OSS Distributions 	} else {
3467*d4514f0bSApple OSS Distributions 		address = getPhysicalSegment(offset, lengthOfSegment);
3468*d4514f0bSApple OSS Distributions 	}
3469*d4514f0bSApple OSS Distributions 
3470*d4514f0bSApple OSS Distributions 	return address;
3471*d4514f0bSApple OSS Distributions }
3472*d4514f0bSApple OSS Distributions #pragma clang diagnostic pop
3473*d4514f0bSApple OSS Distributions 
3474*d4514f0bSApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3475*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3476*d4514f0bSApple OSS Distributions {
3477*d4514f0bSApple OSS Distributions 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3478*d4514f0bSApple OSS Distributions }
3479*d4514f0bSApple OSS Distributions 
3480*d4514f0bSApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3481*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3482*d4514f0bSApple OSS Distributions {
3483*d4514f0bSApple OSS Distributions 	addr64_t    address = 0;
3484*d4514f0bSApple OSS Distributions 	IOByteCount length  = 0;
3485*d4514f0bSApple OSS Distributions 
3486*d4514f0bSApple OSS Distributions 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3487*d4514f0bSApple OSS Distributions 
3488*d4514f0bSApple OSS Distributions 	if (lengthOfSegment) {
3489*d4514f0bSApple OSS Distributions 		length = *lengthOfSegment;
3490*d4514f0bSApple OSS Distributions 	}
3491*d4514f0bSApple OSS Distributions 
3492*d4514f0bSApple OSS Distributions 	if ((address + length) > 0x100000000ULL) {
3493*d4514f0bSApple OSS Distributions 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3494*d4514f0bSApple OSS Distributions 		    address, (long) length, (getMetaClass())->getClassName());
3495*d4514f0bSApple OSS Distributions 	}
3496*d4514f0bSApple OSS Distributions 
3497*d4514f0bSApple OSS Distributions 	return (IOPhysicalAddress) address;
3498*d4514f0bSApple OSS Distributions }
3499*d4514f0bSApple OSS Distributions 
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// Obtain the (32-bit) mapped segment, then translate it back through
	// the system mapper to a 64-bit physical address where one exists.
	IOPhysicalAddress phys32;
	IOByteCount       length;
	addr64_t          phys64;
	IOMapper *        mapper = NULL;

	phys32 = getPhysicalSegment(offset, lengthOfSegment);
	if (!phys32) {
		// No segment at this offset.
		return 0;
	}

	if (gIOSystemMapper) {
		mapper = gIOSystemMapper;
	}

	if (mapper) {
		IOByteCount origLen;

		// Translate the first page, then grow the run page by page while
		// the translated addresses remain physically contiguous.
		phys64 = mapper->mapToPhysicalAddress(phys32);
		origLen = *lengthOfSegment;
		length = page_size - (phys64 & (page_size - 1));
		while ((length < origLen)
		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
			length += page_size;
		}
		// Never report more than the original segment length.
		if (length > origLen) {
			length = origLen;
		}

		*lengthOfSegment = length;
	} else {
		// No system mapper: the 32-bit address is already physical.
		phys64 = (addr64_t) phys32;
	}

	return phys64;
}
3538*d4514f0bSApple OSS Distributions 
3539*d4514f0bSApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3540*d4514f0bSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3541*d4514f0bSApple OSS Distributions {
3542*d4514f0bSApple OSS Distributions 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3543*d4514f0bSApple OSS Distributions }
3544*d4514f0bSApple OSS Distributions 
3545*d4514f0bSApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3546*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3547*d4514f0bSApple OSS Distributions {
3548*d4514f0bSApple OSS Distributions 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3549*d4514f0bSApple OSS Distributions }
3550*d4514f0bSApple OSS Distributions 
3551*d4514f0bSApple OSS Distributions #pragma clang diagnostic push
3552*d4514f0bSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3553*d4514f0bSApple OSS Distributions 
3554*d4514f0bSApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3555*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3556*d4514f0bSApple OSS Distributions     IOByteCount * lengthOfSegment)
3557*d4514f0bSApple OSS Distributions {
3558*d4514f0bSApple OSS Distributions 	if (_task == kernel_task) {
3559*d4514f0bSApple OSS Distributions 		return (void *) getSourceSegment(offset, lengthOfSegment);
3560*d4514f0bSApple OSS Distributions 	} else {
3561*d4514f0bSApple OSS Distributions 		panic("IOGMD::getVirtualSegment deprecated");
3562*d4514f0bSApple OSS Distributions 	}
3563*d4514f0bSApple OSS Distributions 
3564*d4514f0bSApple OSS Distributions 	return NULL;
3565*d4514f0bSApple OSS Distributions }
3566*d4514f0bSApple OSS Distributions #pragma clang diagnostic pop
3567*d4514f0bSApple OSS Distributions #endif /* !__LP64__ */
3568*d4514f0bSApple OSS Distributions 
// Base-class dispatcher for DMA-command operations against this descriptor.
// Supported ops: kIOMDGetCharacteristics, kIOMDWalkSegments, kIOMDDMAMap,
// kIOMDDMAUnmap; kIOMDAddDMAMapSpec is unsupported here, anything else is a
// bad argument.
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// dmaMap()/dmaUnmap() below are non-const; this base implementation
	// casts away const to reach them.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Split the op word into parameter bits and the operation code.
	// NOTE(review): params is computed but never used in this base-class
	// implementation — presumably consumed only by subclass overrides.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// This base implementation only walks unmapped physical segments.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Fetch the first segment, then coalesce physically-contiguous
		// follow-on segments into one run.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): err from dmaUnmap() is discarded and success is
		// returned unconditionally, unlike the kIOMDDMAMap branch above —
		// confirm this asymmetry is intentional.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3640*d4514f0bSApple OSS Distributions 
// Change (or query, via oldState) the purgeable state of the memory backing
// this descriptor. With a memory reference, defer to the superclass;
// otherwise operate directly on the owning task's vm_map. Remote descriptors
// are rejected.
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	// Purgeable control is not applicable to remote memory.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		// A memory reference exists: let the superclass act on it.
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable kernel buffers need the reference path above.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate the IOKit state to VM control/state values.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Convert the returned VM state back to IOKit bits.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3704*d4514f0bSApple OSS Distributions 
3705*d4514f0bSApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3706*d4514f0bSApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3707*d4514f0bSApple OSS Distributions     IOOptionBits * oldState )
3708*d4514f0bSApple OSS Distributions {
3709*d4514f0bSApple OSS Distributions 	IOReturn err = kIOReturnNotReady;
3710*d4514f0bSApple OSS Distributions 
3711*d4514f0bSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3712*d4514f0bSApple OSS Distributions 		LOCK;
3713*d4514f0bSApple OSS Distributions 	}
3714*d4514f0bSApple OSS Distributions 	if (_memRef) {
3715*d4514f0bSApple OSS Distributions 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3716*d4514f0bSApple OSS Distributions 	}
3717*d4514f0bSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3718*d4514f0bSApple OSS Distributions 		UNLOCK;
3719*d4514f0bSApple OSS Distributions 	}
3720*d4514f0bSApple OSS Distributions 
3721*d4514f0bSApple OSS Distributions 	return err;
3722*d4514f0bSApple OSS Distributions }
3723*d4514f0bSApple OSS Distributions 
3724*d4514f0bSApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3725*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3726*d4514f0bSApple OSS Distributions     int newLedgerTag,
3727*d4514f0bSApple OSS Distributions     IOOptionBits newLedgerOptions )
3728*d4514f0bSApple OSS Distributions {
3729*d4514f0bSApple OSS Distributions 	IOReturn      err = kIOReturnSuccess;
3730*d4514f0bSApple OSS Distributions 
3731*d4514f0bSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3732*d4514f0bSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3733*d4514f0bSApple OSS Distributions 		return kIOReturnNotAttached;
3734*d4514f0bSApple OSS Distributions 	}
3735*d4514f0bSApple OSS Distributions 
3736*d4514f0bSApple OSS Distributions 	if (iokit_iomd_setownership_enabled == FALSE) {
3737*d4514f0bSApple OSS Distributions 		return kIOReturnUnsupported;
3738*d4514f0bSApple OSS Distributions 	}
3739*d4514f0bSApple OSS Distributions 
3740*d4514f0bSApple OSS Distributions 	if (_memRef) {
3741*d4514f0bSApple OSS Distributions 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3742*d4514f0bSApple OSS Distributions 	} else {
3743*d4514f0bSApple OSS Distributions 		err = kIOReturnUnsupported;
3744*d4514f0bSApple OSS Distributions 	}
3745*d4514f0bSApple OSS Distributions 
3746*d4514f0bSApple OSS Distributions 	return err;
3747*d4514f0bSApple OSS Distributions }
3748*d4514f0bSApple OSS Distributions 
// Assign a new owning task / ledger tag for the memory behind this
// descriptor. Acts on the memory reference when present; otherwise falls
// through to the sub/multi memory descriptor subclasses, which re-dispatch
// per constituent descriptor.
IOReturn
IOMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
	IOReturn err = kIOReturnNotReady;

	// Ownership changes are not applicable to remote descriptors.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Honor the global enable switch for this feature.
	if (iokit_iomd_setownership_enabled == FALSE) {
		return kIOReturnUnsupported;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
	} else {
		// No memory reference: the composite descriptor types implement
		// their own fan-out.
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor   * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
3785*d4514f0bSApple OSS Distributions 
3786*d4514f0bSApple OSS Distributions 
// Compute the total page-rounded length this descriptor would occupy in a
// DMA mapping; optionally reports (via *offset) the sub-page offset of the
// first segment.
uint64_t
IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
{
	uint64_t length;

	if (_memRef) {
		// A memory reference knows its own mapping length.
		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
	} else {
		IOByteCount       iterate, segLen;
		IOPhysicalAddress sourceAddr, sourceAlign;

		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		length = 0;
		iterate = 0;
		// Walk the source segments, summing each one rounded out to whole
		// page boundaries.
		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
			sourceAlign = (sourceAddr & page_mask);
			// Report the first segment's sub-page alignment only.
			if (offset && !iterate) {
				*offset = sourceAlign;
			}
			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
			iterate += segLen;
		}
		// No segments walked at all: fall back to the descriptor length
		// with a zero offset.
		if (!iterate) {
			length = getLength();
			if (offset) {
				*offset = 0;
			}
		}
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return length;
}
3824*d4514f0bSApple OSS Distributions 
3825*d4514f0bSApple OSS Distributions 
// Report the resident and dirty page counts for the memory behind this
// descriptor. Uses the memory reference when present; otherwise defers to
// the sub/multi memory descriptor subclasses. Remote descriptors are
// rejected.
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
    IOByteCount * dirtyPageCount )
{
	IOReturn err = kIOReturnNotReady;

	// Page counts are not available for remote memory.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
	} else {
		// No memory reference: the composite descriptor types implement
		// their own accounting.
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor   * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
3857*d4514f0bSApple OSS Distributions 
3858*d4514f0bSApple OSS Distributions 
3859*d4514f0bSApple OSS Distributions #if defined(__arm64__)
3860*d4514f0bSApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3861*d4514f0bSApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3862*d4514f0bSApple OSS Distributions #else /* defined(__arm64__) */
3863*d4514f0bSApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3864*d4514f0bSApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3865*d4514f0bSApple OSS Distributions #endif /* defined(__arm64__) */
3866*d4514f0bSApple OSS Distributions 
3867*d4514f0bSApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3868*d4514f0bSApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3869*d4514f0bSApple OSS Distributions {
3870*d4514f0bSApple OSS Distributions 	ppnum_t page, end;
3871*d4514f0bSApple OSS Distributions 
3872*d4514f0bSApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3873*d4514f0bSApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3874*d4514f0bSApple OSS Distributions 	for (; page < end; page++) {
3875*d4514f0bSApple OSS Distributions 		pmap_clear_noencrypt(page);
3876*d4514f0bSApple OSS Distributions 	}
3877*d4514f0bSApple OSS Distributions }
3878*d4514f0bSApple OSS Distributions 
3879*d4514f0bSApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3880*d4514f0bSApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3881*d4514f0bSApple OSS Distributions {
3882*d4514f0bSApple OSS Distributions 	ppnum_t page, end;
3883*d4514f0bSApple OSS Distributions 
3884*d4514f0bSApple OSS Distributions 	page = (ppnum_t) atop_64(round_page_64(pa));
3885*d4514f0bSApple OSS Distributions 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3886*d4514f0bSApple OSS Distributions 	for (; page < end; page++) {
3887*d4514f0bSApple OSS Distributions 		pmap_set_noencrypt(page);
3888*d4514f0bSApple OSS Distributions 	}
3889*d4514f0bSApple OSS Distributions }
3890*d4514f0bSApple OSS Distributions 
3891*d4514f0bSApple OSS Distributions IOReturn
performOperation(IOOptionBits options,IOByteCount offset,IOByteCount length)3892*d4514f0bSApple OSS Distributions IOMemoryDescriptor::performOperation( IOOptionBits options,
3893*d4514f0bSApple OSS Distributions     IOByteCount offset, IOByteCount length )
3894*d4514f0bSApple OSS Distributions {
3895*d4514f0bSApple OSS Distributions 	IOByteCount remaining;
3896*d4514f0bSApple OSS Distributions 	unsigned int res;
3897*d4514f0bSApple OSS Distributions 	void (*func)(addr64_t pa, unsigned int count) = NULL;
3898*d4514f0bSApple OSS Distributions #if defined(__arm64__)
3899*d4514f0bSApple OSS Distributions 	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3900*d4514f0bSApple OSS Distributions #endif
3901*d4514f0bSApple OSS Distributions 
3902*d4514f0bSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
3903*d4514f0bSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
3904*d4514f0bSApple OSS Distributions 		return kIOReturnNotAttached;
3905*d4514f0bSApple OSS Distributions 	}
3906*d4514f0bSApple OSS Distributions 
3907*d4514f0bSApple OSS Distributions 	switch (options) {
3908*d4514f0bSApple OSS Distributions 	case kIOMemoryIncoherentIOFlush:
3909*d4514f0bSApple OSS Distributions #if defined(__arm64__)
3910*d4514f0bSApple OSS Distributions 		func_ext = &dcache_incoherent_io_flush64;
3911*d4514f0bSApple OSS Distributions #if __ARM_COHERENT_IO__
3912*d4514f0bSApple OSS Distributions 		func_ext(0, 0, 0, &res);
3913*d4514f0bSApple OSS Distributions 		return kIOReturnSuccess;
3914*d4514f0bSApple OSS Distributions #else /* __ARM_COHERENT_IO__ */
3915*d4514f0bSApple OSS Distributions 		break;
3916*d4514f0bSApple OSS Distributions #endif /* __ARM_COHERENT_IO__ */
3917*d4514f0bSApple OSS Distributions #else /* defined(__arm64__) */
3918*d4514f0bSApple OSS Distributions 		func = &dcache_incoherent_io_flush64;
3919*d4514f0bSApple OSS Distributions 		break;
3920*d4514f0bSApple OSS Distributions #endif /* defined(__arm64__) */
3921*d4514f0bSApple OSS Distributions 	case kIOMemoryIncoherentIOStore:
3922*d4514f0bSApple OSS Distributions #if defined(__arm64__)
3923*d4514f0bSApple OSS Distributions 		func_ext = &dcache_incoherent_io_store64;
3924*d4514f0bSApple OSS Distributions #if __ARM_COHERENT_IO__
3925*d4514f0bSApple OSS Distributions 		func_ext(0, 0, 0, &res);
3926*d4514f0bSApple OSS Distributions 		return kIOReturnSuccess;
3927*d4514f0bSApple OSS Distributions #else /* __ARM_COHERENT_IO__ */
3928*d4514f0bSApple OSS Distributions 		break;
3929*d4514f0bSApple OSS Distributions #endif /* __ARM_COHERENT_IO__ */
3930*d4514f0bSApple OSS Distributions #else /* defined(__arm64__) */
3931*d4514f0bSApple OSS Distributions 		func = &dcache_incoherent_io_store64;
3932*d4514f0bSApple OSS Distributions 		break;
3933*d4514f0bSApple OSS Distributions #endif /* defined(__arm64__) */
3934*d4514f0bSApple OSS Distributions 
3935*d4514f0bSApple OSS Distributions 	case kIOMemorySetEncrypted:
3936*d4514f0bSApple OSS Distributions 		func = &SetEncryptOp;
3937*d4514f0bSApple OSS Distributions 		break;
3938*d4514f0bSApple OSS Distributions 	case kIOMemoryClearEncrypted:
3939*d4514f0bSApple OSS Distributions 		func = &ClearEncryptOp;
3940*d4514f0bSApple OSS Distributions 		break;
3941*d4514f0bSApple OSS Distributions 	}
3942*d4514f0bSApple OSS Distributions 
3943*d4514f0bSApple OSS Distributions #if defined(__arm64__)
3944*d4514f0bSApple OSS Distributions 	if ((func == NULL) && (func_ext == NULL)) {
3945*d4514f0bSApple OSS Distributions 		return kIOReturnUnsupported;
3946*d4514f0bSApple OSS Distributions 	}
3947*d4514f0bSApple OSS Distributions #else /* defined(__arm64__) */
3948*d4514f0bSApple OSS Distributions 	if (!func) {
3949*d4514f0bSApple OSS Distributions 		return kIOReturnUnsupported;
3950*d4514f0bSApple OSS Distributions 	}
3951*d4514f0bSApple OSS Distributions #endif /* defined(__arm64__) */
3952*d4514f0bSApple OSS Distributions 
3953*d4514f0bSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
3954*d4514f0bSApple OSS Distributions 		LOCK;
3955*d4514f0bSApple OSS Distributions 	}
3956*d4514f0bSApple OSS Distributions 
3957*d4514f0bSApple OSS Distributions 	res = 0x0UL;
3958*d4514f0bSApple OSS Distributions 	remaining = length = min(length, getLength() - offset);
3959*d4514f0bSApple OSS Distributions 	while (remaining) {
3960*d4514f0bSApple OSS Distributions 		// (process another target segment?)
3961*d4514f0bSApple OSS Distributions 		addr64_t    dstAddr64;
3962*d4514f0bSApple OSS Distributions 		IOByteCount dstLen;
3963*d4514f0bSApple OSS Distributions 
3964*d4514f0bSApple OSS Distributions 		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3965*d4514f0bSApple OSS Distributions 		if (!dstAddr64) {
3966*d4514f0bSApple OSS Distributions 			break;
3967*d4514f0bSApple OSS Distributions 		}
3968*d4514f0bSApple OSS Distributions 
3969*d4514f0bSApple OSS Distributions 		// Clip segment length to remaining
3970*d4514f0bSApple OSS Distributions 		if (dstLen > remaining) {
3971*d4514f0bSApple OSS Distributions 			dstLen = remaining;
3972*d4514f0bSApple OSS Distributions 		}
3973*d4514f0bSApple OSS Distributions 		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
3974*d4514f0bSApple OSS Distributions 			dstLen = (UINT_MAX - PAGE_SIZE + 1);
3975*d4514f0bSApple OSS Distributions 		}
3976*d4514f0bSApple OSS Distributions 		if (remaining > UINT_MAX) {
3977*d4514f0bSApple OSS Distributions 			remaining = UINT_MAX;
3978*d4514f0bSApple OSS Distributions 		}
3979*d4514f0bSApple OSS Distributions 
3980*d4514f0bSApple OSS Distributions #if defined(__arm64__)
3981*d4514f0bSApple OSS Distributions 		if (func) {
3982*d4514f0bSApple OSS Distributions 			(*func)(dstAddr64, (unsigned int) dstLen);
3983*d4514f0bSApple OSS Distributions 		}
3984*d4514f0bSApple OSS Distributions 		if (func_ext) {
3985*d4514f0bSApple OSS Distributions 			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
3986*d4514f0bSApple OSS Distributions 			if (res != 0x0UL) {
3987*d4514f0bSApple OSS Distributions 				remaining = 0;
3988*d4514f0bSApple OSS Distributions 				break;
3989*d4514f0bSApple OSS Distributions 			}
3990*d4514f0bSApple OSS Distributions 		}
3991*d4514f0bSApple OSS Distributions #else /* defined(__arm64__) */
3992*d4514f0bSApple OSS Distributions 		(*func)(dstAddr64, (unsigned int) dstLen);
3993*d4514f0bSApple OSS Distributions #endif /* defined(__arm64__) */
3994*d4514f0bSApple OSS Distributions 
3995*d4514f0bSApple OSS Distributions 		offset    += dstLen;
3996*d4514f0bSApple OSS Distributions 		remaining -= dstLen;
3997*d4514f0bSApple OSS Distributions 	}
3998*d4514f0bSApple OSS Distributions 
3999*d4514f0bSApple OSS Distributions 	if (kIOMemoryThreadSafe & _flags) {
4000*d4514f0bSApple OSS Distributions 		UNLOCK;
4001*d4514f0bSApple OSS Distributions 	}
4002*d4514f0bSApple OSS Distributions 
4003*d4514f0bSApple OSS Distributions 	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
4004*d4514f0bSApple OSS Distributions }
4005*d4514f0bSApple OSS Distributions 
4006*d4514f0bSApple OSS Distributions /*
4007*d4514f0bSApple OSS Distributions  *
4008*d4514f0bSApple OSS Distributions  */
4009*d4514f0bSApple OSS Distributions 
4010*d4514f0bSApple OSS Distributions #if defined(__i386__) || defined(__x86_64__)
4011*d4514f0bSApple OSS Distributions 
4012*d4514f0bSApple OSS Distributions extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4013*d4514f0bSApple OSS Distributions 
4014*d4514f0bSApple OSS Distributions /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4015*d4514f0bSApple OSS Distributions  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4016*d4514f0bSApple OSS Distributions  * kernel non-text data -- should we just add another range instead?
4017*d4514f0bSApple OSS Distributions  */
4018*d4514f0bSApple OSS Distributions #define io_kernel_static_start  vm_kernel_stext
4019*d4514f0bSApple OSS Distributions #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4020*d4514f0bSApple OSS Distributions 
4021*d4514f0bSApple OSS Distributions #elif defined(__arm64__)
4022*d4514f0bSApple OSS Distributions 
4023*d4514f0bSApple OSS Distributions extern vm_offset_t              static_memory_end;
4024*d4514f0bSApple OSS Distributions 
4025*d4514f0bSApple OSS Distributions #if defined(__arm64__)
4026*d4514f0bSApple OSS Distributions #define io_kernel_static_start vm_kext_base
4027*d4514f0bSApple OSS Distributions #else /* defined(__arm64__) */
4028*d4514f0bSApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4029*d4514f0bSApple OSS Distributions #endif /* defined(__arm64__) */
4030*d4514f0bSApple OSS Distributions 
4031*d4514f0bSApple OSS Distributions #define io_kernel_static_end    static_memory_end
4032*d4514f0bSApple OSS Distributions 
4033*d4514f0bSApple OSS Distributions #else
4034*d4514f0bSApple OSS Distributions #error io_kernel_static_end is undefined for this architecture
4035*d4514f0bSApple OSS Distributions #endif
4036*d4514f0bSApple OSS Distributions 
4037*d4514f0bSApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4038*d4514f0bSApple OSS Distributions io_get_kernel_static_upl(
4039*d4514f0bSApple OSS Distributions 	vm_map_t                /* map */,
4040*d4514f0bSApple OSS Distributions 	uintptr_t               offset,
4041*d4514f0bSApple OSS Distributions 	upl_size_t              *upl_size,
4042*d4514f0bSApple OSS Distributions 	unsigned int            *page_offset,
4043*d4514f0bSApple OSS Distributions 	upl_t                   *upl,
4044*d4514f0bSApple OSS Distributions 	upl_page_info_array_t   page_list,
4045*d4514f0bSApple OSS Distributions 	unsigned int            *count,
4046*d4514f0bSApple OSS Distributions 	ppnum_t                 *highest_page)
4047*d4514f0bSApple OSS Distributions {
4048*d4514f0bSApple OSS Distributions 	unsigned int pageCount, page;
4049*d4514f0bSApple OSS Distributions 	ppnum_t phys;
4050*d4514f0bSApple OSS Distributions 	ppnum_t highestPage = 0;
4051*d4514f0bSApple OSS Distributions 
4052*d4514f0bSApple OSS Distributions 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4053*d4514f0bSApple OSS Distributions 	if (pageCount > *count) {
4054*d4514f0bSApple OSS Distributions 		pageCount = *count;
4055*d4514f0bSApple OSS Distributions 	}
4056*d4514f0bSApple OSS Distributions 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4057*d4514f0bSApple OSS Distributions 
4058*d4514f0bSApple OSS Distributions 	*upl = NULL;
4059*d4514f0bSApple OSS Distributions 	*page_offset = ((unsigned int) page_mask & offset);
4060*d4514f0bSApple OSS Distributions 
4061*d4514f0bSApple OSS Distributions 	for (page = 0; page < pageCount; page++) {
4062*d4514f0bSApple OSS Distributions 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4063*d4514f0bSApple OSS Distributions 		if (!phys) {
4064*d4514f0bSApple OSS Distributions 			break;
4065*d4514f0bSApple OSS Distributions 		}
4066*d4514f0bSApple OSS Distributions 		page_list[page].phys_addr = phys;
4067*d4514f0bSApple OSS Distributions 		page_list[page].free_when_done = 0;
4068*d4514f0bSApple OSS Distributions 		page_list[page].absent    = 0;
4069*d4514f0bSApple OSS Distributions 		page_list[page].dirty     = 0;
4070*d4514f0bSApple OSS Distributions 		page_list[page].precious  = 0;
4071*d4514f0bSApple OSS Distributions 		page_list[page].device    = 0;
4072*d4514f0bSApple OSS Distributions 		if (phys > highestPage) {
4073*d4514f0bSApple OSS Distributions 			highestPage = phys;
4074*d4514f0bSApple OSS Distributions 		}
4075*d4514f0bSApple OSS Distributions 	}
4076*d4514f0bSApple OSS Distributions 
4077*d4514f0bSApple OSS Distributions 	*highest_page = highestPage;
4078*d4514f0bSApple OSS Distributions 
4079*d4514f0bSApple OSS Distributions 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4080*d4514f0bSApple OSS Distributions }
4081*d4514f0bSApple OSS Distributions 
4082*d4514f0bSApple OSS Distributions IOReturn
wireVirtual(IODirection forDirection)4083*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4084*d4514f0bSApple OSS Distributions {
4085*d4514f0bSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4086*d4514f0bSApple OSS Distributions 	IOReturn error = kIOReturnSuccess;
4087*d4514f0bSApple OSS Distributions 	ioGMDData *dataP;
4088*d4514f0bSApple OSS Distributions 	upl_page_info_array_t pageInfo;
4089*d4514f0bSApple OSS Distributions 	ppnum_t mapBase;
4090*d4514f0bSApple OSS Distributions 	vm_tag_t tag = VM_KERN_MEMORY_NONE;
4091*d4514f0bSApple OSS Distributions 	mach_vm_size_t numBytesWired = 0;
4092*d4514f0bSApple OSS Distributions 
4093*d4514f0bSApple OSS Distributions 	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4094*d4514f0bSApple OSS Distributions 
4095*d4514f0bSApple OSS Distributions 	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4096*d4514f0bSApple OSS Distributions 		forDirection = (IODirection) (forDirection | getDirection());
4097*d4514f0bSApple OSS Distributions 	}
4098*d4514f0bSApple OSS Distributions 
4099*d4514f0bSApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4100*d4514f0bSApple OSS Distributions 	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4101*d4514f0bSApple OSS Distributions 	switch (kIODirectionOutIn & forDirection) {
4102*d4514f0bSApple OSS Distributions 	case kIODirectionOut:
4103*d4514f0bSApple OSS Distributions 		// Pages do not need to be marked as dirty on commit
4104*d4514f0bSApple OSS Distributions 		uplFlags = UPL_COPYOUT_FROM;
4105*d4514f0bSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess;
4106*d4514f0bSApple OSS Distributions 		break;
4107*d4514f0bSApple OSS Distributions 
4108*d4514f0bSApple OSS Distributions 	case kIODirectionIn:
4109*d4514f0bSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapWriteAccess;
4110*d4514f0bSApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4111*d4514f0bSApple OSS Distributions 		break;
4112*d4514f0bSApple OSS Distributions 
4113*d4514f0bSApple OSS Distributions 	default:
4114*d4514f0bSApple OSS Distributions 		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4115*d4514f0bSApple OSS Distributions 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4116*d4514f0bSApple OSS Distributions 		break;
4117*d4514f0bSApple OSS Distributions 	}
4118*d4514f0bSApple OSS Distributions 
4119*d4514f0bSApple OSS Distributions 	if (_wireCount) {
4120*d4514f0bSApple OSS Distributions 		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4121*d4514f0bSApple OSS Distributions 			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4122*d4514f0bSApple OSS Distributions 			    (size_t)VM_KERNEL_ADDRPERM(this));
4123*d4514f0bSApple OSS Distributions 			error = kIOReturnNotWritable;
4124*d4514f0bSApple OSS Distributions 		}
4125*d4514f0bSApple OSS Distributions 	} else {
4126*d4514f0bSApple OSS Distributions 		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4127*d4514f0bSApple OSS Distributions 		IOMapper *mapper;
4128*d4514f0bSApple OSS Distributions 
4129*d4514f0bSApple OSS Distributions 		mapper = dataP->fMapper;
4130*d4514f0bSApple OSS Distributions 		dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4131*d4514f0bSApple OSS Distributions 
4132*d4514f0bSApple OSS Distributions 		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4133*d4514f0bSApple OSS Distributions 		tag = _kernelTag;
4134*d4514f0bSApple OSS Distributions 		if (VM_KERN_MEMORY_NONE == tag) {
4135*d4514f0bSApple OSS Distributions 			tag = IOMemoryTag(kernel_map);
4136*d4514f0bSApple OSS Distributions 		}
4137*d4514f0bSApple OSS Distributions 
4138*d4514f0bSApple OSS Distributions 		if (kIODirectionPrepareToPhys32 & forDirection) {
4139*d4514f0bSApple OSS Distributions 			if (!mapper) {
4140*d4514f0bSApple OSS Distributions 				uplFlags |= UPL_NEED_32BIT_ADDR;
4141*d4514f0bSApple OSS Distributions 			}
4142*d4514f0bSApple OSS Distributions 			if (dataP->fDMAMapNumAddressBits > 32) {
4143*d4514f0bSApple OSS Distributions 				dataP->fDMAMapNumAddressBits = 32;
4144*d4514f0bSApple OSS Distributions 			}
4145*d4514f0bSApple OSS Distributions 		}
4146*d4514f0bSApple OSS Distributions 		if (kIODirectionPrepareNoFault    & forDirection) {
4147*d4514f0bSApple OSS Distributions 			uplFlags |= UPL_REQUEST_NO_FAULT;
4148*d4514f0bSApple OSS Distributions 		}
4149*d4514f0bSApple OSS Distributions 		if (kIODirectionPrepareNoZeroFill & forDirection) {
4150*d4514f0bSApple OSS Distributions 			uplFlags |= UPL_NOZEROFILLIO;
4151*d4514f0bSApple OSS Distributions 		}
4152*d4514f0bSApple OSS Distributions 		if (kIODirectionPrepareNonCoherent & forDirection) {
4153*d4514f0bSApple OSS Distributions 			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4154*d4514f0bSApple OSS Distributions 		}
4155*d4514f0bSApple OSS Distributions 
4156*d4514f0bSApple OSS Distributions 		mapBase = 0;
4157*d4514f0bSApple OSS Distributions 
4158*d4514f0bSApple OSS Distributions 		// Note that appendBytes(NULL) zeros the data up to the desired length
4159*d4514f0bSApple OSS Distributions 		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4160*d4514f0bSApple OSS Distributions 		if (uplPageSize > ((unsigned int)uplPageSize)) {
4161*d4514f0bSApple OSS Distributions 			error = kIOReturnNoMemory;
4162*d4514f0bSApple OSS Distributions 			traceInterval.setEndArg2(error);
4163*d4514f0bSApple OSS Distributions 			return error;
4164*d4514f0bSApple OSS Distributions 		}
4165*d4514f0bSApple OSS Distributions 		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4166*d4514f0bSApple OSS Distributions 			error = kIOReturnNoMemory;
4167*d4514f0bSApple OSS Distributions 			traceInterval.setEndArg2(error);
4168*d4514f0bSApple OSS Distributions 			return error;
4169*d4514f0bSApple OSS Distributions 		}
4170*d4514f0bSApple OSS Distributions 		dataP = NULL;
4171*d4514f0bSApple OSS Distributions 
4172*d4514f0bSApple OSS Distributions 		// Find the appropriate vm_map for the given task
4173*d4514f0bSApple OSS Distributions 		vm_map_t curMap;
4174*d4514f0bSApple OSS Distributions 		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4175*d4514f0bSApple OSS Distributions 			curMap = NULL;
4176*d4514f0bSApple OSS Distributions 		} else {
4177*d4514f0bSApple OSS Distributions 			curMap = get_task_map(_task);
4178*d4514f0bSApple OSS Distributions 		}
4179*d4514f0bSApple OSS Distributions 
4180*d4514f0bSApple OSS Distributions 		// Iterate over the vector of virtual ranges
4181*d4514f0bSApple OSS Distributions 		Ranges vec = _ranges;
4182*d4514f0bSApple OSS Distributions 		unsigned int pageIndex  = 0;
4183*d4514f0bSApple OSS Distributions 		IOByteCount mdOffset    = 0;
4184*d4514f0bSApple OSS Distributions 		ppnum_t highestPage     = 0;
4185*d4514f0bSApple OSS Distributions 		bool         byteAlignUPL;
4186*d4514f0bSApple OSS Distributions 
4187*d4514f0bSApple OSS Distributions 		IOMemoryEntry * memRefEntry = NULL;
4188*d4514f0bSApple OSS Distributions 		if (_memRef) {
4189*d4514f0bSApple OSS Distributions 			memRefEntry = &_memRef->entries[0];
4190*d4514f0bSApple OSS Distributions 			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4191*d4514f0bSApple OSS Distributions 		} else {
4192*d4514f0bSApple OSS Distributions 			byteAlignUPL = true;
4193*d4514f0bSApple OSS Distributions 		}
4194*d4514f0bSApple OSS Distributions 
4195*d4514f0bSApple OSS Distributions 		for (UInt range = 0; mdOffset < _length; range++) {
4196*d4514f0bSApple OSS Distributions 			ioPLBlock iopl;
4197*d4514f0bSApple OSS Distributions 			mach_vm_address_t startPage, startPageOffset;
4198*d4514f0bSApple OSS Distributions 			mach_vm_size_t    numBytes;
4199*d4514f0bSApple OSS Distributions 			ppnum_t highPage = 0;
4200*d4514f0bSApple OSS Distributions 
4201*d4514f0bSApple OSS Distributions 			if (_memRef) {
4202*d4514f0bSApple OSS Distributions 				if (range >= _memRef->count) {
4203*d4514f0bSApple OSS Distributions 					panic("memRefEntry");
4204*d4514f0bSApple OSS Distributions 				}
4205*d4514f0bSApple OSS Distributions 				memRefEntry = &_memRef->entries[range];
4206*d4514f0bSApple OSS Distributions 				numBytes    = memRefEntry->size;
4207*d4514f0bSApple OSS Distributions 				startPage   = -1ULL;
4208*d4514f0bSApple OSS Distributions 				if (byteAlignUPL) {
4209*d4514f0bSApple OSS Distributions 					startPageOffset = 0;
4210*d4514f0bSApple OSS Distributions 				} else {
4211*d4514f0bSApple OSS Distributions 					startPageOffset = (memRefEntry->start & PAGE_MASK);
4212*d4514f0bSApple OSS Distributions 				}
4213*d4514f0bSApple OSS Distributions 			} else {
4214*d4514f0bSApple OSS Distributions 				// Get the startPage address and length of vec[range]
4215*d4514f0bSApple OSS Distributions 				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
4216*d4514f0bSApple OSS Distributions 				if (byteAlignUPL) {
4217*d4514f0bSApple OSS Distributions 					startPageOffset = 0;
4218*d4514f0bSApple OSS Distributions 				} else {
4219*d4514f0bSApple OSS Distributions 					startPageOffset = startPage & PAGE_MASK;
4220*d4514f0bSApple OSS Distributions 					startPage = trunc_page_64(startPage);
4221*d4514f0bSApple OSS Distributions 				}
4222*d4514f0bSApple OSS Distributions 			}
4223*d4514f0bSApple OSS Distributions 			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4224*d4514f0bSApple OSS Distributions 			numBytes += startPageOffset;
4225*d4514f0bSApple OSS Distributions 
4226*d4514f0bSApple OSS Distributions 			if (mapper) {
4227*d4514f0bSApple OSS Distributions 				iopl.fMappedPage = mapBase + pageIndex;
4228*d4514f0bSApple OSS Distributions 			} else {
4229*d4514f0bSApple OSS Distributions 				iopl.fMappedPage = 0;
4230*d4514f0bSApple OSS Distributions 			}
4231*d4514f0bSApple OSS Distributions 
4232*d4514f0bSApple OSS Distributions 			// Iterate over the current range, creating UPLs
4233*d4514f0bSApple OSS Distributions 			while (numBytes) {
4234*d4514f0bSApple OSS Distributions 				vm_address_t kernelStart = (vm_address_t) startPage;
4235*d4514f0bSApple OSS Distributions 				vm_map_t theMap;
4236*d4514f0bSApple OSS Distributions 				if (curMap) {
4237*d4514f0bSApple OSS Distributions 					theMap = curMap;
4238*d4514f0bSApple OSS Distributions 				} else if (_memRef) {
4239*d4514f0bSApple OSS Distributions 					theMap = NULL;
4240*d4514f0bSApple OSS Distributions 				} else {
4241*d4514f0bSApple OSS Distributions 					assert(_task == kernel_task);
4242*d4514f0bSApple OSS Distributions 					theMap = IOPageableMapForAddress(kernelStart);
4243*d4514f0bSApple OSS Distributions 				}
4244*d4514f0bSApple OSS Distributions 
4245*d4514f0bSApple OSS Distributions 				// ioplFlags is an in/out parameter
4246*d4514f0bSApple OSS Distributions 				upl_control_flags_t ioplFlags = uplFlags;
4247*d4514f0bSApple OSS Distributions 				dataP = getDataP(_memoryEntries);
4248*d4514f0bSApple OSS Distributions 				pageInfo = getPageList(dataP);
4249*d4514f0bSApple OSS Distributions 				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4250*d4514f0bSApple OSS Distributions 
4251*d4514f0bSApple OSS Distributions 				mach_vm_size_t ioplPhysSize;
4252*d4514f0bSApple OSS Distributions 				upl_size_t     ioplSize;
4253*d4514f0bSApple OSS Distributions 				unsigned int   numPageInfo;
4254*d4514f0bSApple OSS Distributions 
4255*d4514f0bSApple OSS Distributions 				if (_memRef) {
4256*d4514f0bSApple OSS Distributions 					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4257*d4514f0bSApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4258*d4514f0bSApple OSS Distributions 				} else {
4259*d4514f0bSApple OSS Distributions 					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4260*d4514f0bSApple OSS Distributions 					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4261*d4514f0bSApple OSS Distributions 				}
4262*d4514f0bSApple OSS Distributions 				if (error != KERN_SUCCESS) {
4263*d4514f0bSApple OSS Distributions 					if (_memRef) {
4264*d4514f0bSApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4265*d4514f0bSApple OSS Distributions 					} else {
4266*d4514f0bSApple OSS Distributions 						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4267*d4514f0bSApple OSS Distributions 					}
4268*d4514f0bSApple OSS Distributions 					printf("entry size error %d\n", error);
4269*d4514f0bSApple OSS Distributions 					goto abortExit;
4270*d4514f0bSApple OSS Distributions 				}
4271*d4514f0bSApple OSS Distributions 				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4272*d4514f0bSApple OSS Distributions 				numPageInfo = atop_32(ioplPhysSize);
4273*d4514f0bSApple OSS Distributions 				if (byteAlignUPL) {
4274*d4514f0bSApple OSS Distributions 					if (numBytes > ioplPhysSize) {
4275*d4514f0bSApple OSS Distributions 						ioplSize = ((typeof(ioplSize))ioplPhysSize);
4276*d4514f0bSApple OSS Distributions 					} else {
4277*d4514f0bSApple OSS Distributions 						ioplSize = ((typeof(ioplSize))numBytes);
4278*d4514f0bSApple OSS Distributions 					}
4279*d4514f0bSApple OSS Distributions 				} else {
4280*d4514f0bSApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ioplPhysSize);
4281*d4514f0bSApple OSS Distributions 				}
4282*d4514f0bSApple OSS Distributions 
4283*d4514f0bSApple OSS Distributions 				if (_memRef) {
4284*d4514f0bSApple OSS Distributions 					memory_object_offset_t entryOffset;
4285*d4514f0bSApple OSS Distributions 
4286*d4514f0bSApple OSS Distributions 					entryOffset = mdOffset;
4287*d4514f0bSApple OSS Distributions 					if (byteAlignUPL) {
4288*d4514f0bSApple OSS Distributions 						entryOffset = (entryOffset - memRefEntry->offset);
4289*d4514f0bSApple OSS Distributions 					} else {
4290*d4514f0bSApple OSS Distributions 						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4291*d4514f0bSApple OSS Distributions 					}
4292*d4514f0bSApple OSS Distributions 					if (ioplSize > (memRefEntry->size - entryOffset)) {
4293*d4514f0bSApple OSS Distributions 						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4294*d4514f0bSApple OSS Distributions 					}
4295*d4514f0bSApple OSS Distributions 					error = memory_object_iopl_request(memRefEntry->entry,
4296*d4514f0bSApple OSS Distributions 					    entryOffset,
4297*d4514f0bSApple OSS Distributions 					    &ioplSize,
4298*d4514f0bSApple OSS Distributions 					    &iopl.fIOPL,
4299*d4514f0bSApple OSS Distributions 					    baseInfo,
4300*d4514f0bSApple OSS Distributions 					    &numPageInfo,
4301*d4514f0bSApple OSS Distributions 					    &ioplFlags,
4302*d4514f0bSApple OSS Distributions 					    tag);
4303*d4514f0bSApple OSS Distributions 				} else if ((theMap == kernel_map)
4304*d4514f0bSApple OSS Distributions 				    && (kernelStart >= io_kernel_static_start)
4305*d4514f0bSApple OSS Distributions 				    && (kernelStart < io_kernel_static_end)) {
4306*d4514f0bSApple OSS Distributions 					error = io_get_kernel_static_upl(theMap,
4307*d4514f0bSApple OSS Distributions 					    kernelStart,
4308*d4514f0bSApple OSS Distributions 					    &ioplSize,
4309*d4514f0bSApple OSS Distributions 					    &iopl.fPageOffset,
4310*d4514f0bSApple OSS Distributions 					    &iopl.fIOPL,
4311*d4514f0bSApple OSS Distributions 					    baseInfo,
4312*d4514f0bSApple OSS Distributions 					    &numPageInfo,
4313*d4514f0bSApple OSS Distributions 					    &highPage);
4314*d4514f0bSApple OSS Distributions 				} else {
4315*d4514f0bSApple OSS Distributions 					assert(theMap);
4316*d4514f0bSApple OSS Distributions 					error = vm_map_create_upl(theMap,
4317*d4514f0bSApple OSS Distributions 					    startPage,
4318*d4514f0bSApple OSS Distributions 					    (upl_size_t*)&ioplSize,
4319*d4514f0bSApple OSS Distributions 					    &iopl.fIOPL,
4320*d4514f0bSApple OSS Distributions 					    baseInfo,
4321*d4514f0bSApple OSS Distributions 					    &numPageInfo,
4322*d4514f0bSApple OSS Distributions 					    &ioplFlags,
4323*d4514f0bSApple OSS Distributions 					    tag);
4324*d4514f0bSApple OSS Distributions 				}
4325*d4514f0bSApple OSS Distributions 
4326*d4514f0bSApple OSS Distributions 				if (error != KERN_SUCCESS) {
4327*d4514f0bSApple OSS Distributions 					traceInterval.setEndArg2(error);
4328*d4514f0bSApple OSS Distributions 					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4329*d4514f0bSApple OSS Distributions 					goto abortExit;
4330*d4514f0bSApple OSS Distributions 				}
4331*d4514f0bSApple OSS Distributions 
4332*d4514f0bSApple OSS Distributions 				assert(ioplSize);
4333*d4514f0bSApple OSS Distributions 
4334*d4514f0bSApple OSS Distributions 				if (iopl.fIOPL) {
4335*d4514f0bSApple OSS Distributions 					highPage = upl_get_highest_page(iopl.fIOPL);
4336*d4514f0bSApple OSS Distributions 				}
4337*d4514f0bSApple OSS Distributions 				if (highPage > highestPage) {
4338*d4514f0bSApple OSS Distributions 					highestPage = highPage;
4339*d4514f0bSApple OSS Distributions 				}
4340*d4514f0bSApple OSS Distributions 
4341*d4514f0bSApple OSS Distributions 				if (baseInfo->device) {
4342*d4514f0bSApple OSS Distributions 					numPageInfo = 1;
4343*d4514f0bSApple OSS Distributions 					iopl.fFlags = kIOPLOnDevice;
4344*d4514f0bSApple OSS Distributions 				} else {
4345*d4514f0bSApple OSS Distributions 					iopl.fFlags = 0;
4346*d4514f0bSApple OSS Distributions 				}
4347*d4514f0bSApple OSS Distributions 
4348*d4514f0bSApple OSS Distributions 				if (byteAlignUPL) {
4349*d4514f0bSApple OSS Distributions 					if (iopl.fIOPL) {
4350*d4514f0bSApple OSS Distributions 						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4351*d4514f0bSApple OSS Distributions 						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4352*d4514f0bSApple OSS Distributions 					}
4353*d4514f0bSApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4354*d4514f0bSApple OSS Distributions 						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4355*d4514f0bSApple OSS Distributions 						startPage -= iopl.fPageOffset;
4356*d4514f0bSApple OSS Distributions 					}
4357*d4514f0bSApple OSS Distributions 					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4358*d4514f0bSApple OSS Distributions 					numBytes += iopl.fPageOffset;
4359*d4514f0bSApple OSS Distributions 				}
4360*d4514f0bSApple OSS Distributions 
4361*d4514f0bSApple OSS Distributions 				iopl.fIOMDOffset = mdOffset;
4362*d4514f0bSApple OSS Distributions 				iopl.fPageInfo = pageIndex;
4363*d4514f0bSApple OSS Distributions 
4364*d4514f0bSApple OSS Distributions 				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4365*d4514f0bSApple OSS Distributions 					// Clean up partial created and unsaved iopl
4366*d4514f0bSApple OSS Distributions 					if (iopl.fIOPL) {
4367*d4514f0bSApple OSS Distributions 						upl_abort(iopl.fIOPL, 0);
4368*d4514f0bSApple OSS Distributions 						upl_deallocate(iopl.fIOPL);
4369*d4514f0bSApple OSS Distributions 					}
4370*d4514f0bSApple OSS Distributions 					error = kIOReturnNoMemory;
4371*d4514f0bSApple OSS Distributions 					traceInterval.setEndArg2(error);
4372*d4514f0bSApple OSS Distributions 					goto abortExit;
4373*d4514f0bSApple OSS Distributions 				}
4374*d4514f0bSApple OSS Distributions 				dataP = NULL;
4375*d4514f0bSApple OSS Distributions 
4376*d4514f0bSApple OSS Distributions 				// Check for a multiple iopl's in one virtual range
4377*d4514f0bSApple OSS Distributions 				pageIndex += numPageInfo;
4378*d4514f0bSApple OSS Distributions 				mdOffset -= iopl.fPageOffset;
4379*d4514f0bSApple OSS Distributions 				numBytesWired += ioplSize;
4380*d4514f0bSApple OSS Distributions 				if (ioplSize < numBytes) {
4381*d4514f0bSApple OSS Distributions 					numBytes -= ioplSize;
4382*d4514f0bSApple OSS Distributions 					if (startPage != (mach_vm_address_t)-1) {
4383*d4514f0bSApple OSS Distributions 						startPage += ioplSize;
4384*d4514f0bSApple OSS Distributions 					}
4385*d4514f0bSApple OSS Distributions 					mdOffset += ioplSize;
4386*d4514f0bSApple OSS Distributions 					iopl.fPageOffset = 0;
4387*d4514f0bSApple OSS Distributions 					if (mapper) {
4388*d4514f0bSApple OSS Distributions 						iopl.fMappedPage = mapBase + pageIndex;
4389*d4514f0bSApple OSS Distributions 					}
4390*d4514f0bSApple OSS Distributions 				} else {
4391*d4514f0bSApple OSS Distributions 					mdOffset += numBytes;
4392*d4514f0bSApple OSS Distributions 					break;
4393*d4514f0bSApple OSS Distributions 				}
4394*d4514f0bSApple OSS Distributions 			}
4395*d4514f0bSApple OSS Distributions 		}
4396*d4514f0bSApple OSS Distributions 
4397*d4514f0bSApple OSS Distributions 		_highestPage = highestPage;
4398*d4514f0bSApple OSS Distributions 		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4399*d4514f0bSApple OSS Distributions 
4400*d4514f0bSApple OSS Distributions 		if (UPL_COPYOUT_FROM & uplFlags) {
4401*d4514f0bSApple OSS Distributions 			_flags |= kIOMemoryPreparedReadOnly;
4402*d4514f0bSApple OSS Distributions 		}
4403*d4514f0bSApple OSS Distributions 		traceInterval.setEndCodes(numBytesWired, error);
4404*d4514f0bSApple OSS Distributions 	}
4405*d4514f0bSApple OSS Distributions 
4406*d4514f0bSApple OSS Distributions #if IOTRACKING
4407*d4514f0bSApple OSS Distributions 	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4408*d4514f0bSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4409*d4514f0bSApple OSS Distributions 		if (!dataP->fWireTracking.link.next) {
4410*d4514f0bSApple OSS Distributions 			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4411*d4514f0bSApple OSS Distributions 		}
4412*d4514f0bSApple OSS Distributions 	}
4413*d4514f0bSApple OSS Distributions #endif /* IOTRACKING */
4414*d4514f0bSApple OSS Distributions 
4415*d4514f0bSApple OSS Distributions 	return error;
4416*d4514f0bSApple OSS Distributions 
4417*d4514f0bSApple OSS Distributions abortExit:
4418*d4514f0bSApple OSS Distributions 	{
4419*d4514f0bSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4420*d4514f0bSApple OSS Distributions 		UInt done = getNumIOPL(_memoryEntries, dataP);
4421*d4514f0bSApple OSS Distributions 		ioPLBlock *ioplList = getIOPLList(dataP);
4422*d4514f0bSApple OSS Distributions 
4423*d4514f0bSApple OSS Distributions 		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4424*d4514f0bSApple OSS Distributions 			if (ioplList[ioplIdx].fIOPL) {
4425*d4514f0bSApple OSS Distributions 				upl_abort(ioplList[ioplIdx].fIOPL, 0);
4426*d4514f0bSApple OSS Distributions 				upl_deallocate(ioplList[ioplIdx].fIOPL);
4427*d4514f0bSApple OSS Distributions 			}
4428*d4514f0bSApple OSS Distributions 		}
4429*d4514f0bSApple OSS Distributions 		_memoryEntries->setLength(computeDataSize(0, 0));
4430*d4514f0bSApple OSS Distributions 	}
4431*d4514f0bSApple OSS Distributions 
4432*d4514f0bSApple OSS Distributions 	if (error == KERN_FAILURE) {
4433*d4514f0bSApple OSS Distributions 		error = kIOReturnCannotWire;
4434*d4514f0bSApple OSS Distributions 	} else if (error == KERN_MEMORY_ERROR) {
4435*d4514f0bSApple OSS Distributions 		error = kIOReturnNoResources;
4436*d4514f0bSApple OSS Distributions 	}
4437*d4514f0bSApple OSS Distributions 
4438*d4514f0bSApple OSS Distributions 	return error;
4439*d4514f0bSApple OSS Distributions }
4440*d4514f0bSApple OSS Distributions 
4441*d4514f0bSApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4442*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4443*d4514f0bSApple OSS Distributions {
4444*d4514f0bSApple OSS Distributions 	ioGMDData * dataP;
4445*d4514f0bSApple OSS Distributions 
4446*d4514f0bSApple OSS Distributions 	if (size > UINT_MAX) {
4447*d4514f0bSApple OSS Distributions 		return false;
4448*d4514f0bSApple OSS Distributions 	}
4449*d4514f0bSApple OSS Distributions 	if (!_memoryEntries) {
4450*d4514f0bSApple OSS Distributions 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4451*d4514f0bSApple OSS Distributions 		if (!_memoryEntries) {
4452*d4514f0bSApple OSS Distributions 			return false;
4453*d4514f0bSApple OSS Distributions 		}
4454*d4514f0bSApple OSS Distributions 	} else if (!_memoryEntries->initWithCapacity(size)) {
4455*d4514f0bSApple OSS Distributions 		return false;
4456*d4514f0bSApple OSS Distributions 	}
4457*d4514f0bSApple OSS Distributions 
4458*d4514f0bSApple OSS Distributions 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4459*d4514f0bSApple OSS Distributions 	dataP = getDataP(_memoryEntries);
4460*d4514f0bSApple OSS Distributions 
4461*d4514f0bSApple OSS Distributions 	if (mapper == kIOMapperWaitSystem) {
4462*d4514f0bSApple OSS Distributions 		IOMapper::checkForSystemMapper();
4463*d4514f0bSApple OSS Distributions 		mapper = IOMapper::gSystem;
4464*d4514f0bSApple OSS Distributions 	}
4465*d4514f0bSApple OSS Distributions 	dataP->fMapper               = mapper;
4466*d4514f0bSApple OSS Distributions 	dataP->fPageCnt              = 0;
4467*d4514f0bSApple OSS Distributions 	dataP->fMappedBase           = 0;
4468*d4514f0bSApple OSS Distributions 	dataP->fDMAMapNumAddressBits = 64;
4469*d4514f0bSApple OSS Distributions 	dataP->fDMAMapAlignment      = 0;
4470*d4514f0bSApple OSS Distributions 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4471*d4514f0bSApple OSS Distributions 	dataP->fCompletionError      = false;
4472*d4514f0bSApple OSS Distributions 	dataP->fMappedBaseValid      = false;
4473*d4514f0bSApple OSS Distributions 
4474*d4514f0bSApple OSS Distributions 	return true;
4475*d4514f0bSApple OSS Distributions }
4476*d4514f0bSApple OSS Distributions 
4477*d4514f0bSApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4478*d4514f0bSApple OSS Distributions IOMemoryDescriptor::dmaMap(
4479*d4514f0bSApple OSS Distributions 	IOMapper                    * mapper,
4480*d4514f0bSApple OSS Distributions 	IOMemoryDescriptor          * memory,
4481*d4514f0bSApple OSS Distributions 	IODMACommand                * command,
4482*d4514f0bSApple OSS Distributions 	const IODMAMapSpecification * mapSpec,
4483*d4514f0bSApple OSS Distributions 	uint64_t                      offset,
4484*d4514f0bSApple OSS Distributions 	uint64_t                      length,
4485*d4514f0bSApple OSS Distributions 	uint64_t                    * mapAddress,
4486*d4514f0bSApple OSS Distributions 	uint64_t                    * mapLength)
4487*d4514f0bSApple OSS Distributions {
4488*d4514f0bSApple OSS Distributions 	IOReturn err;
4489*d4514f0bSApple OSS Distributions 	uint32_t mapOptions;
4490*d4514f0bSApple OSS Distributions 
4491*d4514f0bSApple OSS Distributions 	mapOptions = 0;
4492*d4514f0bSApple OSS Distributions 	mapOptions |= kIODMAMapReadAccess;
4493*d4514f0bSApple OSS Distributions 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4494*d4514f0bSApple OSS Distributions 		mapOptions |= kIODMAMapWriteAccess;
4495*d4514f0bSApple OSS Distributions 	}
4496*d4514f0bSApple OSS Distributions 
4497*d4514f0bSApple OSS Distributions 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4498*d4514f0bSApple OSS Distributions 	    mapSpec, command, NULL, mapAddress, mapLength);
4499*d4514f0bSApple OSS Distributions 
4500*d4514f0bSApple OSS Distributions 	if (kIOReturnSuccess == err) {
4501*d4514f0bSApple OSS Distributions 		dmaMapRecord(mapper, command, *mapLength);
4502*d4514f0bSApple OSS Distributions 	}
4503*d4514f0bSApple OSS Distributions 
4504*d4514f0bSApple OSS Distributions 	return err;
4505*d4514f0bSApple OSS Distributions }
4506*d4514f0bSApple OSS Distributions 
4507*d4514f0bSApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4508*d4514f0bSApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4509*d4514f0bSApple OSS Distributions 	IOMapper                    * mapper,
4510*d4514f0bSApple OSS Distributions 	IODMACommand                * command,
4511*d4514f0bSApple OSS Distributions 	uint64_t                      mapLength)
4512*d4514f0bSApple OSS Distributions {
4513*d4514f0bSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4514*d4514f0bSApple OSS Distributions 	kern_allocation_name_t alloc;
4515*d4514f0bSApple OSS Distributions 	int16_t                prior;
4516*d4514f0bSApple OSS Distributions 
4517*d4514f0bSApple OSS Distributions 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4518*d4514f0bSApple OSS Distributions 		kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4519*d4514f0bSApple OSS Distributions 	}
4520*d4514f0bSApple OSS Distributions 
4521*d4514f0bSApple OSS Distributions 	if (!command) {
4522*d4514f0bSApple OSS Distributions 		return;
4523*d4514f0bSApple OSS Distributions 	}
4524*d4514f0bSApple OSS Distributions 	prior = OSAddAtomic16(1, &_dmaReferences);
4525*d4514f0bSApple OSS Distributions 	if (!prior) {
4526*d4514f0bSApple OSS Distributions 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4527*d4514f0bSApple OSS Distributions 			_mapName  = alloc;
4528*d4514f0bSApple OSS Distributions 			mapLength = _length;
4529*d4514f0bSApple OSS Distributions 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4530*d4514f0bSApple OSS Distributions 		} else {
4531*d4514f0bSApple OSS Distributions 			_mapName = NULL;
4532*d4514f0bSApple OSS Distributions 		}
4533*d4514f0bSApple OSS Distributions 	}
4534*d4514f0bSApple OSS Distributions }
4535*d4514f0bSApple OSS Distributions 
4536*d4514f0bSApple OSS Distributions IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4537*d4514f0bSApple OSS Distributions IOMemoryDescriptor::dmaUnmap(
4538*d4514f0bSApple OSS Distributions 	IOMapper                    * mapper,
4539*d4514f0bSApple OSS Distributions 	IODMACommand                * command,
4540*d4514f0bSApple OSS Distributions 	uint64_t                      offset,
4541*d4514f0bSApple OSS Distributions 	uint64_t                      mapAddress,
4542*d4514f0bSApple OSS Distributions 	uint64_t                      mapLength)
4543*d4514f0bSApple OSS Distributions {
4544*d4514f0bSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4545*d4514f0bSApple OSS Distributions 	IOReturn ret;
4546*d4514f0bSApple OSS Distributions 	kern_allocation_name_t alloc;
4547*d4514f0bSApple OSS Distributions 	kern_allocation_name_t mapName;
4548*d4514f0bSApple OSS Distributions 	int16_t prior;
4549*d4514f0bSApple OSS Distributions 
4550*d4514f0bSApple OSS Distributions 	mapName = NULL;
4551*d4514f0bSApple OSS Distributions 	prior = 0;
4552*d4514f0bSApple OSS Distributions 	if (command) {
4553*d4514f0bSApple OSS Distributions 		mapName = _mapName;
4554*d4514f0bSApple OSS Distributions 		if (_dmaReferences) {
4555*d4514f0bSApple OSS Distributions 			prior = OSAddAtomic16(-1, &_dmaReferences);
4556*d4514f0bSApple OSS Distributions 		} else {
4557*d4514f0bSApple OSS Distributions 			panic("_dmaReferences underflow");
4558*d4514f0bSApple OSS Distributions 		}
4559*d4514f0bSApple OSS Distributions 	}
4560*d4514f0bSApple OSS Distributions 
4561*d4514f0bSApple OSS Distributions 	if (!mapLength) {
4562*d4514f0bSApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnSuccess);
4563*d4514f0bSApple OSS Distributions 		return kIOReturnSuccess;
4564*d4514f0bSApple OSS Distributions 	}
4565*d4514f0bSApple OSS Distributions 
4566*d4514f0bSApple OSS Distributions 	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4567*d4514f0bSApple OSS Distributions 
4568*d4514f0bSApple OSS Distributions 	if ((alloc = mapper->fAllocName)) {
4569*d4514f0bSApple OSS Distributions 		kern_allocation_update_size(alloc, -mapLength, NULL);
4570*d4514f0bSApple OSS Distributions 		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4571*d4514f0bSApple OSS Distributions 			mapLength = _length;
4572*d4514f0bSApple OSS Distributions 			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4573*d4514f0bSApple OSS Distributions 		}
4574*d4514f0bSApple OSS Distributions 	}
4575*d4514f0bSApple OSS Distributions 
4576*d4514f0bSApple OSS Distributions 	traceInterval.setEndArg1(ret);
4577*d4514f0bSApple OSS Distributions 	return ret;
4578*d4514f0bSApple OSS Distributions }
4579*d4514f0bSApple OSS Distributions 
4580*d4514f0bSApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4581*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::dmaMap(
4582*d4514f0bSApple OSS Distributions 	IOMapper                    * mapper,
4583*d4514f0bSApple OSS Distributions 	IOMemoryDescriptor          * memory,
4584*d4514f0bSApple OSS Distributions 	IODMACommand                * command,
4585*d4514f0bSApple OSS Distributions 	const IODMAMapSpecification * mapSpec,
4586*d4514f0bSApple OSS Distributions 	uint64_t                      offset,
4587*d4514f0bSApple OSS Distributions 	uint64_t                      length,
4588*d4514f0bSApple OSS Distributions 	uint64_t                    * mapAddress,
4589*d4514f0bSApple OSS Distributions 	uint64_t                    * mapLength)
4590*d4514f0bSApple OSS Distributions {
4591*d4514f0bSApple OSS Distributions 	IOReturn          err = kIOReturnSuccess;
4592*d4514f0bSApple OSS Distributions 	ioGMDData *       dataP;
4593*d4514f0bSApple OSS Distributions 	IOOptionBits      type = _flags & kIOMemoryTypeMask;
4594*d4514f0bSApple OSS Distributions 
4595*d4514f0bSApple OSS Distributions 	*mapAddress = 0;
4596*d4514f0bSApple OSS Distributions 	if (kIOMemoryHostOnly & _flags) {
4597*d4514f0bSApple OSS Distributions 		return kIOReturnSuccess;
4598*d4514f0bSApple OSS Distributions 	}
4599*d4514f0bSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
4600*d4514f0bSApple OSS Distributions 		return kIOReturnNotAttached;
4601*d4514f0bSApple OSS Distributions 	}
4602*d4514f0bSApple OSS Distributions 
4603*d4514f0bSApple OSS Distributions 	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4604*d4514f0bSApple OSS Distributions 	    || offset || (length != _length)) {
4605*d4514f0bSApple OSS Distributions 		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4606*d4514f0bSApple OSS Distributions 	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4607*d4514f0bSApple OSS Distributions 		const ioPLBlock * ioplList = getIOPLList(dataP);
4608*d4514f0bSApple OSS Distributions 		upl_page_info_t * pageList;
4609*d4514f0bSApple OSS Distributions 		uint32_t          mapOptions = 0;
4610*d4514f0bSApple OSS Distributions 
4611*d4514f0bSApple OSS Distributions 		IODMAMapSpecification mapSpec;
4612*d4514f0bSApple OSS Distributions 		bzero(&mapSpec, sizeof(mapSpec));
4613*d4514f0bSApple OSS Distributions 		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4614*d4514f0bSApple OSS Distributions 		mapSpec.alignment = dataP->fDMAMapAlignment;
4615*d4514f0bSApple OSS Distributions 
4616*d4514f0bSApple OSS Distributions 		// For external UPLs the fPageInfo field points directly to
4617*d4514f0bSApple OSS Distributions 		// the upl's upl_page_info_t array.
4618*d4514f0bSApple OSS Distributions 		if (ioplList->fFlags & kIOPLExternUPL) {
4619*d4514f0bSApple OSS Distributions 			pageList = (upl_page_info_t *) ioplList->fPageInfo;
4620*d4514f0bSApple OSS Distributions 			mapOptions |= kIODMAMapPagingPath;
4621*d4514f0bSApple OSS Distributions 		} else {
4622*d4514f0bSApple OSS Distributions 			pageList = getPageList(dataP);
4623*d4514f0bSApple OSS Distributions 		}
4624*d4514f0bSApple OSS Distributions 
4625*d4514f0bSApple OSS Distributions 		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4626*d4514f0bSApple OSS Distributions 			mapOptions |= kIODMAMapPageListFullyOccupied;
4627*d4514f0bSApple OSS Distributions 		}
4628*d4514f0bSApple OSS Distributions 
4629*d4514f0bSApple OSS Distributions 		assert(dataP->fDMAAccess);
4630*d4514f0bSApple OSS Distributions 		mapOptions |= dataP->fDMAAccess;
4631*d4514f0bSApple OSS Distributions 
4632*d4514f0bSApple OSS Distributions 		// Check for direct device non-paged memory
4633*d4514f0bSApple OSS Distributions 		if (ioplList->fFlags & kIOPLOnDevice) {
4634*d4514f0bSApple OSS Distributions 			mapOptions |= kIODMAMapPhysicallyContiguous;
4635*d4514f0bSApple OSS Distributions 		}
4636*d4514f0bSApple OSS Distributions 
4637*d4514f0bSApple OSS Distributions 		IODMAMapPageList dmaPageList =
4638*d4514f0bSApple OSS Distributions 		{
4639*d4514f0bSApple OSS Distributions 			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
4640*d4514f0bSApple OSS Distributions 			.pageListCount = _pages,
4641*d4514f0bSApple OSS Distributions 			.pageList      = &pageList[0]
4642*d4514f0bSApple OSS Distributions 		};
4643*d4514f0bSApple OSS Distributions 		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4644*d4514f0bSApple OSS Distributions 		    command, &dmaPageList, mapAddress, mapLength);
4645*d4514f0bSApple OSS Distributions 
4646*d4514f0bSApple OSS Distributions 		if (kIOReturnSuccess == err) {
4647*d4514f0bSApple OSS Distributions 			dmaMapRecord(mapper, command, *mapLength);
4648*d4514f0bSApple OSS Distributions 		}
4649*d4514f0bSApple OSS Distributions 	}
4650*d4514f0bSApple OSS Distributions 
4651*d4514f0bSApple OSS Distributions 	return err;
4652*d4514f0bSApple OSS Distributions }
4653*d4514f0bSApple OSS Distributions 
4654*d4514f0bSApple OSS Distributions /*
4655*d4514f0bSApple OSS Distributions  * prepare
4656*d4514f0bSApple OSS Distributions  *
4657*d4514f0bSApple OSS Distributions  * Prepare the memory for an I/O transfer.  This involves paging in
4658*d4514f0bSApple OSS Distributions  * the memory, if necessary, and wiring it down for the duration of
4659*d4514f0bSApple OSS Distributions  * the transfer.  The complete() method completes the processing of
4660*d4514f0bSApple OSS Distributions  * the memory after the I/O transfer finishes.  This method needn't
4661*d4514f0bSApple OSS Distributions  * called for non-pageable memory.
4662*d4514f0bSApple OSS Distributions  */
4663*d4514f0bSApple OSS Distributions 
4664*d4514f0bSApple OSS Distributions IOReturn
prepare(IODirection forDirection)4665*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4666*d4514f0bSApple OSS Distributions {
4667*d4514f0bSApple OSS Distributions 	IOReturn     error    = kIOReturnSuccess;
4668*d4514f0bSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4669*d4514f0bSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4670*d4514f0bSApple OSS Distributions 
4671*d4514f0bSApple OSS Distributions 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4672*d4514f0bSApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnSuccess);
4673*d4514f0bSApple OSS Distributions 		return kIOReturnSuccess;
4674*d4514f0bSApple OSS Distributions 	}
4675*d4514f0bSApple OSS Distributions 
4676*d4514f0bSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
4677*d4514f0bSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
4678*d4514f0bSApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnNotAttached);
4679*d4514f0bSApple OSS Distributions 		return kIOReturnNotAttached;
4680*d4514f0bSApple OSS Distributions 	}
4681*d4514f0bSApple OSS Distributions 
4682*d4514f0bSApple OSS Distributions 	if (_prepareLock) {
4683*d4514f0bSApple OSS Distributions 		IOLockLock(_prepareLock);
4684*d4514f0bSApple OSS Distributions 	}
4685*d4514f0bSApple OSS Distributions 
4686*d4514f0bSApple OSS Distributions 	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4687*d4514f0bSApple OSS Distributions 		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4688*d4514f0bSApple OSS Distributions 			error = kIOReturnNotReady;
4689*d4514f0bSApple OSS Distributions 			goto finish;
4690*d4514f0bSApple OSS Distributions 		}
4691*d4514f0bSApple OSS Distributions 		error = wireVirtual(forDirection);
4692*d4514f0bSApple OSS Distributions 	}
4693*d4514f0bSApple OSS Distributions 
4694*d4514f0bSApple OSS Distributions 	if (kIOReturnSuccess == error) {
4695*d4514f0bSApple OSS Distributions 		if (1 == ++_wireCount) {
4696*d4514f0bSApple OSS Distributions 			if (kIOMemoryClearEncrypt & _flags) {
4697*d4514f0bSApple OSS Distributions 				performOperation(kIOMemoryClearEncrypted, 0, _length);
4698*d4514f0bSApple OSS Distributions 			}
4699*d4514f0bSApple OSS Distributions 
4700*d4514f0bSApple OSS Distributions 			ktraceEmitPhysicalSegments();
4701*d4514f0bSApple OSS Distributions 		}
4702*d4514f0bSApple OSS Distributions 	}
4703*d4514f0bSApple OSS Distributions 
4704*d4514f0bSApple OSS Distributions finish:
4705*d4514f0bSApple OSS Distributions 
4706*d4514f0bSApple OSS Distributions 	if (_prepareLock) {
4707*d4514f0bSApple OSS Distributions 		IOLockUnlock(_prepareLock);
4708*d4514f0bSApple OSS Distributions 	}
4709*d4514f0bSApple OSS Distributions 	traceInterval.setEndArg1(error);
4710*d4514f0bSApple OSS Distributions 
4711*d4514f0bSApple OSS Distributions 	return error;
4712*d4514f0bSApple OSS Distributions }
4713*d4514f0bSApple OSS Distributions 
4714*d4514f0bSApple OSS Distributions /*
4715*d4514f0bSApple OSS Distributions  * complete
4716*d4514f0bSApple OSS Distributions  *
4717*d4514f0bSApple OSS Distributions  * Complete processing of the memory after an I/O transfer finishes.
4718*d4514f0bSApple OSS Distributions  * This method should not be called unless a prepare was previously
4719*d4514f0bSApple OSS Distributions  * issued; the prepare() and complete() must occur in pairs, before
4720*d4514f0bSApple OSS Distributions  * before and after an I/O transfer involving pageable memory.
4721*d4514f0bSApple OSS Distributions  */
4722*d4514f0bSApple OSS Distributions 
4723*d4514f0bSApple OSS Distributions IOReturn
complete(IODirection forDirection)4724*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4725*d4514f0bSApple OSS Distributions {
4726*d4514f0bSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4727*d4514f0bSApple OSS Distributions 	ioGMDData  * dataP;
4728*d4514f0bSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4729*d4514f0bSApple OSS Distributions 
4730*d4514f0bSApple OSS Distributions 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4731*d4514f0bSApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnSuccess);
4732*d4514f0bSApple OSS Distributions 		return kIOReturnSuccess;
4733*d4514f0bSApple OSS Distributions 	}
4734*d4514f0bSApple OSS Distributions 
4735*d4514f0bSApple OSS Distributions 	assert(!(kIOMemoryRemote & _flags));
4736*d4514f0bSApple OSS Distributions 	if (kIOMemoryRemote & _flags) {
4737*d4514f0bSApple OSS Distributions 		traceInterval.setEndArg1(kIOReturnNotAttached);
4738*d4514f0bSApple OSS Distributions 		return kIOReturnNotAttached;
4739*d4514f0bSApple OSS Distributions 	}
4740*d4514f0bSApple OSS Distributions 
4741*d4514f0bSApple OSS Distributions 	if (_prepareLock) {
4742*d4514f0bSApple OSS Distributions 		IOLockLock(_prepareLock);
4743*d4514f0bSApple OSS Distributions 	}
4744*d4514f0bSApple OSS Distributions 	do{
4745*d4514f0bSApple OSS Distributions 		assert(_wireCount);
4746*d4514f0bSApple OSS Distributions 		if (!_wireCount) {
4747*d4514f0bSApple OSS Distributions 			break;
4748*d4514f0bSApple OSS Distributions 		}
4749*d4514f0bSApple OSS Distributions 		dataP = getDataP(_memoryEntries);
4750*d4514f0bSApple OSS Distributions 		if (!dataP) {
4751*d4514f0bSApple OSS Distributions 			break;
4752*d4514f0bSApple OSS Distributions 		}
4753*d4514f0bSApple OSS Distributions 
4754*d4514f0bSApple OSS Distributions 		if (kIODirectionCompleteWithError & forDirection) {
4755*d4514f0bSApple OSS Distributions 			dataP->fCompletionError = true;
4756*d4514f0bSApple OSS Distributions 		}
4757*d4514f0bSApple OSS Distributions 
4758*d4514f0bSApple OSS Distributions 		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4759*d4514f0bSApple OSS Distributions 			performOperation(kIOMemorySetEncrypted, 0, _length);
4760*d4514f0bSApple OSS Distributions 		}
4761*d4514f0bSApple OSS Distributions 
4762*d4514f0bSApple OSS Distributions 		_wireCount--;
4763*d4514f0bSApple OSS Distributions 		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4764*d4514f0bSApple OSS Distributions 			ioPLBlock *ioplList = getIOPLList(dataP);
4765*d4514f0bSApple OSS Distributions 			UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4766*d4514f0bSApple OSS Distributions 
4767*d4514f0bSApple OSS Distributions 			if (_wireCount) {
4768*d4514f0bSApple OSS Distributions 				// kIODirectionCompleteWithDataValid & forDirection
4769*d4514f0bSApple OSS Distributions 				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4770*d4514f0bSApple OSS Distributions 					vm_tag_t tag;
4771*d4514f0bSApple OSS Distributions 					tag = (typeof(tag))getVMTag(kernel_map);
4772*d4514f0bSApple OSS Distributions 					for (ind = 0; ind < count; ind++) {
4773*d4514f0bSApple OSS Distributions 						if (ioplList[ind].fIOPL) {
4774*d4514f0bSApple OSS Distributions 							iopl_valid_data(ioplList[ind].fIOPL, tag);
4775*d4514f0bSApple OSS Distributions 						}
4776*d4514f0bSApple OSS Distributions 					}
4777*d4514f0bSApple OSS Distributions 				}
4778*d4514f0bSApple OSS Distributions 			} else {
4779*d4514f0bSApple OSS Distributions 				if (_dmaReferences) {
4780*d4514f0bSApple OSS Distributions 					panic("complete() while dma active");
4781*d4514f0bSApple OSS Distributions 				}
4782*d4514f0bSApple OSS Distributions 
4783*d4514f0bSApple OSS Distributions 				if (dataP->fMappedBaseValid) {
4784*d4514f0bSApple OSS Distributions 					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4785*d4514f0bSApple OSS Distributions 					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4786*d4514f0bSApple OSS Distributions 				}
4787*d4514f0bSApple OSS Distributions #if IOTRACKING
4788*d4514f0bSApple OSS Distributions 				if (dataP->fWireTracking.link.next) {
4789*d4514f0bSApple OSS Distributions 					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4790*d4514f0bSApple OSS Distributions 				}
4791*d4514f0bSApple OSS Distributions #endif /* IOTRACKING */
4792*d4514f0bSApple OSS Distributions 				// Only complete iopls that we created which are for TypeVirtual
4793*d4514f0bSApple OSS Distributions 				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4794*d4514f0bSApple OSS Distributions 					for (ind = 0; ind < count; ind++) {
4795*d4514f0bSApple OSS Distributions 						if (ioplList[ind].fIOPL) {
4796*d4514f0bSApple OSS Distributions 							if (dataP->fCompletionError) {
4797*d4514f0bSApple OSS Distributions 								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4798*d4514f0bSApple OSS Distributions 							} else {
4799*d4514f0bSApple OSS Distributions 								upl_commit(ioplList[ind].fIOPL, NULL, 0);
4800*d4514f0bSApple OSS Distributions 							}
4801*d4514f0bSApple OSS Distributions 							upl_deallocate(ioplList[ind].fIOPL);
4802*d4514f0bSApple OSS Distributions 						}
4803*d4514f0bSApple OSS Distributions 					}
4804*d4514f0bSApple OSS Distributions 				} else if (kIOMemoryTypeUPL == type) {
4805*d4514f0bSApple OSS Distributions 					upl_set_referenced(ioplList[0].fIOPL, false);
4806*d4514f0bSApple OSS Distributions 				}
4807*d4514f0bSApple OSS Distributions 
4808*d4514f0bSApple OSS Distributions 				_memoryEntries->setLength(computeDataSize(0, 0));
4809*d4514f0bSApple OSS Distributions 
4810*d4514f0bSApple OSS Distributions 				dataP->fPreparationID = kIOPreparationIDUnprepared;
4811*d4514f0bSApple OSS Distributions 				_flags &= ~kIOMemoryPreparedReadOnly;
4812*d4514f0bSApple OSS Distributions 
4813*d4514f0bSApple OSS Distributions 				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4814*d4514f0bSApple OSS Distributions 					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4815*d4514f0bSApple OSS Distributions 				}
4816*d4514f0bSApple OSS Distributions 			}
4817*d4514f0bSApple OSS Distributions 		}
4818*d4514f0bSApple OSS Distributions 	}while (false);
4819*d4514f0bSApple OSS Distributions 
4820*d4514f0bSApple OSS Distributions 	if (_prepareLock) {
4821*d4514f0bSApple OSS Distributions 		IOLockUnlock(_prepareLock);
4822*d4514f0bSApple OSS Distributions 	}
4823*d4514f0bSApple OSS Distributions 
4824*d4514f0bSApple OSS Distributions 	traceInterval.setEndArg1(kIOReturnSuccess);
4825*d4514f0bSApple OSS Distributions 	return kIOReturnSuccess;
4826*d4514f0bSApple OSS Distributions }
4827*d4514f0bSApple OSS Distributions 
4828*d4514f0bSApple OSS Distributions IOOptionBits
memoryReferenceCreateOptions(IOOptionBits options,IOMemoryMap * mapping)4829*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4830*d4514f0bSApple OSS Distributions {
4831*d4514f0bSApple OSS Distributions 	IOOptionBits createOptions = 0;
4832*d4514f0bSApple OSS Distributions 
4833*d4514f0bSApple OSS Distributions 	if (!(kIOMap64Bit & options)) {
4834*d4514f0bSApple OSS Distributions 		panic("IOMemoryDescriptor::makeMapping !64bit");
4835*d4514f0bSApple OSS Distributions 	}
4836*d4514f0bSApple OSS Distributions 	if (!(kIOMapReadOnly & options)) {
4837*d4514f0bSApple OSS Distributions 		createOptions |= kIOMemoryReferenceWrite;
4838*d4514f0bSApple OSS Distributions #if DEVELOPMENT || DEBUG
4839*d4514f0bSApple OSS Distributions 		if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4840*d4514f0bSApple OSS Distributions 		    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4841*d4514f0bSApple OSS Distributions 			OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4842*d4514f0bSApple OSS Distributions 		}
4843*d4514f0bSApple OSS Distributions #endif
4844*d4514f0bSApple OSS Distributions 	}
4845*d4514f0bSApple OSS Distributions 	return createOptions;
4846*d4514f0bSApple OSS Distributions }
4847*d4514f0bSApple OSS Distributions 
/*
 * Attempt to create any kIOMemoryMapCopyOnWrite named entry ahead of taking
 * the global lock in IOMemoryDescriptor::makeMapping(), since creating the
 * entry may allocate real pages.
 */
4853*d4514f0bSApple OSS Distributions 
IOMemoryMap *
IOGeneralMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOReturn err = kIOReturnSuccess;
	IOMemoryMap * mapping;

	// Pre-create the copy-on-write memory reference before entering the
	// superclass (which takes the global memory lock) — see comment above.
	// Under kIOMap64Bit, __address actually carries the IOMemoryMap object.
	if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
		struct IOMemoryReference * newRef;
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
		if (kIOReturnSuccess == err) {
			// Publish atomically; another thread may have raced us here,
			// in which case keep the winner's reference and free ours.
			if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
				memoryReferenceFree(newRef);
			}
		}
	}
	if (kIOReturnSuccess != err) {
		return NULL;
	}
	// Delegate the actual mapping work to the superclass implementation.
	mapping = IOMemoryDescriptor::makeMapping(
		owner, __intoTask, __address, options, __offset, __length);

#if IOTRACKING
	// Track the new mapping only if it is the one we passed in (not a reused
	// supermap or static mapping).
	if ((mapping == (IOMemoryMap *) __address)
	    && (0 == (kIOMapStatic & mapping->fOptions))
	    && (NULL == mapping->fSuperMap)
	    && ((kIOTracking & gIOKitDebug) || _task)) {
		// only dram maps in the default on development case
		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
	}
#endif /* IOTRACKING */

	return mapping;
}
4893*d4514f0bSApple OSS Distributions 
/*
 * Map this descriptor into an address space.  Under the kIOMap64Bit
 * convention, *__address carries the IOMemoryMap being populated rather than
 * a raw address; the resulting address is stored into mapping->fAddress.
 *
 * @param __addressMap  target map (used here for tracing/error logs; the map
 *                      actually used comes from mapping->fAddressMap).
 * @param __address     in: the IOMemoryMap object (cast to IOVirtualAddress).
 * @param options       kIOMap* option bits.
 * @param __offset      extra byte offset added to mapping->fOffset.
 * @param __length      requested length (tracing only; mapping->fLength rules).
 * @return kIOReturnSuccess, kIOReturnBadArgument when the window exceeds the
 *         descriptor, or a mapping/pager error.
 */
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t  err;

	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t    range0Len = 0;

	// The requested window must lie entirely within the descriptor.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	// Remote (non-local) memory is never expected here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	// Fetch the first source range for the source==dest shortcut below.
	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
	}

	// mapping source == dest? (could be much better)
	// If mapping the single range back into its owning task with no special
	// placement/uniqueness/guard requirements, alias the existing address
	// and mark the mapping static (nothing to undo at unmap time).
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the backing memory reference if it doesn't exist yet.
	if (!_memRef) {
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}

	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	// kIOMapReference|kIOMapUnique: redirect an existing unique mapping by
	// transposing its pages with a new access-blocking UPL, then swapping
	// the memory references between the two descriptors.
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		do{
			upl_t               redirUPL2;
			upl_size_t          size;
			upl_control_flags_t flags;
			unsigned int        lock_count;

			// Requires a single-entry memory reference to transpose.
			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// Fully release the recursive global memory lock around
			// upl_transpose, remembering the recursion depth so it can be
			// reacquired the same number of times afterwards.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			// Re-populate the device pager for the (now redirected) range.
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: enter the memory reference into the target map.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
		if ((err == KERN_SUCCESS) && pager) {
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				// Pager population failed: tear the mapping back down.
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				// No explicit cache mode requested: inherit the buffer's.
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
5047*d4514f0bSApple OSS Distributions 
#if IOTRACKING
/*
 * Recover the IOMemoryMap that embeds the given IOTrackingUser record
 * (IOMemoryMap::fTracking) and report the mapping's owning task, start
 * address and length.
 *
 * @param tracking  tracking record embedded in an IOMemoryMap.
 * @param task      out: task owning the mapping.
 * @param address   out: start address of the mapping.
 * @param size      out: length of the mapping.
 * @return kIOReturnNotReady when the map has no address map, or its cached
 *         address map no longer matches its task's current map; otherwise
 *         kIOReturnSuccess.
 */
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
// Use the compiler builtin rather than a hand-rolled NULL-dereference
// expression: ((type *)NULL)->field is undefined behavior, while
// __builtin_offsetof is well defined for this computation.
#define iomap_offsetof(type, field) __builtin_offsetof(type, field)

	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task    = map->fAddressTask;
	*address = map->fAddress;
	*size    = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5068*d4514f0bSApple OSS Distributions 
5069*d4514f0bSApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5070*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5071*d4514f0bSApple OSS Distributions 	vm_map_t                addressMap,
5072*d4514f0bSApple OSS Distributions 	IOVirtualAddress        __address,
5073*d4514f0bSApple OSS Distributions 	IOByteCount             __length )
5074*d4514f0bSApple OSS Distributions {
5075*d4514f0bSApple OSS Distributions 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5076*d4514f0bSApple OSS Distributions 	IOReturn ret;
5077*d4514f0bSApple OSS Distributions 	ret = super::doUnmap(addressMap, __address, __length);
5078*d4514f0bSApple OSS Distributions 	traceInterval.setEndArg1(ret);
5079*d4514f0bSApple OSS Distributions 	return ret;
5080*d4514f0bSApple OSS Distributions }
5081*d4514f0bSApple OSS Distributions 
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

// OSMetaClass boilerplate: metaclass + default structors for IOMemoryMap.
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Reserved pad slots kept for binary-compatible future expansion.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5097*d4514f0bSApple OSS Distributions 
5098*d4514f0bSApple OSS Distributions /* ex-inline function implementation */
5099*d4514f0bSApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()5100*d4514f0bSApple OSS Distributions IOMemoryMap::getPhysicalAddress()
5101*d4514f0bSApple OSS Distributions {
5102*d4514f0bSApple OSS Distributions 	return getPhysicalSegment( 0, NULL );
5103*d4514f0bSApple OSS Distributions }
5104*d4514f0bSApple OSS Distributions 
5105*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5106*d4514f0bSApple OSS Distributions 
5107*d4514f0bSApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5108*d4514f0bSApple OSS Distributions IOMemoryMap::init(
5109*d4514f0bSApple OSS Distributions 	task_t                  intoTask,
5110*d4514f0bSApple OSS Distributions 	mach_vm_address_t       toAddress,
5111*d4514f0bSApple OSS Distributions 	IOOptionBits            _options,
5112*d4514f0bSApple OSS Distributions 	mach_vm_size_t          _offset,
5113*d4514f0bSApple OSS Distributions 	mach_vm_size_t          _length )
5114*d4514f0bSApple OSS Distributions {
5115*d4514f0bSApple OSS Distributions 	if (!intoTask) {
5116*d4514f0bSApple OSS Distributions 		return false;
5117*d4514f0bSApple OSS Distributions 	}
5118*d4514f0bSApple OSS Distributions 
5119*d4514f0bSApple OSS Distributions 	if (!super::init()) {
5120*d4514f0bSApple OSS Distributions 		return false;
5121*d4514f0bSApple OSS Distributions 	}
5122*d4514f0bSApple OSS Distributions 
5123*d4514f0bSApple OSS Distributions 	fAddressMap  = get_task_map(intoTask);
5124*d4514f0bSApple OSS Distributions 	if (!fAddressMap) {
5125*d4514f0bSApple OSS Distributions 		return false;
5126*d4514f0bSApple OSS Distributions 	}
5127*d4514f0bSApple OSS Distributions 	vm_map_reference(fAddressMap);
5128*d4514f0bSApple OSS Distributions 
5129*d4514f0bSApple OSS Distributions 	fAddressTask = intoTask;
5130*d4514f0bSApple OSS Distributions 	fOptions     = _options;
5131*d4514f0bSApple OSS Distributions 	fLength      = _length;
5132*d4514f0bSApple OSS Distributions 	fOffset      = _offset;
5133*d4514f0bSApple OSS Distributions 	fAddress     = toAddress;
5134*d4514f0bSApple OSS Distributions 
5135*d4514f0bSApple OSS Distributions 	return true;
5136*d4514f0bSApple OSS Distributions }
5137*d4514f0bSApple OSS Distributions 
/*
 * Attach (or replace) the memory descriptor backing this mapping.
 * For a top-level map (no fSuperMap) the window must fit within the
 * descriptor, and the offset is recorded.  Returns false on bad arguments.
 */
bool
IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
	if (!_memory) {
		return false;
	}

	if (!fSuperMap) {
		// Top-level mapping: validate the window against the descriptor.
		if ((_offset + fLength) > _memory->getLength()) {
			return false;
		}
		fOffset = _offset;
	}


	// Retain the incoming descriptor before detaching from the old one, so
	// the ordering is safe even when references are being juggled between
	// the same objects.
	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
	if (fMemory) {
		if (fMemory != _memory) {
			// Replacing a different descriptor: unlink this map from it.
			fMemory->removeMapping(this);
		}
	}
	fMemory = os::move(tempval);

	return true;
}
5163*d4514f0bSApple OSS Distributions 
/*
 * Base-class mapping hook: IOMemoryDescriptor itself cannot perform a
 * mapping; subclasses (e.g. IOGeneralMemoryDescriptor::doMap) override this
 * with the real implementation.
 */
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	return kIOReturnUnsupported;
}
5174*d4514f0bSApple OSS Distributions 
/*
 * Hold off a fault while this descriptor is redirected (kIOMemoryRedirected,
 * e.g. across system sleep): block until the redirect flag clears, then let
 * the fault proceed.
 */
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Re-check the flag after each wakeup; SLEEP is a project macro —
		// NOTE(review): presumably waits on the global memory lock/wakeup,
		// confirm against its definition.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5191*d4514f0bSApple OSS Distributions 
/*
 * Populate a device pager with this descriptor's physical pages for the
 * range [sourceOffset, sourceOffset + length), so faults on the mapped range
 * resolve to the underlying device/physical memory.
 *
 * @param _pager        the device pager (memory_object_t), from reserved->dp.
 * @param addressMap    map containing the mapping; kernel_map triggers an
 *                      eager vm_fault below.
 * @param address       mapped virtual address of the range.
 * @param sourceOffset  byte offset into the descriptor.
 * @param length        byte length to populate.
 * @param options       kIOMap* options (kIOMapReadOnly affects the pre-fault).
 * @return kIOReturnSuccess, or kIOReturnVMError / kIOReturnBadArgument on
 *         misaligned or short physical segments, or a pager/fault error.
 *
 * NOTE(review): dereferences `reserved` unconditionally — callers obtain the
 * pager from reserved->dp, so reserved is assumed non-NULL here; confirm.
 */
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A contiguous pager covers the whole object, so always start at 0.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset  = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the first segment down to a page boundary, extending the length
	// by the sub-page offset so whole pages are populated.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	// Walk the descriptor's physical segments until `bytes` are populated.
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Device pagers should not cover managed (VM-owned) pages; report
		// such descriptors loudly on development builds.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Contiguous pagers take the whole segment in one call; otherwise
		// populate page by page.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance past this segment; only the first segment carries a
		// sub-page offset.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Bytes left over means the descriptor ran out of physical segments.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5294*d4514f0bSApple OSS Distributions 
// Tear down a mapping of this descriptor.
//
// Calling convention (kIOMap64Bit era): __address is actually the
// IOMemoryMap object being destroyed and __length must be zero; the real
// address map, virtual address and length are read from the mapping's
// fields.  A non-zero __length indicates a caller still using the legacy
// (address, length) convention, which is fatal.
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	// Legacy convention no longer supported.
	if (__length) {
		panic("doUnmap");
	}

	// __address carries the IOMemoryMap; ignore the passed-in addressMap.
	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	if (kIOMapOverwrite & mapping->fOptions) {
		// Overwrite mappings were placed over storage owned elsewhere;
		// there is nothing to deallocate here.
		err = KERN_SUCCESS;
	} else {
		// Pageable buffers live in an IOKit pageable submap rather than
		// directly in kernel_map; resolve the map that actually contains
		// this address before deallocating.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	// Drop the map-tracking entry regardless of how the unmap went.
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5339*d4514f0bSApple OSS Distributions 
// Redirect (doRedirect == true) or restore (doRedirect == false) every
// mapping of this descriptor.  While redirected, the kIOMemoryRedirected
// flag is set on the descriptor; clearing it also wakes any threads that
// blocked on the redirect (WAKEUP) and, for device-pager backed kernel
// mappings, repopulates the pager mappings that redirection removed.
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn            err = kIOReturnSuccess;
	IOMemoryMap *       mapping = NULL;
	OSSharedPtr<OSIterator>        iter;

	LOCK;

	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		// Walk every IOMemoryMap registered against this descriptor.
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t   pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// On restore (no safeTask filter), rebuild kernel mappings
				// backed by the device pager so they are valid immediately.
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	if (!doRedirect) {
		// Presumably wakes threads sleeping until redirection ends
		// (see the WAKEUP/SLEEP macros) -- confirm against lock macros.
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5394*d4514f0bSApple OSS Distributions 
// Redirect or restore this single mapping.
//
// For a non-static mapping not belonging to safeTask, redirection removes
// the pmap entries so later accesses fault (and can be intercepted).  A
// write-combined mapping is instead toggled between inhibit-cache
// (redirected) and write-combine (restored) modes in place.
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// Submaps are covered by their parent mapping's redirect.
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing to do for a mapping that is not (or no longer) live.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// For physical-range descriptors being redirected on behalf of a
	// specific task, propagate the state change to the descriptor itself
	// (which will fan out to its other mappings).
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5438*d4514f0bSApple OSS Distributions 
5439*d4514f0bSApple OSS Distributions IOReturn
unmap(void)5440*d4514f0bSApple OSS Distributions IOMemoryMap::unmap( void )
5441*d4514f0bSApple OSS Distributions {
5442*d4514f0bSApple OSS Distributions 	IOReturn    err;
5443*d4514f0bSApple OSS Distributions 
5444*d4514f0bSApple OSS Distributions 	LOCK;
5445*d4514f0bSApple OSS Distributions 
5446*d4514f0bSApple OSS Distributions 	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5447*d4514f0bSApple OSS Distributions 	    && (0 == (kIOMapStatic & fOptions))) {
5448*d4514f0bSApple OSS Distributions 		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5449*d4514f0bSApple OSS Distributions 	} else {
5450*d4514f0bSApple OSS Distributions 		err = kIOReturnSuccess;
5451*d4514f0bSApple OSS Distributions 	}
5452*d4514f0bSApple OSS Distributions 
5453*d4514f0bSApple OSS Distributions 	if (fAddressMap) {
5454*d4514f0bSApple OSS Distributions 		vm_map_deallocate(fAddressMap);
5455*d4514f0bSApple OSS Distributions 		fAddressMap = NULL;
5456*d4514f0bSApple OSS Distributions 	}
5457*d4514f0bSApple OSS Distributions 
5458*d4514f0bSApple OSS Distributions 	fAddress = 0;
5459*d4514f0bSApple OSS Distributions 
5460*d4514f0bSApple OSS Distributions 	UNLOCK;
5461*d4514f0bSApple OSS Distributions 
5462*d4514f0bSApple OSS Distributions 	return err;
5463*d4514f0bSApple OSS Distributions }
5464*d4514f0bSApple OSS Distributions 
// Called when the task that owns this mapping terminates: optionally
// unmaps (if userClientUnmap() was requested), then severs the mapping
// from the dead task's vm_map.
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		// A user client asked for a full unmap at task death.
		unmap();
	}
#if IOTRACKING
	// The unmap()/doUnmap() path removes the tracking entry itself;
	// otherwise drop it here.
	else {
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	// Release our reference on the (now dead) task's map and clear the
	// task/address so later operations treat this mapping as dead.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress     = 0;
	UNLOCK;
}
5486*d4514f0bSApple OSS Distributions 
// Request that this mapping be fully unmapped when its owning task dies
// (consumed by taskDied()).  Does not unmap anything immediately.
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5493*d4514f0bSApple OSS Distributions 
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these references
// is released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// Freeing at refcount 2 (not 1) accounts for the descriptor's
	// _mappings-set reference; the lock serializes against concurrent
	// mapping teardown.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5505*d4514f0bSApple OSS Distributions 
// Final teardown: unmap VM state, detach from the backing descriptor,
// release the supermap reference and any redirect UPL.
void
IOMemoryMap::free()
{
	// Tear down the VM mapping first (no-op if already unmapped).
	unmap();

	if (fMemory) {
		// Remove ourselves from the descriptor's _mappings set under the
		// global lock, then drop our descriptor reference.
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	if (fRedirUPL) {
		// Release the UPL created by redirect(newBackingMemory, ...).
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5529*d4514f0bSApple OSS Distributions 
// Return the byte length of this mapping.
IOByteCount
IOMemoryMap::getLength()
{
	return fLength;
}
5535*d4514f0bSApple OSS Distributions 
// Return the mapping's virtual address.  On 32-bit (!__LP64__) kernels,
// warn when called against a 64-bit map, since IOVirtualAddress cannot
// represent the full address -- callers should use ::getAddress().
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// NOTE(review): return value deliberately(?) discarded -- this
		// call only serves to trigger the supermap's 64-bit warning;
		// fAddress below is still what is returned.  Confirm intent.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5551*d4514f0bSApple OSS Distributions 
#ifndef __LP64__
// 32-bit-only accessors; on LP64 these are provided elsewhere.
// Return the mapping's full 64-bit virtual address.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

// Return the mapping's byte length.
mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5565*d4514f0bSApple OSS Distributions 
5566*d4514f0bSApple OSS Distributions 
5567*d4514f0bSApple OSS Distributions task_t
getAddressTask()5568*d4514f0bSApple OSS Distributions IOMemoryMap::getAddressTask()
5569*d4514f0bSApple OSS Distributions {
5570*d4514f0bSApple OSS Distributions 	if (fSuperMap) {
5571*d4514f0bSApple OSS Distributions 		return fSuperMap->getAddressTask();
5572*d4514f0bSApple OSS Distributions 	} else {
5573*d4514f0bSApple OSS Distributions 		return fAddressTask;
5574*d4514f0bSApple OSS Distributions 	}
5575*d4514f0bSApple OSS Distributions }
5576*d4514f0bSApple OSS Distributions 
// Return the IOOptionBits this mapping was created with.
IOOptionBits
IOMemoryMap::getMapOptions()
{
	return fOptions;
}
5582*d4514f0bSApple OSS Distributions 
// Return a borrowed (non-retained) pointer to the backing descriptor.
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
5588*d4514f0bSApple OSS Distributions 
// Try to satisfy a new mapping request by reusing this existing mapping.
//
// Returns: this (retained) when the request is identical; newMapping,
// rewritten as a sub-mapping of this (fSuperMap set, address/offset
// adjusted), when the request is a compatible subrange; NULL when the
// request cannot be satisfied by this mapping.
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t              task      = newMapping->getAddressTask();
	mach_vm_address_t   toAddress = newMapping->fAddress;
	IOOptionBits        _options  = newMapping->fOptions;
	mach_vm_size_t      _offset   = newMapping->fOffset;
	mach_vm_size_t      _length   = newMapping->fLength;

	// Must target the same task / vm_map as this mapping.
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// Read-only-ness must match exactly.
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	// Guarded-ness must match exactly.
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	// An explicitly requested cache mode must match ours; a default-cache
	// request is compatible with anything.
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must land exactly on our address.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// Requested range must lie entirely within [fOffset, fOffset+fLength).
	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		// Exact match: hand back this mapping with an extra retain.
		retain();
		newMapping = this;
	} else {
		// Subrange: make newMapping a child of this mapping, pointing at
		// the corresponding slice of our address range.
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset   = fOffset + _offset;
		newMapping->fAddress  = fAddress + _offset;
	}

	return newMapping;
}
5638*d4514f0bSApple OSS Distributions 
// Wire or unwire a page-aligned subrange of this mapping.
//
// If options carries direction bits (kIODirectionOutIn) the range is
// wired with those bits as the protection; with no direction bits the
// range is unwired instead.
// NOTE(review): relies on kIODirectionOutIn's bit values coinciding with
// VM_PROT_READ|VM_PROT_WRITE -- confirm against IOMemoryDescriptor.h.
IOReturn
IOMemoryMap::wireRange(
	uint32_t                options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	IOReturn kr;
	// Expand the request outward to whole pages.
	mach_vm_address_t start = trunc_page_64(fAddress + offset);
	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
	vm_prot_t prot;

	prot = (kIODirectionOutIn & options);
	if (prot) {
		// Wire with the descriptor's VM tag for accounting.
		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
	} else {
		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
	}

	return kr;
}
5659*d4514f0bSApple OSS Distributions 
5660*d4514f0bSApple OSS Distributions 
// Return the physical address (and contiguous length) backing a
// mapping-relative offset, by translating into descriptor space
// (fOffset + _offset) and delegating to the backing descriptor.
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress   address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5680*d4514f0bSApple OSS Distributions 
5681*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5682*d4514f0bSApple OSS Distributions 
5683*d4514f0bSApple OSS Distributions #undef super
5684*d4514f0bSApple OSS Distributions #define super OSObject
5685*d4514f0bSApple OSS Distributions 
5686*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5687*d4514f0bSApple OSS Distributions 
5688*d4514f0bSApple OSS Distributions void
initialize(void)5689*d4514f0bSApple OSS Distributions IOMemoryDescriptor::initialize( void )
5690*d4514f0bSApple OSS Distributions {
5691*d4514f0bSApple OSS Distributions 	if (NULL == gIOMemoryLock) {
5692*d4514f0bSApple OSS Distributions 		gIOMemoryLock = IORecursiveLockAlloc();
5693*d4514f0bSApple OSS Distributions 	}
5694*d4514f0bSApple OSS Distributions 
5695*d4514f0bSApple OSS Distributions 	gIOLastPage = IOGetLastPageNumber();
5696*d4514f0bSApple OSS Distributions }
5697*d4514f0bSApple OSS Distributions 
// Descriptor teardown: drop the mappings set and free the reserved
// (expansion) data, including any kernel-side state it holds.
void
IOMemoryDescriptor::free( void )
{
	if (_mappings) {
		_mappings.reset();
	}

	if (reserved) {
		// Release pager/kernel resources referenced from reserved before
		// freeing the structure itself.
		cleanKernelReserved(reserved);
		IOFreeType(reserved, IOMemoryDescriptorReserved);
		reserved = NULL;
	}
	super::free();
}
5712*d4514f0bSApple OSS Distributions 
// Record a pre-existing mapping of this descriptor at a known address:
// creates a kIOMapStatic mapping covering the whole descriptor, so no
// new VM entries are made and unmap() will not deallocate it.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t                  intoTask,
	IOVirtualAddress        mapAddress,
	IOOptionBits            options )
{
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}
5723*d4514f0bSApple OSS Distributions 
// Convenience: map the entire descriptor into the kernel task at an
// address of the VM system's choosing.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits            options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}
5732*d4514f0bSApple OSS Distributions 
#ifndef __LP64__
// Legacy 32-bit entry point.  Fixed-address mappings into a 64-bit task
// cannot be expressed through the 32-bit IOVirtualAddress parameter, so
// those requests are rejected with a backtrace pointing callers at
// ::createMappingInTask().
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5751*d4514f0bSApple OSS Distributions 
// Create (or share) a mapping of [offset, offset+length) of this
// descriptor in intoTask.  length == 0 means the whole descriptor.
//
// An IOMemoryMap shell is constructed first; makeMapping() then either
// completes it or substitutes a compatible existing mapping (consuming
// the shell).  The result comes back already retained, hence OSNoRetain.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t                  intoTask,
	mach_vm_address_t       atAddress,
	IOOptionBits            options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		// Note: the mapping object is smuggled through the
		// IOVirtualAddress parameter, flagged by kIOMap64Bit.
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
5794*d4514f0bSApple OSS Distributions 
#ifndef __LP64__ // there is only a 64 bit version for LP64
// 32-bit binary-compatibility shim: widens the IOByteCount offset and
// forwards to the mach_vm_size_t overload.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5804*d4514f0bSApple OSS Distributions 
/*
 * Point this mapping at a different backing memory descriptor.
 * Returns kIOReturnError if re-establishing the mapping over the new
 * descriptor fails; kIOReturnSuccess otherwise (including the no-op case
 * where the map has no address or vm_map).
 */
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	// Only act on mappings that are actually established in a vm_map.
	if (fAddress && fAddressMap) {
		do{
			// Remember whether the current backing memory is a physical
			// descriptor; used by the (disabled) redirect calls below.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// If no redirect UPL exists yet and we hold the only reference to
			// the backing memory entry, create a UPL with UPL_BLOCK_ACCESS so
			// accesses to the pages are blocked until it is committed below.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					// UPL creation failed; continue without blocking access.
					fRedirUPL = NULL;
				}

				if (physMem) {
					// Drop the existing translations for the mapped range.
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) { // intentionally disabled code path
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// Re-establish this same IOMemoryMap over the new backing
					// descriptor: with kIOMapReference the address argument
					// carries the mapping object itself (see makeMapping).
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Commit and release the blocking UPL, unblocking page access.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) { // intentionally disabled code path
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5865*d4514f0bSApple OSS Distributions 
/*
 * Internal factory: create, adopt, or reuse an IOMemoryMap for this
 * descriptor.  With kIOMap64Bit the __address argument carries a
 * pre-constructed IOMemoryMap * (from createMappingInTask), not a virtual
 * address; the !__LP64__ panic below enforces that only this path is used.
 * On success returns the mapping already holding a reference for the caller;
 * on failure returns NULL after releasing the incoming mapping object.
 * NOTE(review): owner and __intoTask are not referenced in this body (there
 * is a commented-out owner check below) — presumably kept for the interface.
 */
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result  = NULL;

	// Decode the mapping object and fold the caller's offset into it.
	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		// Static mapping: adopt the supplied mapping object as-is.
		if (kIOMapStatic & options) {
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount       physLen;

//	    if (owner != this)		continue;

			// For physical descriptors, build a temporary sub-descriptor
			// covering just the physical range to be mapped.
			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;   // range not contiguous/resolvable -> fail
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;   // allocation failed -> fail
				}
				// The sub-descriptor already starts at the target range.
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					// copyCompatible returns a retained mapping on match.
					if ((result = lookMapping->copyCompatible(mapping))) {
					        addMapping(result);
					        result->setMemoryDescriptor(this, offset);
					        return true;    // stop iterating
					}
					return false;
				});
			}
			// Reusing an existing mapping (or kIOMapReference with no match):
			// the incoming mapping object is no longer needed.
			if (result || (options & kIOMapReference)) {
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		// Default to mapping this descriptor itself.
		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		// Create the actual VM mapping; doMap may update 'mapping'.
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
5963*d4514f0bSApple OSS Distributions 
5964*d4514f0bSApple OSS Distributions void
addMapping(IOMemoryMap * mapping)5965*d4514f0bSApple OSS Distributions IOMemoryDescriptor::addMapping(
5966*d4514f0bSApple OSS Distributions 	IOMemoryMap * mapping )
5967*d4514f0bSApple OSS Distributions {
5968*d4514f0bSApple OSS Distributions 	if (mapping) {
5969*d4514f0bSApple OSS Distributions 		if (NULL == _mappings) {
5970*d4514f0bSApple OSS Distributions 			_mappings = OSSet::withCapacity(1);
5971*d4514f0bSApple OSS Distributions 		}
5972*d4514f0bSApple OSS Distributions 		if (_mappings) {
5973*d4514f0bSApple OSS Distributions 			_mappings->setObject( mapping );
5974*d4514f0bSApple OSS Distributions 		}
5975*d4514f0bSApple OSS Distributions 	}
5976*d4514f0bSApple OSS Distributions }
5977*d4514f0bSApple OSS Distributions 
5978*d4514f0bSApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)5979*d4514f0bSApple OSS Distributions IOMemoryDescriptor::removeMapping(
5980*d4514f0bSApple OSS Distributions 	IOMemoryMap * mapping )
5981*d4514f0bSApple OSS Distributions {
5982*d4514f0bSApple OSS Distributions 	if (_mappings) {
5983*d4514f0bSApple OSS Distributions 		_mappings->removeObject( mapping);
5984*d4514f0bSApple OSS Distributions 	}
5985*d4514f0bSApple OSS Distributions }
5986*d4514f0bSApple OSS Distributions 
// Store the IOMapper-related option bits used by later mapping operations.
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}
5992*d4514f0bSApple OSS Distributions 
// Return the IOMapper-related option bits previously set via setMapperOptions.
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}
5998*d4514f0bSApple OSS Distributions 
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
// These legacy ILP32-only entry points are retained solely for binary
// compatibility; each unconditionally fails (or returns NULL), forcing
// callers onto the initWithOptions path.
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    length,
    IODirection  direction,
    task_t       task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            length,
	IODirection            direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    bool             asReference)
{
	return false;
}

// Obsolete accessor: always NULL; use getPhysicalSegment / map() instead.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
6055*d4514f0bSApple OSS Distributions 
6056*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6057*d4514f0bSApple OSS Distributions 
6058*d4514f0bSApple OSS Distributions bool
serialize(OSSerialize * s) const6059*d4514f0bSApple OSS Distributions IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
6060*d4514f0bSApple OSS Distributions {
6061*d4514f0bSApple OSS Distributions 	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
6062*d4514f0bSApple OSS Distributions 	OSSharedPtr<OSObject>           values[2] = {NULL};
6063*d4514f0bSApple OSS Distributions 	OSSharedPtr<OSArray>            array;
6064*d4514f0bSApple OSS Distributions 
6065*d4514f0bSApple OSS Distributions 	struct SerData {
6066*d4514f0bSApple OSS Distributions 		user_addr_t address;
6067*d4514f0bSApple OSS Distributions 		user_size_t length;
6068*d4514f0bSApple OSS Distributions 	};
6069*d4514f0bSApple OSS Distributions 
6070*d4514f0bSApple OSS Distributions 	unsigned int index;
6071*d4514f0bSApple OSS Distributions 
6072*d4514f0bSApple OSS Distributions 	IOOptionBits type = _flags & kIOMemoryTypeMask;
6073*d4514f0bSApple OSS Distributions 
6074*d4514f0bSApple OSS Distributions 	if (s == NULL) {
6075*d4514f0bSApple OSS Distributions 		return false;
6076*d4514f0bSApple OSS Distributions 	}
6077*d4514f0bSApple OSS Distributions 
6078*d4514f0bSApple OSS Distributions 	array = OSArray::withCapacity(4);
6079*d4514f0bSApple OSS Distributions 	if (!array) {
6080*d4514f0bSApple OSS Distributions 		return false;
6081*d4514f0bSApple OSS Distributions 	}
6082*d4514f0bSApple OSS Distributions 
6083*d4514f0bSApple OSS Distributions 	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6084*d4514f0bSApple OSS Distributions 	if (!vcopy) {
6085*d4514f0bSApple OSS Distributions 		return false;
6086*d4514f0bSApple OSS Distributions 	}
6087*d4514f0bSApple OSS Distributions 
6088*d4514f0bSApple OSS Distributions 	keys[0] = OSSymbol::withCString("address");
6089*d4514f0bSApple OSS Distributions 	keys[1] = OSSymbol::withCString("length");
6090*d4514f0bSApple OSS Distributions 
6091*d4514f0bSApple OSS Distributions 	// Copy the volatile data so we don't have to allocate memory
6092*d4514f0bSApple OSS Distributions 	// while the lock is held.
6093*d4514f0bSApple OSS Distributions 	LOCK;
6094*d4514f0bSApple OSS Distributions 	if (vcopy.size() == _rangesCount) {
6095*d4514f0bSApple OSS Distributions 		Ranges vec = _ranges;
6096*d4514f0bSApple OSS Distributions 		for (index = 0; index < vcopy.size(); index++) {
6097*d4514f0bSApple OSS Distributions 			mach_vm_address_t addr; mach_vm_size_t len;
6098*d4514f0bSApple OSS Distributions 			getAddrLenForInd(addr, len, type, vec, index, _task);
6099*d4514f0bSApple OSS Distributions 			vcopy[index].address = addr;
6100*d4514f0bSApple OSS Distributions 			vcopy[index].length  = len;
6101*d4514f0bSApple OSS Distributions 		}
6102*d4514f0bSApple OSS Distributions 	} else {
6103*d4514f0bSApple OSS Distributions 		// The descriptor changed out from under us.  Give up.
6104*d4514f0bSApple OSS Distributions 		UNLOCK;
6105*d4514f0bSApple OSS Distributions 		return false;
6106*d4514f0bSApple OSS Distributions 	}
6107*d4514f0bSApple OSS Distributions 	UNLOCK;
6108*d4514f0bSApple OSS Distributions 
6109*d4514f0bSApple OSS Distributions 	for (index = 0; index < vcopy.size(); index++) {
6110*d4514f0bSApple OSS Distributions 		user_addr_t addr = vcopy[index].address;
6111*d4514f0bSApple OSS Distributions 		IOByteCount len = (IOByteCount) vcopy[index].length;
6112*d4514f0bSApple OSS Distributions 		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6113*d4514f0bSApple OSS Distributions 		if (values[0] == NULL) {
6114*d4514f0bSApple OSS Distributions 			return false;
6115*d4514f0bSApple OSS Distributions 		}
6116*d4514f0bSApple OSS Distributions 		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6117*d4514f0bSApple OSS Distributions 		if (values[1] == NULL) {
6118*d4514f0bSApple OSS Distributions 			return false;
6119*d4514f0bSApple OSS Distributions 		}
6120*d4514f0bSApple OSS Distributions 		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6121*d4514f0bSApple OSS Distributions 		if (dict == NULL) {
6122*d4514f0bSApple OSS Distributions 			return false;
6123*d4514f0bSApple OSS Distributions 		}
6124*d4514f0bSApple OSS Distributions 		array->setObject(dict.get());
6125*d4514f0bSApple OSS Distributions 		dict.reset();
6126*d4514f0bSApple OSS Distributions 		values[0].reset();
6127*d4514f0bSApple OSS Distributions 		values[1].reset();
6128*d4514f0bSApple OSS Distributions 	}
6129*d4514f0bSApple OSS Distributions 
6130*d4514f0bSApple OSS Distributions 	return array->serialize(s);
6131*d4514f0bSApple OSS Distributions }
6132*d4514f0bSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6133*d4514f0bSApple OSS Distributions 
/*
 * Reserved vtable slots for binary compatibility.  Slot 0 is in use on all
 * builds; the ILP32 (!__LP64__) build also consumes slots 1-7 for the legacy
 * pre-64-bit method variants.
 */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
    struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6164*d4514f0bSApple OSS Distributions 
6165*d4514f0bSApple OSS Distributions /* ex-inline function implementation */
// Physical address of the descriptor's first segment (offset zero); the NULL
// length argument means the caller does not need the segment's length.
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
6171*d4514f0bSApple OSS Distributions 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

/*
 * Factory: allocate a mixed-data buffer object with the requested byte
 * capacity.  Returns NULL if either the object allocation or the
 * initialization fails.
 */
OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	auto instance = OSMakeShared<_IOMemoryDescriptorMixedData>();
	if (!instance || instance->initWithCapacity(capacity)) {
		// Either allocation failed (return the NULL pointer as-is) or
		// initialization succeeded (return the new object).
		return instance;
	}
	return nullptr;
}
6183*d4514f0bSApple OSS Distributions 
/*
 * (Re)initialize with storage for at least 'capacity' bytes.  May be called
 * on an already-initialized instance: an existing buffer that is too small
 * (or any buffer when capacity is zero) is released first.  Returns false if
 * OSObject::init() or the allocation fails.
 */
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	// Drop any existing buffer that cannot satisfy the request.
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	if (!_data && capacity) {
		// Variable-size typed allocation (see KT_IOMD_MIXED_DATA above),
		// zero-filled and tagged for IOKit accounting.
		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6208*d4514f0bSApple OSS Distributions 
// OSObject teardown: release the data buffer, then the base object.
void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}
6215*d4514f0bSApple OSS Distributions 
// Release the backing buffer (kfree_type is NULL-safe) and reset all
// bookkeeping so the object can be re-initialized.
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6223*d4514f0bSApple OSS Distributions 
6224*d4514f0bSApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6225*d4514f0bSApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6226*d4514f0bSApple OSS Distributions {
6227*d4514f0bSApple OSS Distributions 	const auto oldLength = getLength();
6228*d4514f0bSApple OSS Distributions 	size_t newLength;
6229*d4514f0bSApple OSS Distributions 	if (os_add_overflow(oldLength, length, &newLength)) {
6230*d4514f0bSApple OSS Distributions 		return false;
6231*d4514f0bSApple OSS Distributions 	}
6232*d4514f0bSApple OSS Distributions 
6233*d4514f0bSApple OSS Distributions 	if (!setLength(newLength)) {
6234*d4514f0bSApple OSS Distributions 		return false;
6235*d4514f0bSApple OSS Distributions 	}
6236*d4514f0bSApple OSS Distributions 
6237*d4514f0bSApple OSS Distributions 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6238*d4514f0bSApple OSS Distributions 	if (bytes) {
6239*d4514f0bSApple OSS Distributions 		bcopy(bytes, dest, length);
6240*d4514f0bSApple OSS Distributions 	}
6241*d4514f0bSApple OSS Distributions 
6242*d4514f0bSApple OSS Distributions 	return true;
6243*d4514f0bSApple OSS Distributions }
6244*d4514f0bSApple OSS Distributions 
/*
 * Ensure the buffer can hold 'length' bytes and record the new length.
 * Grows the allocation via krealloc when needed (capacity is never shrunk
 * here); on allocation failure returns false leaving the previous buffer
 * and length intact.
 */
bool
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void *newData;

		// Reallocate, preserving existing contents; new space is zeroed.
		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
		    NULL);
		if (!newData) {
			return false;
		}

		_data = newData;
		_capacity = length;
	}

	_length = length;
	return true;
}
6265*d4514f0bSApple OSS Distributions 
6266*d4514f0bSApple OSS Distributions const void *
getBytes() const6267*d4514f0bSApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6268*d4514f0bSApple OSS Distributions {
6269*d4514f0bSApple OSS Distributions 	return _length ? _data : nullptr;
6270*d4514f0bSApple OSS Distributions }
6271*d4514f0bSApple OSS Distributions 
6272*d4514f0bSApple OSS Distributions size_t
getLength() const6273*d4514f0bSApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6274*d4514f0bSApple OSS Distributions {
6275*d4514f0bSApple OSS Distributions 	return _data ? _length : 0;
6276*d4514f0bSApple OSS Distributions }
6277