1*a325d9c4SApple OSS Distributions /*
2*a325d9c4SApple OSS Distributions * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*a325d9c4SApple OSS Distributions *
4*a325d9c4SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*a325d9c4SApple OSS Distributions *
6*a325d9c4SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*a325d9c4SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*a325d9c4SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*a325d9c4SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*a325d9c4SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*a325d9c4SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*a325d9c4SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*a325d9c4SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*a325d9c4SApple OSS Distributions *
15*a325d9c4SApple OSS Distributions * Please obtain a copy of the License at
16*a325d9c4SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*a325d9c4SApple OSS Distributions *
18*a325d9c4SApple OSS Distributions * The Original Code and all software distributed under the License are
19*a325d9c4SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*a325d9c4SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*a325d9c4SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*a325d9c4SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*a325d9c4SApple OSS Distributions * Please see the License for the specific language governing rights and
24*a325d9c4SApple OSS Distributions * limitations under the License.
25*a325d9c4SApple OSS Distributions *
26*a325d9c4SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*a325d9c4SApple OSS Distributions */
28*a325d9c4SApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*a325d9c4SApple OSS Distributions
30*a325d9c4SApple OSS Distributions #include <sys/cdefs.h>
31*a325d9c4SApple OSS Distributions
32*a325d9c4SApple OSS Distributions #include <IOKit/assert.h>
33*a325d9c4SApple OSS Distributions #include <IOKit/system.h>
34*a325d9c4SApple OSS Distributions #include <IOKit/IOLib.h>
35*a325d9c4SApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*a325d9c4SApple OSS Distributions #include <IOKit/IOMapper.h>
37*a325d9c4SApple OSS Distributions #include <IOKit/IODMACommand.h>
38*a325d9c4SApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*a325d9c4SApple OSS Distributions
40*a325d9c4SApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*a325d9c4SApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*a325d9c4SApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*a325d9c4SApple OSS Distributions
44*a325d9c4SApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*a325d9c4SApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*a325d9c4SApple OSS Distributions #include <libkern/OSDebug.h>
47*a325d9c4SApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*a325d9c4SApple OSS Distributions
49*a325d9c4SApple OSS Distributions #include "IOKitKernelInternal.h"
50*a325d9c4SApple OSS Distributions
51*a325d9c4SApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*a325d9c4SApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*a325d9c4SApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*a325d9c4SApple OSS Distributions #include <libkern/c++/OSArray.h>
55*a325d9c4SApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*a325d9c4SApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*a325d9c4SApple OSS Distributions #include <os/overflow.h>
58*a325d9c4SApple OSS Distributions #include <os/cpp_util.h>
59*a325d9c4SApple OSS Distributions #include <os/base_private.h>
60*a325d9c4SApple OSS Distributions
61*a325d9c4SApple OSS Distributions #include <sys/uio.h>
62*a325d9c4SApple OSS Distributions
63*a325d9c4SApple OSS Distributions __BEGIN_DECLS
64*a325d9c4SApple OSS Distributions #include <vm/pmap.h>
65*a325d9c4SApple OSS Distributions #include <vm/vm_pageout.h>
66*a325d9c4SApple OSS Distributions #include <mach/memory_object_types.h>
67*a325d9c4SApple OSS Distributions #include <device/device_port.h>
68*a325d9c4SApple OSS Distributions
69*a325d9c4SApple OSS Distributions #include <mach/vm_prot.h>
70*a325d9c4SApple OSS Distributions #include <mach/mach_vm.h>
71*a325d9c4SApple OSS Distributions #include <mach/memory_entry.h>
72*a325d9c4SApple OSS Distributions #include <vm/vm_fault.h>
73*a325d9c4SApple OSS Distributions #include <vm/vm_protos.h>
74*a325d9c4SApple OSS Distributions
75*a325d9c4SApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76*a325d9c4SApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
77*a325d9c4SApple OSS Distributions
78*a325d9c4SApple OSS Distributions __END_DECLS
79*a325d9c4SApple OSS Distributions
/* Sentinel mapper pointer (not a real object): asks the mapping code to
 * wait for the system mapper to become available. */
#define kIOMapperWaitSystem     ((IOMapper *) 1)

// Cached pointer to the platform's system-wide IOMapper; NULL until resolved.
static IOMapper * gIOSystemMapper = NULL;

// NOTE(review): presumably the highest valid physical page number; it is
// set outside this view — confirm before relying on it.
ppnum_t gIOLastPage;

enum {
	// 64KB guard region size; consumers of this constant are outside this view.
	kIOMapGuardSizeLarge = 65536
};
89*a325d9c4SApple OSS Distributions
90*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
91*a325d9c4SApple OSS Distributions
// IOMemoryDescriptor is abstract: no direct instances, only subclasses.
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

// 'super' refers to the parent class for the IOGeneralMemoryDescriptor
// member functions defined in this file.
#define super IOMemoryDescriptor

// Concrete subclass; per its name, ZC_ZFREE_CLEARMEM requests the zone
// allocator clear instance memory on free.
OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
98*a325d9c4SApple OSS Distributions
99*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
100*a325d9c4SApple OSS Distributions
// Recursive lock serializing global memory-descriptor state (e.g. the
// device-pager back-reference read by device_data_action() below).
static IORecursiveLock * gIOMemoryLock;

// Convenience wrappers over gIOMemoryLock. SLEEP/WAKEUP use 'this' as the
// wait event, so they are only usable inside member functions.
#define LOCK     IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK   IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP    IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug tracing; change "#if 0" to "#if 1" to enable kprintf output.
#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif
114*a325d9c4SApple OSS Distributions
115*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
116*a325d9c4SApple OSS Distributions
117*a325d9c4SApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
118*a325d9c4SApple OSS Distributions // Function
119*a325d9c4SApple OSS Distributions
// Flag bits stored in ioPLBlock::fFlags (consumers are outside this view).
enum ioPLBlockFlags {
	kIOPLOnDevice = 0x00000001,
	kIOPLExternUPL = 0x00000002,
};
124*a325d9c4SApple OSS Distributions
// Initialization payload pairing a source descriptor with a persistent
// memory reference; used by the persistent-descriptor init path.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;     // source descriptor
	IOMemoryReference * fMemRef;               // its memory reference
};
129*a325d9c4SApple OSS Distributions
// Book-keeping for one UPL (universal page list) covering a slice of a
// general memory descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo;   // Pointer to page list or index into it
	uint64_t fIOMDOffset;     // The offset of this iopl in descriptor
	ppnum_t fMappedPage;      // Page number of first page in this iopl
	unsigned int fPageOffset; // Offset within first page of iopl
	unsigned int fFlags;      // Flags (see ioPLBlockFlags)
};
138*a325d9c4SApple OSS Distributions
// Maximum wire tags tracked per descriptor; consumer is outside this view.
enum { kMaxWireTags = 6 };
140*a325d9c4SApple OSS Distributions
// Variable-sized private data for IOGeneralMemoryDescriptor, kept inside an
// _IOMemoryDescriptorMixedData buffer. Layout: this fixed header, then
// fPageCnt upl_page_info_t entries, then an array of ioPLBlock — see the
// getDataP/getIOPLList/computeDataSize macros below.
struct ioGMDData {
	IOMapper * fMapper;
	uint64_t fDMAMapAlignment;
	uint64_t fMappedBase;        // meaningful only when fMappedBaseValid is set
	uint64_t fMappedLength;
	uint64_t fPreparationID;
#if IOTRACKING
	IOTracking fWireTracking;
#endif /* IOTRACKING */
	unsigned int fPageCnt;       // number of entries in fPageList
	uint8_t fDMAMapNumAddressBits;
	unsigned char fCompletionError:1;
	unsigned char fMappedBaseValid:1;
	unsigned char _resv:4;
	unsigned char fDMAAccess:2;

	/* variable length arrays */
	// Trailing page list declared [1] (pre-C99 flexible-array idiom);
	// actual element count is fPageCnt.
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
166*a325d9c4SApple OSS Distributions
167*a325d9c4SApple OSS Distributions #pragma GCC visibility push(hidden)
168*a325d9c4SApple OSS Distributions
// Growable byte buffer used to hold the ioGMDData blob above (an OSData-like
// container; member implementations live elsewhere in this file).
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create a buffer pre-sized to hold 'capacity' bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	virtual bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append 'length' bytes from 'bytes'; returns false on failure.
	virtual bool appendBytes(const void * bytes, size_t length);
	// Adjust the logical length (growth semantics implemented elsewhere).
	virtual void setLength(size_t length);

	virtual const void * getBytes() const;
	virtual size_t getLength() const;

private:
	void freeMemory();  // release the backing storage

	void * _data = nullptr;   // backing storage
	size_t _length = 0;       // bytes in use
	size_t _capacity = 0;     // bytes allocated
};
191*a325d9c4SApple OSS Distributions
192*a325d9c4SApple OSS Distributions #pragma GCC visibility pop
193*a325d9c4SApple OSS Distributions
// Accessors over the _IOMemoryDescriptorMixedData blob holding an ioGMDData.
// getDataP:    view the raw bytes as the ioGMDData header.
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
// getIOPLList: the ioPLBlock array starts right after the fPageCnt page entries.
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
// getNumIOPL:  number of ioPLBlocks, derived from the container's total length.
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
// computeDataSize: bytes needed for p page entries plus u ioPLBlocks.
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
203*a325d9c4SApple OSS Distributions
204*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
205*a325d9c4SApple OSS Distributions
206*a325d9c4SApple OSS Distributions extern "C" {
207*a325d9c4SApple OSS Distributions kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)208*a325d9c4SApple OSS Distributions device_data_action(
209*a325d9c4SApple OSS Distributions uintptr_t device_handle,
210*a325d9c4SApple OSS Distributions ipc_port_t device_pager,
211*a325d9c4SApple OSS Distributions vm_prot_t protection,
212*a325d9c4SApple OSS Distributions vm_object_offset_t offset,
213*a325d9c4SApple OSS Distributions vm_size_t size)
214*a325d9c4SApple OSS Distributions {
215*a325d9c4SApple OSS Distributions kern_return_t kr;
216*a325d9c4SApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
217*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> memDesc;
218*a325d9c4SApple OSS Distributions
219*a325d9c4SApple OSS Distributions LOCK;
220*a325d9c4SApple OSS Distributions if (ref->dp.memory) {
221*a325d9c4SApple OSS Distributions memDesc.reset(ref->dp.memory, OSRetain);
222*a325d9c4SApple OSS Distributions kr = memDesc->handleFault(device_pager, offset, size);
223*a325d9c4SApple OSS Distributions memDesc.reset();
224*a325d9c4SApple OSS Distributions } else {
225*a325d9c4SApple OSS Distributions kr = KERN_ABORTED;
226*a325d9c4SApple OSS Distributions }
227*a325d9c4SApple OSS Distributions UNLOCK;
228*a325d9c4SApple OSS Distributions
229*a325d9c4SApple OSS Distributions return kr;
230*a325d9c4SApple OSS Distributions }
231*a325d9c4SApple OSS Distributions
232*a325d9c4SApple OSS Distributions kern_return_t
device_close(uintptr_t device_handle)233*a325d9c4SApple OSS Distributions device_close(
234*a325d9c4SApple OSS Distributions uintptr_t device_handle)
235*a325d9c4SApple OSS Distributions {
236*a325d9c4SApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
237*a325d9c4SApple OSS Distributions
238*a325d9c4SApple OSS Distributions IOFreeType( ref, IOMemoryDescriptorReserved );
239*a325d9c4SApple OSS Distributions
240*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
241*a325d9c4SApple OSS Distributions }
242*a325d9c4SApple OSS Distributions }; // end extern "C"
243*a325d9c4SApple OSS Distributions
244*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
245*a325d9c4SApple OSS Distributions
246*a325d9c4SApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
247*a325d9c4SApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
248*a325d9c4SApple OSS Distributions // checked for as a NULL reference is illegal.
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
//
// Fetch the (address, length) pair for range index 'ind' from the union
// 'r'; 'type' (the descriptor's kIOMemoryType* bits) selects which union
// member is active.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
    UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// User I/O vector: read iovec 'ind' and widen to mach types.
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		// 32-bit kernel only: 64-bit ranges live in the IOAddressRange array.
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len = cur.length;
	}
#endif /* !__LP64__ */
	else {
		// On LP64 the v64 branch is compiled out, so 64-bit types fall
		// through here — presumably IOVirtualRange and IOAddressRange
		// coincide on LP64; confirm in IOTypes headers.
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len = cur.length;
	}
}
274*a325d9c4SApple OSS Distributions
275*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
276*a325d9c4SApple OSS Distributions
277*a325d9c4SApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)278*a325d9c4SApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
279*a325d9c4SApple OSS Distributions {
280*a325d9c4SApple OSS Distributions IOReturn err = kIOReturnSuccess;
281*a325d9c4SApple OSS Distributions
282*a325d9c4SApple OSS Distributions *control = VM_PURGABLE_SET_STATE;
283*a325d9c4SApple OSS Distributions
284*a325d9c4SApple OSS Distributions enum { kIOMemoryPurgeableControlMask = 15 };
285*a325d9c4SApple OSS Distributions
286*a325d9c4SApple OSS Distributions switch (kIOMemoryPurgeableControlMask & newState) {
287*a325d9c4SApple OSS Distributions case kIOMemoryPurgeableKeepCurrent:
288*a325d9c4SApple OSS Distributions *control = VM_PURGABLE_GET_STATE;
289*a325d9c4SApple OSS Distributions break;
290*a325d9c4SApple OSS Distributions
291*a325d9c4SApple OSS Distributions case kIOMemoryPurgeableNonVolatile:
292*a325d9c4SApple OSS Distributions *state = VM_PURGABLE_NONVOLATILE;
293*a325d9c4SApple OSS Distributions break;
294*a325d9c4SApple OSS Distributions case kIOMemoryPurgeableVolatile:
295*a325d9c4SApple OSS Distributions *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
296*a325d9c4SApple OSS Distributions break;
297*a325d9c4SApple OSS Distributions case kIOMemoryPurgeableEmpty:
298*a325d9c4SApple OSS Distributions *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
299*a325d9c4SApple OSS Distributions break;
300*a325d9c4SApple OSS Distributions default:
301*a325d9c4SApple OSS Distributions err = kIOReturnBadArgument;
302*a325d9c4SApple OSS Distributions break;
303*a325d9c4SApple OSS Distributions }
304*a325d9c4SApple OSS Distributions
305*a325d9c4SApple OSS Distributions if (*control == VM_PURGABLE_SET_STATE) {
306*a325d9c4SApple OSS Distributions // let VM know this call is from the kernel and is allowed to alter
307*a325d9c4SApple OSS Distributions // the volatility of the memory entry even if it was created with
308*a325d9c4SApple OSS Distributions // MAP_MEM_PURGABLE_KERNEL_ONLY
309*a325d9c4SApple OSS Distributions *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
310*a325d9c4SApple OSS Distributions }
311*a325d9c4SApple OSS Distributions
312*a325d9c4SApple OSS Distributions return err;
313*a325d9c4SApple OSS Distributions }
314*a325d9c4SApple OSS Distributions
315*a325d9c4SApple OSS Distributions static IOReturn
purgeableStateBits(int * state)316*a325d9c4SApple OSS Distributions purgeableStateBits(int * state)
317*a325d9c4SApple OSS Distributions {
318*a325d9c4SApple OSS Distributions IOReturn err = kIOReturnSuccess;
319*a325d9c4SApple OSS Distributions
320*a325d9c4SApple OSS Distributions switch (VM_PURGABLE_STATE_MASK & *state) {
321*a325d9c4SApple OSS Distributions case VM_PURGABLE_NONVOLATILE:
322*a325d9c4SApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
323*a325d9c4SApple OSS Distributions break;
324*a325d9c4SApple OSS Distributions case VM_PURGABLE_VOLATILE:
325*a325d9c4SApple OSS Distributions *state = kIOMemoryPurgeableVolatile;
326*a325d9c4SApple OSS Distributions break;
327*a325d9c4SApple OSS Distributions case VM_PURGABLE_EMPTY:
328*a325d9c4SApple OSS Distributions *state = kIOMemoryPurgeableEmpty;
329*a325d9c4SApple OSS Distributions break;
330*a325d9c4SApple OSS Distributions default:
331*a325d9c4SApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
332*a325d9c4SApple OSS Distributions err = kIOReturnNotReady;
333*a325d9c4SApple OSS Distributions break;
334*a325d9c4SApple OSS Distributions }
335*a325d9c4SApple OSS Distributions return err;
336*a325d9c4SApple OSS Distributions }
337*a325d9c4SApple OSS Distributions
// One row per IOKit cache mode: the VM WIMG cacheability attribute and the
// MAP_MEM_* object type used when creating named entries.
typedef struct {
	unsigned int wimg;         // VM_WIMG_* attribute
	unsigned int object_type;  // MAP_MEM_* named-entry type
} iokit_memtype_entry;

// Indexed by the kIO*Cache cache-mode constants (designated initializers
// keep row order tied to the enum values).
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
355*a325d9c4SApple OSS Distributions
// Encode the MAP_MEM_* object type for 'cacheMode' into a vm_prot_t via the
// SET_MAP_MEM() helper (which packs the type into spare prot bits — macro
// defined in the mach memory headers).
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
	vm_prot_t prot = 0;
	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
	return prot;
}
364*a325d9c4SApple OSS Distributions
365*a325d9c4SApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)366*a325d9c4SApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
367*a325d9c4SApple OSS Distributions {
368*a325d9c4SApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
369*a325d9c4SApple OSS Distributions if (cacheMode == kIODefaultCache) {
370*a325d9c4SApple OSS Distributions return -1U;
371*a325d9c4SApple OSS Distributions }
372*a325d9c4SApple OSS Distributions return iomd_mem_types[cacheMode].wimg;
373*a325d9c4SApple OSS Distributions }
374*a325d9c4SApple OSS Distributions
375*a325d9c4SApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)376*a325d9c4SApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
377*a325d9c4SApple OSS Distributions {
378*a325d9c4SApple OSS Distributions pagerFlags &= VM_WIMG_MASK;
379*a325d9c4SApple OSS Distributions IOOptionBits cacheMode = kIODefaultCache;
380*a325d9c4SApple OSS Distributions for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
381*a325d9c4SApple OSS Distributions if (iomd_mem_types[i].wimg == pagerFlags) {
382*a325d9c4SApple OSS Distributions cacheMode = i;
383*a325d9c4SApple OSS Distributions break;
384*a325d9c4SApple OSS Distributions }
385*a325d9c4SApple OSS Distributions }
386*a325d9c4SApple OSS Distributions return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
387*a325d9c4SApple OSS Distributions }
388*a325d9c4SApple OSS Distributions
389*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
390*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391*a325d9c4SApple OSS Distributions
// One VM named entry plus the span of the descriptor it covers. The port
// is a send right owned by the enclosing IOMemoryReference (released in
// memoryReferenceFree()).
struct IOMemoryEntry {
	ipc_port_t entry;
	int64_t offset;
	uint64_t size;
	uint64_t start;
};
398*a325d9c4SApple OSS Distributions
// Reference-counted collection of IOMemoryEntry records describing the
// memory backing a descriptor; allocated by memoryReferenceAlloc() and
// destroyed via memoryReferenceRelease()/memoryReferenceFree().
struct IOMemoryReference {
	volatile SInt32 refCount;            // freed when it drops back to 0
	vm_prot_t prot;
	uint32_t capacity;                   // allocated entries[] slots
	uint32_t count;                      // entries[] slots in use
	struct IOMemoryReference * mapRef;   // optional nested ref, freed recursively
	IOMemoryEntry entries[0];            // trailing variable-length array
};
407*a325d9c4SApple OSS Distributions
// Option bits accepted by memoryReferenceCreate().
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Global count of live IOMemoryReference objects (statistics/debug).
SInt32 gIOMemoryReferenceCount;
415*a325d9c4SApple OSS Distributions
// Allocate an IOMemoryReference with room for 'capacity' IOMemoryEntry
// slots, or grow an existing one when 'realloc' is non-NULL (contents,
// including refCount, are copied over).
//
// Returns NULL on allocation failure. NOTE(review): in the grow path the
// old 'realloc' buffer is freed even when the new allocation fails, so the
// caller must not touch it afterwards — verify call sites assume this.
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
	IOMemoryReference * ref;
	size_t newSize, oldSize, copySize;

	// Size = header (entries[0] contributes nothing) + capacity slots.
	newSize = (sizeof(IOMemoryReference)
	    - sizeof(ref->entries)
	    + capacity * sizeof(ref->entries[0]));
	ref = (typeof(ref))IOMalloc(newSize);
	if (realloc) {
		oldSize = (sizeof(IOMemoryReference)
		    - sizeof(realloc->entries)
		    + realloc->capacity * sizeof(realloc->entries[0]));
		copySize = oldSize;
		if (copySize > newSize) {
			copySize = newSize;
		}
		if (ref) {
			// Carry over the header and existing entries.
			bcopy(realloc, ref, copySize);
		}
		IOFree(realloc, oldSize);
	} else if (ref) {
		// Fresh reference: zeroed, single owner, counted globally.
		bzero(ref, sizeof(*ref));
		ref->refCount = 1;
		OSIncrementAtomic(&gIOMemoryReferenceCount);
	}
	if (!ref) {
		return NULL;
	}
	ref->capacity = capacity;
	return ref;
}
449*a325d9c4SApple OSS Distributions
450*a325d9c4SApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)451*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
452*a325d9c4SApple OSS Distributions {
453*a325d9c4SApple OSS Distributions IOMemoryEntry * entries;
454*a325d9c4SApple OSS Distributions size_t size;
455*a325d9c4SApple OSS Distributions
456*a325d9c4SApple OSS Distributions if (ref->mapRef) {
457*a325d9c4SApple OSS Distributions memoryReferenceFree(ref->mapRef);
458*a325d9c4SApple OSS Distributions ref->mapRef = NULL;
459*a325d9c4SApple OSS Distributions }
460*a325d9c4SApple OSS Distributions
461*a325d9c4SApple OSS Distributions entries = ref->entries + ref->count;
462*a325d9c4SApple OSS Distributions while (entries > &ref->entries[0]) {
463*a325d9c4SApple OSS Distributions entries--;
464*a325d9c4SApple OSS Distributions ipc_port_release_send(entries->entry);
465*a325d9c4SApple OSS Distributions }
466*a325d9c4SApple OSS Distributions size = (sizeof(IOMemoryReference)
467*a325d9c4SApple OSS Distributions - sizeof(ref->entries)
468*a325d9c4SApple OSS Distributions + ref->capacity * sizeof(ref->entries[0]));
469*a325d9c4SApple OSS Distributions IOFree(ref, size);
470*a325d9c4SApple OSS Distributions
471*a325d9c4SApple OSS Distributions OSDecrementAtomic(&gIOMemoryReferenceCount);
472*a325d9c4SApple OSS Distributions }
473*a325d9c4SApple OSS Distributions
474*a325d9c4SApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)475*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
476*a325d9c4SApple OSS Distributions {
477*a325d9c4SApple OSS Distributions if (1 == OSDecrementAtomic(&ref->refCount)) {
478*a325d9c4SApple OSS Distributions memoryReferenceFree(ref);
479*a325d9c4SApple OSS Distributions }
480*a325d9c4SApple OSS Distributions }
481*a325d9c4SApple OSS Distributions
482*a325d9c4SApple OSS Distributions
/*
 * Build an IOMemoryReference describing this descriptor's backing memory as
 * a list of Mach named memory entries (one send right per IOMemoryEntry),
 * suitable for later mapping via memoryReferenceMap().
 *
 * options:   kIOMemoryReferenceWrite forces VM_PROT_WRITE on the entries;
 *            kIOMemoryReferenceCOW requests copy (MAP_MEM_VM_COPY) semantics;
 *            kIOMemoryReferenceReuse allows returning the descriptor's
 *            existing _memRef when the freshly made entries match it.
 * reference: out parameter — receives the new (or reused) reference on
 *            success, NULL on failure.
 *
 * Returns KERN_SUCCESS, kIOReturnNoMemory, or the VM error encountered.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	// Initial entry capacity and growth increment for the reference.
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t        err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;
	IOMemoryEntry *      cloneEntries;
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
	int                  misaligned_start = 0, misaligned_end = 0;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	remain = _length;
	if (_task) {
		// Virtual descriptor: start from the first address/length range.
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain = nextLen;
		}
	} else {
		// Physical descriptor: resolve the first physical segment.
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	// With kIOMemoryReferenceReuse and an existing _memRef, try to match the
	// new entries against the old ones (MAP_MEM_NAMED_REUSE); if every entry
	// matches, the old reference is returned instead of the new one (below).
	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//   + "network" ledger tag
					//   + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//   + no ledger tag
					//   + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			// NULL map: the named entry is created fresh, not derived
			// from an existing mapping.
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		while (remain) {
			srcAddr = nextAddr;
			srcLen = nextLen;
			nextAddr = 0;
			nextLen = 0;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			if (MAP_MEM_USE_DATA_ADDR & prot) {
				// Exact (unrounded) addresses are handed to the VM.
				entryAddr = srcAddr;
				endAddr = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// Make one named entry per iteration; the VM may clip the
			// requested size (actualSize < entrySize), so loop until the
			// coalesced range is fully covered.
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						// Ran out of old entries to compare against.
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					// Data-address entries may round up; clamp back to what
					// was asked for.
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				// Detect sub-page (4K-on-16K) overmapping at either end of
				// the entry so misaligned interior entries can be rejected.
				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					if (misaligned_start || misaligned_end) {
						// Misaligned interior entries can't be mapped
						// contiguously; give up on this reference.
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				// Grow the entry array; memoryReferenceAlloc(.., ref)
				// carries the existing entries over, so re-derive the
				// cursor from the (possibly moved) new allocation.
				if (count >= ref->capacity) {
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry = entry;
				entries->size = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				if (MAP_MEM_NAMED_REUSE & prot) {
					// Reuse stays viable only while every new entry is
					// identical to the corresponding old one.
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		// Back the memory with a device pager; the single named entry
		// created below covers the whole pager object.
		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry = entry;
				entries->size = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot = prot;

	// A copy-on-write descriptor additionally needs a COW sub-reference
	// used when the memory is mapped (ref->mapRef).
	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		if (MAP_MEM_NAMED_REUSE & prot) {
			// Every entry matched the existing _memRef: discard the new
			// reference and hand back the old one with an extra retain.
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
803*a325d9c4SApple OSS Distributions
804*a325d9c4SApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)805*a325d9c4SApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
806*a325d9c4SApple OSS Distributions {
807*a325d9c4SApple OSS Distributions switch (kIOMapGuardedMask & options) {
808*a325d9c4SApple OSS Distributions default:
809*a325d9c4SApple OSS Distributions case kIOMapGuardedSmall:
810*a325d9c4SApple OSS Distributions return vm_map_page_size(map);
811*a325d9c4SApple OSS Distributions case kIOMapGuardedLarge:
812*a325d9c4SApple OSS Distributions assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
813*a325d9c4SApple OSS Distributions return kIOMapGuardSizeLarge;
814*a325d9c4SApple OSS Distributions }
815*a325d9c4SApple OSS Distributions ;
816*a325d9c4SApple OSS Distributions }
817*a325d9c4SApple OSS Distributions
818*a325d9c4SApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)819*a325d9c4SApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
820*a325d9c4SApple OSS Distributions vm_map_offset_t addr, mach_vm_size_t size)
821*a325d9c4SApple OSS Distributions {
822*a325d9c4SApple OSS Distributions kern_return_t kr;
823*a325d9c4SApple OSS Distributions vm_map_offset_t actualAddr;
824*a325d9c4SApple OSS Distributions mach_vm_size_t actualSize;
825*a325d9c4SApple OSS Distributions
826*a325d9c4SApple OSS Distributions actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
827*a325d9c4SApple OSS Distributions actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
828*a325d9c4SApple OSS Distributions
829*a325d9c4SApple OSS Distributions if (kIOMapGuardedMask & options) {
830*a325d9c4SApple OSS Distributions mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
831*a325d9c4SApple OSS Distributions actualAddr -= guardSize;
832*a325d9c4SApple OSS Distributions actualSize += 2 * guardSize;
833*a325d9c4SApple OSS Distributions }
834*a325d9c4SApple OSS Distributions kr = mach_vm_deallocate(map, actualAddr, actualSize);
835*a325d9c4SApple OSS Distributions
836*a325d9c4SApple OSS Distributions return kr;
837*a325d9c4SApple OSS Distributions }
838*a325d9c4SApple OSS Distributions
/*
 * vm_map_enter_mem_object() wrapper used (directly or via
 * IOIteratePageableMaps) to reserve VM for a descriptor mapping.
 *
 * _ref is an IOMemoryDescriptorMapAllocRef carrying the request (size, tag,
 * prot, options, and — for fixed mappings — the requested address in
 * 'mapped'). On success ref->map/ref->mapped are updated with the result.
 *
 * For guarded mappings (kIOMapGuardedMask set, which requires
 * kIOMapAnywhere) the allocation is widened by one guard region on each
 * side; the guards are made inaccessible and ref->mapped points past the
 * leading guard.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn        err;
	vm_map_offset_t addr;
	mach_vm_size_t  size;
	mach_vm_size_t  guardSize;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		if (!(kIOMapAnywhere & ref->options)) {
			// Guarded fixed-address mappings are not supported.
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		size += 2 * guardSize;
	}

	err = vm_map_enter_mem_object(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    (((ref->options & kIOMapAnywhere)
	    ? VM_FLAGS_ANYWHERE
	    : VM_FLAGS_FIXED)),
	    VM_MAP_KERNEL_FLAGS_NONE,
	    ref->tag,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false, /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// Revoke all access on the first and last guardSize bytes,
			// then report the interior as the mapped address.
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
			assert(KERN_SUCCESS == err);
			err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
			assert(KERN_SUCCESS == err);
			ref->mapped += guardSize;
		}
	}

	return err;
}
894*a325d9c4SApple OSS Distributions
895*a325d9c4SApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)896*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
897*a325d9c4SApple OSS Distributions IOMemoryReference * ref,
898*a325d9c4SApple OSS Distributions vm_map_t map,
899*a325d9c4SApple OSS Distributions mach_vm_size_t inoffset,
900*a325d9c4SApple OSS Distributions mach_vm_size_t size,
901*a325d9c4SApple OSS Distributions IOOptionBits options,
902*a325d9c4SApple OSS Distributions mach_vm_address_t * inaddr)
903*a325d9c4SApple OSS Distributions {
904*a325d9c4SApple OSS Distributions IOReturn err;
905*a325d9c4SApple OSS Distributions int64_t offset = inoffset;
906*a325d9c4SApple OSS Distributions uint32_t rangeIdx, entryIdx;
907*a325d9c4SApple OSS Distributions vm_map_offset_t addr, mapAddr;
908*a325d9c4SApple OSS Distributions vm_map_offset_t pageOffset, entryOffset, remain, chunk;
909*a325d9c4SApple OSS Distributions
910*a325d9c4SApple OSS Distributions mach_vm_address_t nextAddr;
911*a325d9c4SApple OSS Distributions mach_vm_size_t nextLen;
912*a325d9c4SApple OSS Distributions IOByteCount physLen;
913*a325d9c4SApple OSS Distributions IOMemoryEntry * entry;
914*a325d9c4SApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
915*a325d9c4SApple OSS Distributions IOOptionBits type;
916*a325d9c4SApple OSS Distributions IOOptionBits cacheMode;
917*a325d9c4SApple OSS Distributions vm_tag_t tag;
918*a325d9c4SApple OSS Distributions // for the kIOMapPrefault option.
919*a325d9c4SApple OSS Distributions upl_page_info_t * pageList = NULL;
920*a325d9c4SApple OSS Distributions UInt currentPageIndex = 0;
921*a325d9c4SApple OSS Distributions bool didAlloc;
922*a325d9c4SApple OSS Distributions
923*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
924*a325d9c4SApple OSS Distributions
925*a325d9c4SApple OSS Distributions if (ref->mapRef) {
926*a325d9c4SApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
927*a325d9c4SApple OSS Distributions return err;
928*a325d9c4SApple OSS Distributions }
929*a325d9c4SApple OSS Distributions
930*a325d9c4SApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
931*a325d9c4SApple OSS Distributions err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
932*a325d9c4SApple OSS Distributions return err;
933*a325d9c4SApple OSS Distributions }
934*a325d9c4SApple OSS Distributions
935*a325d9c4SApple OSS Distributions type = _flags & kIOMemoryTypeMask;
936*a325d9c4SApple OSS Distributions
937*a325d9c4SApple OSS Distributions prot = VM_PROT_READ;
938*a325d9c4SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
939*a325d9c4SApple OSS Distributions prot |= VM_PROT_WRITE;
940*a325d9c4SApple OSS Distributions }
941*a325d9c4SApple OSS Distributions prot &= ref->prot;
942*a325d9c4SApple OSS Distributions
943*a325d9c4SApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
944*a325d9c4SApple OSS Distributions if (kIODefaultCache != cacheMode) {
945*a325d9c4SApple OSS Distributions // VM system requires write access to update named entry cache mode
946*a325d9c4SApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
947*a325d9c4SApple OSS Distributions }
948*a325d9c4SApple OSS Distributions
949*a325d9c4SApple OSS Distributions tag = (typeof(tag))getVMTag(map);
950*a325d9c4SApple OSS Distributions
951*a325d9c4SApple OSS Distributions if (_task) {
952*a325d9c4SApple OSS Distributions // Find first range for offset
953*a325d9c4SApple OSS Distributions if (!_rangesCount) {
954*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
955*a325d9c4SApple OSS Distributions }
956*a325d9c4SApple OSS Distributions for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
957*a325d9c4SApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
958*a325d9c4SApple OSS Distributions if (remain < nextLen) {
959*a325d9c4SApple OSS Distributions break;
960*a325d9c4SApple OSS Distributions }
961*a325d9c4SApple OSS Distributions remain -= nextLen;
962*a325d9c4SApple OSS Distributions }
963*a325d9c4SApple OSS Distributions } else {
964*a325d9c4SApple OSS Distributions rangeIdx = 0;
965*a325d9c4SApple OSS Distributions remain = 0;
966*a325d9c4SApple OSS Distributions nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
967*a325d9c4SApple OSS Distributions nextLen = size;
968*a325d9c4SApple OSS Distributions }
969*a325d9c4SApple OSS Distributions
970*a325d9c4SApple OSS Distributions assert(remain < nextLen);
971*a325d9c4SApple OSS Distributions if (remain >= nextLen) {
972*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
973*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
974*a325d9c4SApple OSS Distributions }
975*a325d9c4SApple OSS Distributions
976*a325d9c4SApple OSS Distributions nextAddr += remain;
977*a325d9c4SApple OSS Distributions nextLen -= remain;
978*a325d9c4SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
979*a325d9c4SApple OSS Distributions pageOffset = (vm_map_page_mask(map) & nextAddr);
980*a325d9c4SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
981*a325d9c4SApple OSS Distributions pageOffset = (page_mask & nextAddr);
982*a325d9c4SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
983*a325d9c4SApple OSS Distributions addr = 0;
984*a325d9c4SApple OSS Distributions didAlloc = false;
985*a325d9c4SApple OSS Distributions
986*a325d9c4SApple OSS Distributions if (!(options & kIOMapAnywhere)) {
987*a325d9c4SApple OSS Distributions addr = *inaddr;
988*a325d9c4SApple OSS Distributions if (pageOffset != (vm_map_page_mask(map) & addr)) {
989*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
990*a325d9c4SApple OSS Distributions }
991*a325d9c4SApple OSS Distributions addr -= pageOffset;
992*a325d9c4SApple OSS Distributions }
993*a325d9c4SApple OSS Distributions
994*a325d9c4SApple OSS Distributions // find first entry for offset
995*a325d9c4SApple OSS Distributions for (entryIdx = 0;
996*a325d9c4SApple OSS Distributions (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
997*a325d9c4SApple OSS Distributions entryIdx++) {
998*a325d9c4SApple OSS Distributions }
999*a325d9c4SApple OSS Distributions entryIdx--;
1000*a325d9c4SApple OSS Distributions entry = &ref->entries[entryIdx];
1001*a325d9c4SApple OSS Distributions
1002*a325d9c4SApple OSS Distributions // allocate VM
1003*a325d9c4SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1004*a325d9c4SApple OSS Distributions size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1005*a325d9c4SApple OSS Distributions #else
1006*a325d9c4SApple OSS Distributions size = round_page_64(size + pageOffset);
1007*a325d9c4SApple OSS Distributions #endif
1008*a325d9c4SApple OSS Distributions if (kIOMapOverwrite & options) {
1009*a325d9c4SApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1010*a325d9c4SApple OSS Distributions map = IOPageableMapForAddress(addr);
1011*a325d9c4SApple OSS Distributions }
1012*a325d9c4SApple OSS Distributions err = KERN_SUCCESS;
1013*a325d9c4SApple OSS Distributions } else {
1014*a325d9c4SApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1015*a325d9c4SApple OSS Distributions ref.map = map;
1016*a325d9c4SApple OSS Distributions ref.tag = tag;
1017*a325d9c4SApple OSS Distributions ref.options = options;
1018*a325d9c4SApple OSS Distributions ref.size = size;
1019*a325d9c4SApple OSS Distributions ref.prot = prot;
1020*a325d9c4SApple OSS Distributions if (options & kIOMapAnywhere) {
1021*a325d9c4SApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1022*a325d9c4SApple OSS Distributions ref.mapped = 0;
1023*a325d9c4SApple OSS Distributions } else {
1024*a325d9c4SApple OSS Distributions ref.mapped = addr;
1025*a325d9c4SApple OSS Distributions }
1026*a325d9c4SApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1027*a325d9c4SApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1028*a325d9c4SApple OSS Distributions } else {
1029*a325d9c4SApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1030*a325d9c4SApple OSS Distributions }
1031*a325d9c4SApple OSS Distributions if (KERN_SUCCESS == err) {
1032*a325d9c4SApple OSS Distributions addr = ref.mapped;
1033*a325d9c4SApple OSS Distributions map = ref.map;
1034*a325d9c4SApple OSS Distributions didAlloc = true;
1035*a325d9c4SApple OSS Distributions }
1036*a325d9c4SApple OSS Distributions }
1037*a325d9c4SApple OSS Distributions
1038*a325d9c4SApple OSS Distributions /*
1039*a325d9c4SApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1040*a325d9c4SApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1041*a325d9c4SApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1042*a325d9c4SApple OSS Distributions * operations.
1043*a325d9c4SApple OSS Distributions */
1044*a325d9c4SApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1045*a325d9c4SApple OSS Distributions options &= ~kIOMapPrefault;
1046*a325d9c4SApple OSS Distributions }
1047*a325d9c4SApple OSS Distributions
1048*a325d9c4SApple OSS Distributions /*
1049*a325d9c4SApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1050*a325d9c4SApple OSS Distributions * memory type, and the underlying data.
1051*a325d9c4SApple OSS Distributions */
1052*a325d9c4SApple OSS Distributions if (options & kIOMapPrefault) {
1053*a325d9c4SApple OSS Distributions /*
1054*a325d9c4SApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1055*a325d9c4SApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1056*a325d9c4SApple OSS Distributions */
1057*a325d9c4SApple OSS Distributions assert(_wireCount != 0);
1058*a325d9c4SApple OSS Distributions assert(_memoryEntries != NULL);
1059*a325d9c4SApple OSS Distributions if ((_wireCount == 0) ||
1060*a325d9c4SApple OSS Distributions (_memoryEntries == NULL)) {
1061*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1062*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
1063*a325d9c4SApple OSS Distributions }
1064*a325d9c4SApple OSS Distributions
1065*a325d9c4SApple OSS Distributions // Get the page list.
1066*a325d9c4SApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1067*a325d9c4SApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1068*a325d9c4SApple OSS Distributions pageList = getPageList(dataP);
1069*a325d9c4SApple OSS Distributions
1070*a325d9c4SApple OSS Distributions // Get the number of IOPLs.
1071*a325d9c4SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1072*a325d9c4SApple OSS Distributions
1073*a325d9c4SApple OSS Distributions /*
1074*a325d9c4SApple OSS Distributions * Scan through the IOPL Info Blocks, looking for the first block containing
1075*a325d9c4SApple OSS Distributions * the offset. The research will go past it, so we'll need to go back to the
1076*a325d9c4SApple OSS Distributions * right range at the end.
1077*a325d9c4SApple OSS Distributions */
1078*a325d9c4SApple OSS Distributions UInt ioplIndex = 0;
1079*a325d9c4SApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1080*a325d9c4SApple OSS Distributions ioplIndex++;
1081*a325d9c4SApple OSS Distributions }
1082*a325d9c4SApple OSS Distributions ioplIndex--;
1083*a325d9c4SApple OSS Distributions
1084*a325d9c4SApple OSS Distributions // Retrieve the IOPL info block.
1085*a325d9c4SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1086*a325d9c4SApple OSS Distributions
1087*a325d9c4SApple OSS Distributions /*
1088*a325d9c4SApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1089*a325d9c4SApple OSS Distributions * array.
1090*a325d9c4SApple OSS Distributions */
1091*a325d9c4SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1092*a325d9c4SApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1093*a325d9c4SApple OSS Distributions } else {
1094*a325d9c4SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1095*a325d9c4SApple OSS Distributions }
1096*a325d9c4SApple OSS Distributions
1097*a325d9c4SApple OSS Distributions // Rebase [offset] into the IOPL in order to looks for the first page index.
1098*a325d9c4SApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1099*a325d9c4SApple OSS Distributions
1100*a325d9c4SApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1101*a325d9c4SApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1102*a325d9c4SApple OSS Distributions }
1103*a325d9c4SApple OSS Distributions
1104*a325d9c4SApple OSS Distributions // enter mappings
1105*a325d9c4SApple OSS Distributions remain = size;
1106*a325d9c4SApple OSS Distributions mapAddr = addr;
1107*a325d9c4SApple OSS Distributions addr += pageOffset;
1108*a325d9c4SApple OSS Distributions
1109*a325d9c4SApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1110*a325d9c4SApple OSS Distributions entryOffset = offset - entry->offset;
1111*a325d9c4SApple OSS Distributions if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1112*a325d9c4SApple OSS Distributions err = kIOReturnNotAligned;
1113*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1114*a325d9c4SApple OSS Distributions break;
1115*a325d9c4SApple OSS Distributions }
1116*a325d9c4SApple OSS Distributions
1117*a325d9c4SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1118*a325d9c4SApple OSS Distributions vm_size_t unused = 0;
1119*a325d9c4SApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1120*a325d9c4SApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1121*a325d9c4SApple OSS Distributions assert(KERN_SUCCESS == err);
1122*a325d9c4SApple OSS Distributions }
1123*a325d9c4SApple OSS Distributions
1124*a325d9c4SApple OSS Distributions entryOffset -= pageOffset;
1125*a325d9c4SApple OSS Distributions if (entryOffset >= entry->size) {
1126*a325d9c4SApple OSS Distributions panic("entryOffset");
1127*a325d9c4SApple OSS Distributions }
1128*a325d9c4SApple OSS Distributions chunk = entry->size - entryOffset;
1129*a325d9c4SApple OSS Distributions if (chunk) {
1130*a325d9c4SApple OSS Distributions vm_map_kernel_flags_t vmk_flags;
1131*a325d9c4SApple OSS Distributions
1132*a325d9c4SApple OSS Distributions vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1133*a325d9c4SApple OSS Distributions vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1134*a325d9c4SApple OSS Distributions
1135*a325d9c4SApple OSS Distributions if (chunk > remain) {
1136*a325d9c4SApple OSS Distributions chunk = remain;
1137*a325d9c4SApple OSS Distributions }
1138*a325d9c4SApple OSS Distributions if (options & kIOMapPrefault) {
1139*a325d9c4SApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1140*a325d9c4SApple OSS Distributions
1141*a325d9c4SApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1142*a325d9c4SApple OSS Distributions &mapAddr,
1143*a325d9c4SApple OSS Distributions chunk, 0 /* mask */,
1144*a325d9c4SApple OSS Distributions (VM_FLAGS_FIXED
1145*a325d9c4SApple OSS Distributions | VM_FLAGS_OVERWRITE),
1146*a325d9c4SApple OSS Distributions vmk_flags,
1147*a325d9c4SApple OSS Distributions tag,
1148*a325d9c4SApple OSS Distributions entry->entry,
1149*a325d9c4SApple OSS Distributions entryOffset,
1150*a325d9c4SApple OSS Distributions prot, // cur
1151*a325d9c4SApple OSS Distributions prot, // max
1152*a325d9c4SApple OSS Distributions &pageList[currentPageIndex],
1153*a325d9c4SApple OSS Distributions nb_pages);
1154*a325d9c4SApple OSS Distributions
1155*a325d9c4SApple OSS Distributions if (err || vm_map_page_mask(map) < PAGE_MASK) {
1156*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1157*a325d9c4SApple OSS Distributions }
1158*a325d9c4SApple OSS Distributions // Compute the next index in the page list.
1159*a325d9c4SApple OSS Distributions currentPageIndex += nb_pages;
1160*a325d9c4SApple OSS Distributions assert(currentPageIndex <= _pages);
1161*a325d9c4SApple OSS Distributions } else {
1162*a325d9c4SApple OSS Distributions err = vm_map_enter_mem_object(map,
1163*a325d9c4SApple OSS Distributions &mapAddr,
1164*a325d9c4SApple OSS Distributions chunk, 0 /* mask */,
1165*a325d9c4SApple OSS Distributions (VM_FLAGS_FIXED
1166*a325d9c4SApple OSS Distributions | VM_FLAGS_OVERWRITE),
1167*a325d9c4SApple OSS Distributions vmk_flags,
1168*a325d9c4SApple OSS Distributions tag,
1169*a325d9c4SApple OSS Distributions entry->entry,
1170*a325d9c4SApple OSS Distributions entryOffset,
1171*a325d9c4SApple OSS Distributions false, // copy
1172*a325d9c4SApple OSS Distributions prot, // cur
1173*a325d9c4SApple OSS Distributions prot, // max
1174*a325d9c4SApple OSS Distributions VM_INHERIT_NONE);
1175*a325d9c4SApple OSS Distributions }
1176*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != err) {
1177*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1178*a325d9c4SApple OSS Distributions break;
1179*a325d9c4SApple OSS Distributions }
1180*a325d9c4SApple OSS Distributions remain -= chunk;
1181*a325d9c4SApple OSS Distributions if (!remain) {
1182*a325d9c4SApple OSS Distributions break;
1183*a325d9c4SApple OSS Distributions }
1184*a325d9c4SApple OSS Distributions mapAddr += chunk;
1185*a325d9c4SApple OSS Distributions offset += chunk - pageOffset;
1186*a325d9c4SApple OSS Distributions }
1187*a325d9c4SApple OSS Distributions pageOffset = 0;
1188*a325d9c4SApple OSS Distributions entry++;
1189*a325d9c4SApple OSS Distributions entryIdx++;
1190*a325d9c4SApple OSS Distributions if (entryIdx >= ref->count) {
1191*a325d9c4SApple OSS Distributions err = kIOReturnOverrun;
1192*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1193*a325d9c4SApple OSS Distributions break;
1194*a325d9c4SApple OSS Distributions }
1195*a325d9c4SApple OSS Distributions }
1196*a325d9c4SApple OSS Distributions
1197*a325d9c4SApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1198*a325d9c4SApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1199*a325d9c4SApple OSS Distributions addr = 0;
1200*a325d9c4SApple OSS Distributions }
1201*a325d9c4SApple OSS Distributions *inaddr = addr;
1202*a325d9c4SApple OSS Distributions
1203*a325d9c4SApple OSS Distributions if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1204*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1205*a325d9c4SApple OSS Distributions }
1206*a325d9c4SApple OSS Distributions return err;
1207*a325d9c4SApple OSS Distributions }
1208*a325d9c4SApple OSS Distributions
1209*a325d9c4SApple OSS Distributions #define LOGUNALIGN 0
1210*a325d9c4SApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1211*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1212*a325d9c4SApple OSS Distributions IOMemoryReference * ref,
1213*a325d9c4SApple OSS Distributions vm_map_t map,
1214*a325d9c4SApple OSS Distributions mach_vm_size_t inoffset,
1215*a325d9c4SApple OSS Distributions mach_vm_size_t size,
1216*a325d9c4SApple OSS Distributions IOOptionBits options,
1217*a325d9c4SApple OSS Distributions mach_vm_address_t * inaddr)
1218*a325d9c4SApple OSS Distributions {
1219*a325d9c4SApple OSS Distributions IOReturn err;
1220*a325d9c4SApple OSS Distributions int64_t offset = inoffset;
1221*a325d9c4SApple OSS Distributions uint32_t entryIdx, firstEntryIdx;
1222*a325d9c4SApple OSS Distributions vm_map_offset_t addr, mapAddr, mapAddrOut;
1223*a325d9c4SApple OSS Distributions vm_map_offset_t entryOffset, remain, chunk;
1224*a325d9c4SApple OSS Distributions
1225*a325d9c4SApple OSS Distributions IOMemoryEntry * entry;
1226*a325d9c4SApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
1227*a325d9c4SApple OSS Distributions IOOptionBits type;
1228*a325d9c4SApple OSS Distributions IOOptionBits cacheMode;
1229*a325d9c4SApple OSS Distributions vm_tag_t tag;
1230*a325d9c4SApple OSS Distributions // for the kIOMapPrefault option.
1231*a325d9c4SApple OSS Distributions upl_page_info_t * pageList = NULL;
1232*a325d9c4SApple OSS Distributions UInt currentPageIndex = 0;
1233*a325d9c4SApple OSS Distributions bool didAlloc;
1234*a325d9c4SApple OSS Distributions
1235*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1236*a325d9c4SApple OSS Distributions
1237*a325d9c4SApple OSS Distributions if (ref->mapRef) {
1238*a325d9c4SApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1239*a325d9c4SApple OSS Distributions return err;
1240*a325d9c4SApple OSS Distributions }
1241*a325d9c4SApple OSS Distributions
1242*a325d9c4SApple OSS Distributions #if LOGUNALIGN
1243*a325d9c4SApple OSS Distributions printf("MAP offset %qx, %qx\n", inoffset, size);
1244*a325d9c4SApple OSS Distributions #endif
1245*a325d9c4SApple OSS Distributions
1246*a325d9c4SApple OSS Distributions type = _flags & kIOMemoryTypeMask;
1247*a325d9c4SApple OSS Distributions
1248*a325d9c4SApple OSS Distributions prot = VM_PROT_READ;
1249*a325d9c4SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
1250*a325d9c4SApple OSS Distributions prot |= VM_PROT_WRITE;
1251*a325d9c4SApple OSS Distributions }
1252*a325d9c4SApple OSS Distributions prot &= ref->prot;
1253*a325d9c4SApple OSS Distributions
1254*a325d9c4SApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1255*a325d9c4SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1256*a325d9c4SApple OSS Distributions // VM system requires write access to update named entry cache mode
1257*a325d9c4SApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1258*a325d9c4SApple OSS Distributions }
1259*a325d9c4SApple OSS Distributions
1260*a325d9c4SApple OSS Distributions tag = (vm_tag_t) getVMTag(map);
1261*a325d9c4SApple OSS Distributions
1262*a325d9c4SApple OSS Distributions addr = 0;
1263*a325d9c4SApple OSS Distributions didAlloc = false;
1264*a325d9c4SApple OSS Distributions
1265*a325d9c4SApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1266*a325d9c4SApple OSS Distributions addr = *inaddr;
1267*a325d9c4SApple OSS Distributions }
1268*a325d9c4SApple OSS Distributions
1269*a325d9c4SApple OSS Distributions // find first entry for offset
1270*a325d9c4SApple OSS Distributions for (firstEntryIdx = 0;
1271*a325d9c4SApple OSS Distributions (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1272*a325d9c4SApple OSS Distributions firstEntryIdx++) {
1273*a325d9c4SApple OSS Distributions }
1274*a325d9c4SApple OSS Distributions firstEntryIdx--;
1275*a325d9c4SApple OSS Distributions
1276*a325d9c4SApple OSS Distributions // calculate required VM space
1277*a325d9c4SApple OSS Distributions
1278*a325d9c4SApple OSS Distributions entryIdx = firstEntryIdx;
1279*a325d9c4SApple OSS Distributions entry = &ref->entries[entryIdx];
1280*a325d9c4SApple OSS Distributions
1281*a325d9c4SApple OSS Distributions remain = size;
1282*a325d9c4SApple OSS Distributions int64_t iteroffset = offset;
1283*a325d9c4SApple OSS Distributions uint64_t mapSize = 0;
1284*a325d9c4SApple OSS Distributions while (remain) {
1285*a325d9c4SApple OSS Distributions entryOffset = iteroffset - entry->offset;
1286*a325d9c4SApple OSS Distributions if (entryOffset >= entry->size) {
1287*a325d9c4SApple OSS Distributions panic("entryOffset");
1288*a325d9c4SApple OSS Distributions }
1289*a325d9c4SApple OSS Distributions
1290*a325d9c4SApple OSS Distributions #if LOGUNALIGN
1291*a325d9c4SApple OSS Distributions printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1292*a325d9c4SApple OSS Distributions entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1293*a325d9c4SApple OSS Distributions #endif
1294*a325d9c4SApple OSS Distributions
1295*a325d9c4SApple OSS Distributions chunk = entry->size - entryOffset;
1296*a325d9c4SApple OSS Distributions if (chunk) {
1297*a325d9c4SApple OSS Distributions if (chunk > remain) {
1298*a325d9c4SApple OSS Distributions chunk = remain;
1299*a325d9c4SApple OSS Distributions }
1300*a325d9c4SApple OSS Distributions mach_vm_size_t entrySize;
1301*a325d9c4SApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1302*a325d9c4SApple OSS Distributions assert(KERN_SUCCESS == err);
1303*a325d9c4SApple OSS Distributions mapSize += entrySize;
1304*a325d9c4SApple OSS Distributions
1305*a325d9c4SApple OSS Distributions remain -= chunk;
1306*a325d9c4SApple OSS Distributions if (!remain) {
1307*a325d9c4SApple OSS Distributions break;
1308*a325d9c4SApple OSS Distributions }
1309*a325d9c4SApple OSS Distributions iteroffset += chunk; // - pageOffset;
1310*a325d9c4SApple OSS Distributions }
1311*a325d9c4SApple OSS Distributions entry++;
1312*a325d9c4SApple OSS Distributions entryIdx++;
1313*a325d9c4SApple OSS Distributions if (entryIdx >= ref->count) {
1314*a325d9c4SApple OSS Distributions panic("overrun");
1315*a325d9c4SApple OSS Distributions err = kIOReturnOverrun;
1316*a325d9c4SApple OSS Distributions break;
1317*a325d9c4SApple OSS Distributions }
1318*a325d9c4SApple OSS Distributions }
1319*a325d9c4SApple OSS Distributions
1320*a325d9c4SApple OSS Distributions if (kIOMapOverwrite & options) {
1321*a325d9c4SApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1322*a325d9c4SApple OSS Distributions map = IOPageableMapForAddress(addr);
1323*a325d9c4SApple OSS Distributions }
1324*a325d9c4SApple OSS Distributions err = KERN_SUCCESS;
1325*a325d9c4SApple OSS Distributions } else {
1326*a325d9c4SApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1327*a325d9c4SApple OSS Distributions ref.map = map;
1328*a325d9c4SApple OSS Distributions ref.tag = tag;
1329*a325d9c4SApple OSS Distributions ref.options = options;
1330*a325d9c4SApple OSS Distributions ref.size = mapSize;
1331*a325d9c4SApple OSS Distributions ref.prot = prot;
1332*a325d9c4SApple OSS Distributions if (options & kIOMapAnywhere) {
1333*a325d9c4SApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1334*a325d9c4SApple OSS Distributions ref.mapped = 0;
1335*a325d9c4SApple OSS Distributions } else {
1336*a325d9c4SApple OSS Distributions ref.mapped = addr;
1337*a325d9c4SApple OSS Distributions }
1338*a325d9c4SApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339*a325d9c4SApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1340*a325d9c4SApple OSS Distributions } else {
1341*a325d9c4SApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1342*a325d9c4SApple OSS Distributions }
1343*a325d9c4SApple OSS Distributions
1344*a325d9c4SApple OSS Distributions if (KERN_SUCCESS == err) {
1345*a325d9c4SApple OSS Distributions addr = ref.mapped;
1346*a325d9c4SApple OSS Distributions map = ref.map;
1347*a325d9c4SApple OSS Distributions didAlloc = true;
1348*a325d9c4SApple OSS Distributions }
1349*a325d9c4SApple OSS Distributions #if LOGUNALIGN
1350*a325d9c4SApple OSS Distributions IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1351*a325d9c4SApple OSS Distributions #endif
1352*a325d9c4SApple OSS Distributions }
1353*a325d9c4SApple OSS Distributions
1354*a325d9c4SApple OSS Distributions /*
1355*a325d9c4SApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1356*a325d9c4SApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1357*a325d9c4SApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1358*a325d9c4SApple OSS Distributions * operations.
1359*a325d9c4SApple OSS Distributions */
1360*a325d9c4SApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1361*a325d9c4SApple OSS Distributions options &= ~kIOMapPrefault;
1362*a325d9c4SApple OSS Distributions }
1363*a325d9c4SApple OSS Distributions
1364*a325d9c4SApple OSS Distributions /*
1365*a325d9c4SApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1366*a325d9c4SApple OSS Distributions * memory type, and the underlying data.
1367*a325d9c4SApple OSS Distributions */
1368*a325d9c4SApple OSS Distributions if (options & kIOMapPrefault) {
1369*a325d9c4SApple OSS Distributions /*
1370*a325d9c4SApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1371*a325d9c4SApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1372*a325d9c4SApple OSS Distributions */
1373*a325d9c4SApple OSS Distributions assert(_wireCount != 0);
1374*a325d9c4SApple OSS Distributions assert(_memoryEntries != NULL);
1375*a325d9c4SApple OSS Distributions if ((_wireCount == 0) ||
1376*a325d9c4SApple OSS Distributions (_memoryEntries == NULL)) {
1377*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
1378*a325d9c4SApple OSS Distributions }
1379*a325d9c4SApple OSS Distributions
1380*a325d9c4SApple OSS Distributions // Get the page list.
1381*a325d9c4SApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1382*a325d9c4SApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1383*a325d9c4SApple OSS Distributions pageList = getPageList(dataP);
1384*a325d9c4SApple OSS Distributions
1385*a325d9c4SApple OSS Distributions // Get the number of IOPLs.
1386*a325d9c4SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1387*a325d9c4SApple OSS Distributions
1388*a325d9c4SApple OSS Distributions /*
1389*a325d9c4SApple OSS Distributions * Scan through the IOPL Info Blocks, looking for the first block containing
1390*a325d9c4SApple OSS Distributions * the offset. The research will go past it, so we'll need to go back to the
1391*a325d9c4SApple OSS Distributions * right range at the end.
1392*a325d9c4SApple OSS Distributions */
1393*a325d9c4SApple OSS Distributions UInt ioplIndex = 0;
1394*a325d9c4SApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1395*a325d9c4SApple OSS Distributions ioplIndex++;
1396*a325d9c4SApple OSS Distributions }
1397*a325d9c4SApple OSS Distributions ioplIndex--;
1398*a325d9c4SApple OSS Distributions
1399*a325d9c4SApple OSS Distributions // Retrieve the IOPL info block.
1400*a325d9c4SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1401*a325d9c4SApple OSS Distributions
1402*a325d9c4SApple OSS Distributions /*
1403*a325d9c4SApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1404*a325d9c4SApple OSS Distributions * array.
1405*a325d9c4SApple OSS Distributions */
1406*a325d9c4SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1407*a325d9c4SApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1408*a325d9c4SApple OSS Distributions } else {
1409*a325d9c4SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1410*a325d9c4SApple OSS Distributions }
1411*a325d9c4SApple OSS Distributions
1412*a325d9c4SApple OSS Distributions // Rebase [offset] into the IOPL in order to looks for the first page index.
1413*a325d9c4SApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1414*a325d9c4SApple OSS Distributions
1415*a325d9c4SApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1416*a325d9c4SApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1417*a325d9c4SApple OSS Distributions }
1418*a325d9c4SApple OSS Distributions
1419*a325d9c4SApple OSS Distributions // enter mappings
1420*a325d9c4SApple OSS Distributions remain = size;
1421*a325d9c4SApple OSS Distributions mapAddr = addr;
1422*a325d9c4SApple OSS Distributions entryIdx = firstEntryIdx;
1423*a325d9c4SApple OSS Distributions entry = &ref->entries[entryIdx];
1424*a325d9c4SApple OSS Distributions
1425*a325d9c4SApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1426*a325d9c4SApple OSS Distributions #if LOGUNALIGN
1427*a325d9c4SApple OSS Distributions printf("offset %qx, %qx\n", offset, entry->offset);
1428*a325d9c4SApple OSS Distributions #endif
1429*a325d9c4SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1430*a325d9c4SApple OSS Distributions vm_size_t unused = 0;
1431*a325d9c4SApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1432*a325d9c4SApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1433*a325d9c4SApple OSS Distributions assert(KERN_SUCCESS == err);
1434*a325d9c4SApple OSS Distributions }
1435*a325d9c4SApple OSS Distributions entryOffset = offset - entry->offset;
1436*a325d9c4SApple OSS Distributions if (entryOffset >= entry->size) {
1437*a325d9c4SApple OSS Distributions panic("entryOffset");
1438*a325d9c4SApple OSS Distributions }
1439*a325d9c4SApple OSS Distributions chunk = entry->size - entryOffset;
1440*a325d9c4SApple OSS Distributions #if LOGUNALIGN
1441*a325d9c4SApple OSS Distributions printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1442*a325d9c4SApple OSS Distributions #endif
1443*a325d9c4SApple OSS Distributions if (chunk) {
1444*a325d9c4SApple OSS Distributions vm_map_kernel_flags_t vmk_flags;
1445*a325d9c4SApple OSS Distributions
1446*a325d9c4SApple OSS Distributions vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1447*a325d9c4SApple OSS Distributions vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1448*a325d9c4SApple OSS Distributions
1449*a325d9c4SApple OSS Distributions if (chunk > remain) {
1450*a325d9c4SApple OSS Distributions chunk = remain;
1451*a325d9c4SApple OSS Distributions }
1452*a325d9c4SApple OSS Distributions mapAddrOut = mapAddr;
1453*a325d9c4SApple OSS Distributions if (options & kIOMapPrefault) {
1454*a325d9c4SApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1455*a325d9c4SApple OSS Distributions
1456*a325d9c4SApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1457*a325d9c4SApple OSS Distributions &mapAddrOut,
1458*a325d9c4SApple OSS Distributions chunk, 0 /* mask */,
1459*a325d9c4SApple OSS Distributions (VM_FLAGS_FIXED
1460*a325d9c4SApple OSS Distributions | VM_FLAGS_OVERWRITE
1461*a325d9c4SApple OSS Distributions | VM_FLAGS_RETURN_DATA_ADDR),
1462*a325d9c4SApple OSS Distributions vmk_flags,
1463*a325d9c4SApple OSS Distributions tag,
1464*a325d9c4SApple OSS Distributions entry->entry,
1465*a325d9c4SApple OSS Distributions entryOffset,
1466*a325d9c4SApple OSS Distributions prot, // cur
1467*a325d9c4SApple OSS Distributions prot, // max
1468*a325d9c4SApple OSS Distributions &pageList[currentPageIndex],
1469*a325d9c4SApple OSS Distributions nb_pages);
1470*a325d9c4SApple OSS Distributions
1471*a325d9c4SApple OSS Distributions // Compute the next index in the page list.
1472*a325d9c4SApple OSS Distributions currentPageIndex += nb_pages;
1473*a325d9c4SApple OSS Distributions assert(currentPageIndex <= _pages);
1474*a325d9c4SApple OSS Distributions } else {
1475*a325d9c4SApple OSS Distributions #if LOGUNALIGN
1476*a325d9c4SApple OSS Distributions printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1477*a325d9c4SApple OSS Distributions #endif
1478*a325d9c4SApple OSS Distributions err = vm_map_enter_mem_object(map,
1479*a325d9c4SApple OSS Distributions &mapAddrOut,
1480*a325d9c4SApple OSS Distributions chunk, 0 /* mask */,
1481*a325d9c4SApple OSS Distributions (VM_FLAGS_FIXED
1482*a325d9c4SApple OSS Distributions | VM_FLAGS_OVERWRITE
1483*a325d9c4SApple OSS Distributions | VM_FLAGS_RETURN_DATA_ADDR),
1484*a325d9c4SApple OSS Distributions vmk_flags,
1485*a325d9c4SApple OSS Distributions tag,
1486*a325d9c4SApple OSS Distributions entry->entry,
1487*a325d9c4SApple OSS Distributions entryOffset,
1488*a325d9c4SApple OSS Distributions false, // copy
1489*a325d9c4SApple OSS Distributions prot, // cur
1490*a325d9c4SApple OSS Distributions prot, // max
1491*a325d9c4SApple OSS Distributions VM_INHERIT_NONE);
1492*a325d9c4SApple OSS Distributions }
1493*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != err) {
1494*a325d9c4SApple OSS Distributions panic("map enter err %x", err);
1495*a325d9c4SApple OSS Distributions break;
1496*a325d9c4SApple OSS Distributions }
1497*a325d9c4SApple OSS Distributions #if LOGUNALIGN
1498*a325d9c4SApple OSS Distributions printf("mapAddr o %qx\n", mapAddrOut);
1499*a325d9c4SApple OSS Distributions #endif
1500*a325d9c4SApple OSS Distributions if (entryIdx == firstEntryIdx) {
1501*a325d9c4SApple OSS Distributions addr = mapAddrOut;
1502*a325d9c4SApple OSS Distributions }
1503*a325d9c4SApple OSS Distributions remain -= chunk;
1504*a325d9c4SApple OSS Distributions if (!remain) {
1505*a325d9c4SApple OSS Distributions break;
1506*a325d9c4SApple OSS Distributions }
1507*a325d9c4SApple OSS Distributions mach_vm_size_t entrySize;
1508*a325d9c4SApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1509*a325d9c4SApple OSS Distributions assert(KERN_SUCCESS == err);
1510*a325d9c4SApple OSS Distributions mapAddr += entrySize;
1511*a325d9c4SApple OSS Distributions offset += chunk;
1512*a325d9c4SApple OSS Distributions }
1513*a325d9c4SApple OSS Distributions
1514*a325d9c4SApple OSS Distributions entry++;
1515*a325d9c4SApple OSS Distributions entryIdx++;
1516*a325d9c4SApple OSS Distributions if (entryIdx >= ref->count) {
1517*a325d9c4SApple OSS Distributions err = kIOReturnOverrun;
1518*a325d9c4SApple OSS Distributions break;
1519*a325d9c4SApple OSS Distributions }
1520*a325d9c4SApple OSS Distributions }
1521*a325d9c4SApple OSS Distributions
1522*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != err) {
1523*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1524*a325d9c4SApple OSS Distributions }
1525*a325d9c4SApple OSS Distributions
1526*a325d9c4SApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1527*a325d9c4SApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1528*a325d9c4SApple OSS Distributions addr = 0;
1529*a325d9c4SApple OSS Distributions }
1530*a325d9c4SApple OSS Distributions *inaddr = addr;
1531*a325d9c4SApple OSS Distributions
1532*a325d9c4SApple OSS Distributions return err;
1533*a325d9c4SApple OSS Distributions }
1534*a325d9c4SApple OSS Distributions
1535*a325d9c4SApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1536*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1537*a325d9c4SApple OSS Distributions IOMemoryReference * ref,
1538*a325d9c4SApple OSS Distributions uint64_t * offset)
1539*a325d9c4SApple OSS Distributions {
1540*a325d9c4SApple OSS Distributions kern_return_t kr;
1541*a325d9c4SApple OSS Distributions vm_object_offset_t data_offset = 0;
1542*a325d9c4SApple OSS Distributions uint64_t total;
1543*a325d9c4SApple OSS Distributions uint32_t idx;
1544*a325d9c4SApple OSS Distributions
1545*a325d9c4SApple OSS Distributions assert(ref->count);
1546*a325d9c4SApple OSS Distributions if (offset) {
1547*a325d9c4SApple OSS Distributions *offset = (uint64_t) data_offset;
1548*a325d9c4SApple OSS Distributions }
1549*a325d9c4SApple OSS Distributions total = 0;
1550*a325d9c4SApple OSS Distributions for (idx = 0; idx < ref->count; idx++) {
1551*a325d9c4SApple OSS Distributions kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1552*a325d9c4SApple OSS Distributions &data_offset);
1553*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != kr) {
1554*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1555*a325d9c4SApple OSS Distributions } else if (0 != data_offset) {
1556*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1557*a325d9c4SApple OSS Distributions }
1558*a325d9c4SApple OSS Distributions if (offset && !idx) {
1559*a325d9c4SApple OSS Distributions *offset = (uint64_t) data_offset;
1560*a325d9c4SApple OSS Distributions }
1561*a325d9c4SApple OSS Distributions total += round_page(data_offset + ref->entries[idx].size);
1562*a325d9c4SApple OSS Distributions }
1563*a325d9c4SApple OSS Distributions
1564*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1565*a325d9c4SApple OSS Distributions (offset ? *offset : (vm_object_offset_t)-1), total);
1566*a325d9c4SApple OSS Distributions
1567*a325d9c4SApple OSS Distributions return total;
1568*a325d9c4SApple OSS Distributions }
1569*a325d9c4SApple OSS Distributions
1570*a325d9c4SApple OSS Distributions
1571*a325d9c4SApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1572*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1573*a325d9c4SApple OSS Distributions IOMemoryReference * ref,
1574*a325d9c4SApple OSS Distributions IOByteCount * residentPageCount,
1575*a325d9c4SApple OSS Distributions IOByteCount * dirtyPageCount)
1576*a325d9c4SApple OSS Distributions {
1577*a325d9c4SApple OSS Distributions IOReturn err;
1578*a325d9c4SApple OSS Distributions IOMemoryEntry * entries;
1579*a325d9c4SApple OSS Distributions unsigned int resident, dirty;
1580*a325d9c4SApple OSS Distributions unsigned int totalResident, totalDirty;
1581*a325d9c4SApple OSS Distributions
1582*a325d9c4SApple OSS Distributions totalResident = totalDirty = 0;
1583*a325d9c4SApple OSS Distributions err = kIOReturnSuccess;
1584*a325d9c4SApple OSS Distributions entries = ref->entries + ref->count;
1585*a325d9c4SApple OSS Distributions while (entries > &ref->entries[0]) {
1586*a325d9c4SApple OSS Distributions entries--;
1587*a325d9c4SApple OSS Distributions err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1588*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != err) {
1589*a325d9c4SApple OSS Distributions break;
1590*a325d9c4SApple OSS Distributions }
1591*a325d9c4SApple OSS Distributions totalResident += resident;
1592*a325d9c4SApple OSS Distributions totalDirty += dirty;
1593*a325d9c4SApple OSS Distributions }
1594*a325d9c4SApple OSS Distributions
1595*a325d9c4SApple OSS Distributions if (residentPageCount) {
1596*a325d9c4SApple OSS Distributions *residentPageCount = totalResident;
1597*a325d9c4SApple OSS Distributions }
1598*a325d9c4SApple OSS Distributions if (dirtyPageCount) {
1599*a325d9c4SApple OSS Distributions *dirtyPageCount = totalDirty;
1600*a325d9c4SApple OSS Distributions }
1601*a325d9c4SApple OSS Distributions return err;
1602*a325d9c4SApple OSS Distributions }
1603*a325d9c4SApple OSS Distributions
1604*a325d9c4SApple OSS Distributions IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1605*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1606*a325d9c4SApple OSS Distributions IOMemoryReference * ref,
1607*a325d9c4SApple OSS Distributions IOOptionBits newState,
1608*a325d9c4SApple OSS Distributions IOOptionBits * oldState)
1609*a325d9c4SApple OSS Distributions {
1610*a325d9c4SApple OSS Distributions IOReturn err;
1611*a325d9c4SApple OSS Distributions IOMemoryEntry * entries;
1612*a325d9c4SApple OSS Distributions vm_purgable_t control;
1613*a325d9c4SApple OSS Distributions int totalState, state;
1614*a325d9c4SApple OSS Distributions
1615*a325d9c4SApple OSS Distributions totalState = kIOMemoryPurgeableNonVolatile;
1616*a325d9c4SApple OSS Distributions err = kIOReturnSuccess;
1617*a325d9c4SApple OSS Distributions entries = ref->entries + ref->count;
1618*a325d9c4SApple OSS Distributions while (entries > &ref->entries[0]) {
1619*a325d9c4SApple OSS Distributions entries--;
1620*a325d9c4SApple OSS Distributions
1621*a325d9c4SApple OSS Distributions err = purgeableControlBits(newState, &control, &state);
1622*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != err) {
1623*a325d9c4SApple OSS Distributions break;
1624*a325d9c4SApple OSS Distributions }
1625*a325d9c4SApple OSS Distributions err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1626*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != err) {
1627*a325d9c4SApple OSS Distributions break;
1628*a325d9c4SApple OSS Distributions }
1629*a325d9c4SApple OSS Distributions err = purgeableStateBits(&state);
1630*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != err) {
1631*a325d9c4SApple OSS Distributions break;
1632*a325d9c4SApple OSS Distributions }
1633*a325d9c4SApple OSS Distributions
1634*a325d9c4SApple OSS Distributions if (kIOMemoryPurgeableEmpty == state) {
1635*a325d9c4SApple OSS Distributions totalState = kIOMemoryPurgeableEmpty;
1636*a325d9c4SApple OSS Distributions } else if (kIOMemoryPurgeableEmpty == totalState) {
1637*a325d9c4SApple OSS Distributions continue;
1638*a325d9c4SApple OSS Distributions } else if (kIOMemoryPurgeableVolatile == totalState) {
1639*a325d9c4SApple OSS Distributions continue;
1640*a325d9c4SApple OSS Distributions } else if (kIOMemoryPurgeableVolatile == state) {
1641*a325d9c4SApple OSS Distributions totalState = kIOMemoryPurgeableVolatile;
1642*a325d9c4SApple OSS Distributions } else {
1643*a325d9c4SApple OSS Distributions totalState = kIOMemoryPurgeableNonVolatile;
1644*a325d9c4SApple OSS Distributions }
1645*a325d9c4SApple OSS Distributions }
1646*a325d9c4SApple OSS Distributions
1647*a325d9c4SApple OSS Distributions if (oldState) {
1648*a325d9c4SApple OSS Distributions *oldState = totalState;
1649*a325d9c4SApple OSS Distributions }
1650*a325d9c4SApple OSS Distributions return err;
1651*a325d9c4SApple OSS Distributions }
1652*a325d9c4SApple OSS Distributions
1653*a325d9c4SApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1654*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1655*a325d9c4SApple OSS Distributions IOMemoryReference * ref,
1656*a325d9c4SApple OSS Distributions task_t newOwner,
1657*a325d9c4SApple OSS Distributions int newLedgerTag,
1658*a325d9c4SApple OSS Distributions IOOptionBits newLedgerOptions)
1659*a325d9c4SApple OSS Distributions {
1660*a325d9c4SApple OSS Distributions IOReturn err, totalErr;
1661*a325d9c4SApple OSS Distributions IOMemoryEntry * entries;
1662*a325d9c4SApple OSS Distributions
1663*a325d9c4SApple OSS Distributions totalErr = kIOReturnSuccess;
1664*a325d9c4SApple OSS Distributions entries = ref->entries + ref->count;
1665*a325d9c4SApple OSS Distributions while (entries > &ref->entries[0]) {
1666*a325d9c4SApple OSS Distributions entries--;
1667*a325d9c4SApple OSS Distributions
1668*a325d9c4SApple OSS Distributions err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1669*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != err) {
1670*a325d9c4SApple OSS Distributions totalErr = err;
1671*a325d9c4SApple OSS Distributions }
1672*a325d9c4SApple OSS Distributions }
1673*a325d9c4SApple OSS Distributions
1674*a325d9c4SApple OSS Distributions return totalErr;
1675*a325d9c4SApple OSS Distributions }
1676*a325d9c4SApple OSS Distributions
1677*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1678*a325d9c4SApple OSS Distributions
1679*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1680*a325d9c4SApple OSS Distributions IOMemoryDescriptor::withAddress(void * address,
1681*a325d9c4SApple OSS Distributions IOByteCount length,
1682*a325d9c4SApple OSS Distributions IODirection direction)
1683*a325d9c4SApple OSS Distributions {
1684*a325d9c4SApple OSS Distributions return IOMemoryDescriptor::
1685*a325d9c4SApple OSS Distributions withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1686*a325d9c4SApple OSS Distributions }
1687*a325d9c4SApple OSS Distributions
#ifndef __LP64__
/*
 * Legacy 32-bit entry point: create a descriptor for a single virtual
 * range in the given task via initWithAddress().
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t task)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> md = OSMakeShared<IOGeneralMemoryDescriptor>();

	if (!md || !md->initWithAddress(address, length, direction, task)) {
		return nullptr;
	}
	return os::move(md);
}
#endif /* !__LP64__ */
1704*a325d9c4SApple OSS Distributions
1705*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1706*a325d9c4SApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1707*a325d9c4SApple OSS Distributions IOPhysicalAddress address,
1708*a325d9c4SApple OSS Distributions IOByteCount length,
1709*a325d9c4SApple OSS Distributions IODirection direction )
1710*a325d9c4SApple OSS Distributions {
1711*a325d9c4SApple OSS Distributions return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1712*a325d9c4SApple OSS Distributions }
1713*a325d9c4SApple OSS Distributions
#ifndef __LP64__
/*
 * Legacy 32-bit entry point: create a descriptor over several virtual
 * ranges in one task. Passing asReference avoids copying the range
 * array.
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
    UInt32 withCount,
    IODirection direction,
    task_t task,
    bool asReference)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> md = OSMakeShared<IOGeneralMemoryDescriptor>();

	if (!md || !md->initWithRanges(ranges, withCount, direction, task, asReference)) {
		return nullptr;
	}
	return os::move(md);
}
#endif /* !__LP64__ */
1731*a325d9c4SApple OSS Distributions
1732*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1733*a325d9c4SApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1734*a325d9c4SApple OSS Distributions mach_vm_size_t length,
1735*a325d9c4SApple OSS Distributions IOOptionBits options,
1736*a325d9c4SApple OSS Distributions task_t task)
1737*a325d9c4SApple OSS Distributions {
1738*a325d9c4SApple OSS Distributions IOAddressRange range = { address, length };
1739*a325d9c4SApple OSS Distributions return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1740*a325d9c4SApple OSS Distributions }
1741*a325d9c4SApple OSS Distributions
1742*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1743*a325d9c4SApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1744*a325d9c4SApple OSS Distributions UInt32 rangeCount,
1745*a325d9c4SApple OSS Distributions IOOptionBits options,
1746*a325d9c4SApple OSS Distributions task_t task)
1747*a325d9c4SApple OSS Distributions {
1748*a325d9c4SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1749*a325d9c4SApple OSS Distributions if (that) {
1750*a325d9c4SApple OSS Distributions if (task) {
1751*a325d9c4SApple OSS Distributions options |= kIOMemoryTypeVirtual64;
1752*a325d9c4SApple OSS Distributions } else {
1753*a325d9c4SApple OSS Distributions options |= kIOMemoryTypePhysical64;
1754*a325d9c4SApple OSS Distributions }
1755*a325d9c4SApple OSS Distributions
1756*a325d9c4SApple OSS Distributions if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1757*a325d9c4SApple OSS Distributions return os::move(that);
1758*a325d9c4SApple OSS Distributions }
1759*a325d9c4SApple OSS Distributions }
1760*a325d9c4SApple OSS Distributions
1761*a325d9c4SApple OSS Distributions return nullptr;
1762*a325d9c4SApple OSS Distributions }
1763*a325d9c4SApple OSS Distributions
1764*a325d9c4SApple OSS Distributions
1765*a325d9c4SApple OSS Distributions /*
1766*a325d9c4SApple OSS Distributions * withOptions:
1767*a325d9c4SApple OSS Distributions *
1768*a325d9c4SApple OSS Distributions * Create a new IOMemoryDescriptor. The buffer is made up of several
1769*a325d9c4SApple OSS Distributions * virtual address ranges, from a given task.
1770*a325d9c4SApple OSS Distributions *
1771*a325d9c4SApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1772*a325d9c4SApple OSS Distributions */
1773*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1774*a325d9c4SApple OSS Distributions IOMemoryDescriptor::withOptions(void * buffers,
1775*a325d9c4SApple OSS Distributions UInt32 count,
1776*a325d9c4SApple OSS Distributions UInt32 offset,
1777*a325d9c4SApple OSS Distributions task_t task,
1778*a325d9c4SApple OSS Distributions IOOptionBits opts,
1779*a325d9c4SApple OSS Distributions IOMapper * mapper)
1780*a325d9c4SApple OSS Distributions {
1781*a325d9c4SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1782*a325d9c4SApple OSS Distributions
1783*a325d9c4SApple OSS Distributions if (self
1784*a325d9c4SApple OSS Distributions && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1785*a325d9c4SApple OSS Distributions return nullptr;
1786*a325d9c4SApple OSS Distributions }
1787*a325d9c4SApple OSS Distributions
1788*a325d9c4SApple OSS Distributions return os::move(self);
1789*a325d9c4SApple OSS Distributions }
1790*a325d9c4SApple OSS Distributions
/*
 * Base-class stub: IOMemoryDescriptor itself cannot be initialized with
 * options; subclasses (e.g. IOGeneralMemoryDescriptor) override this.
 * Always fails.
 */
bool
IOMemoryDescriptor::initWithOptions(void * buffers,
    UInt32 count,
    UInt32 offset,
    task_t task,
    IOOptionBits options,
    IOMapper * mapper)
{
	return false;
}
1801*a325d9c4SApple OSS Distributions
1802*a325d9c4SApple OSS Distributions #ifndef __LP64__
1803*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1804*a325d9c4SApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1805*a325d9c4SApple OSS Distributions UInt32 withCount,
1806*a325d9c4SApple OSS Distributions IODirection direction,
1807*a325d9c4SApple OSS Distributions bool asReference)
1808*a325d9c4SApple OSS Distributions {
1809*a325d9c4SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1810*a325d9c4SApple OSS Distributions if (that) {
1811*a325d9c4SApple OSS Distributions if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1812*a325d9c4SApple OSS Distributions return os::move(that);
1813*a325d9c4SApple OSS Distributions }
1814*a325d9c4SApple OSS Distributions }
1815*a325d9c4SApple OSS Distributions return nullptr;
1816*a325d9c4SApple OSS Distributions }
1817*a325d9c4SApple OSS Distributions
1818*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1819*a325d9c4SApple OSS Distributions IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1820*a325d9c4SApple OSS Distributions IOByteCount offset,
1821*a325d9c4SApple OSS Distributions IOByteCount length,
1822*a325d9c4SApple OSS Distributions IODirection direction)
1823*a325d9c4SApple OSS Distributions {
1824*a325d9c4SApple OSS Distributions return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1825*a325d9c4SApple OSS Distributions }
1826*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
1827*a325d9c4SApple OSS Distributions
1828*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1829*a325d9c4SApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1830*a325d9c4SApple OSS Distributions {
1831*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor *origGenMD =
1832*a325d9c4SApple OSS Distributions OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1833*a325d9c4SApple OSS Distributions
1834*a325d9c4SApple OSS Distributions if (origGenMD) {
1835*a325d9c4SApple OSS Distributions return IOGeneralMemoryDescriptor::
1836*a325d9c4SApple OSS Distributions withPersistentMemoryDescriptor(origGenMD);
1837*a325d9c4SApple OSS Distributions } else {
1838*a325d9c4SApple OSS Distributions return nullptr;
1839*a325d9c4SApple OSS Distributions }
1840*a325d9c4SApple OSS Distributions }
1841*a325d9c4SApple OSS Distributions
/*
 * Create (or reuse) a persistent descriptor sharing the original's
 * backing memory reference. If the reuse-capable memoryReferenceCreate
 * hands back the original's own _memRef, the original descriptor itself
 * is retained and returned; otherwise a new descriptor is initialized
 * from the (originalMD, memRef) pair via the kIOMemoryTypePersistentMD
 * path of initWithOptions().
 */
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// Reuse hit: the original already owns this reference, so drop the
	// extra count taken by memoryReferenceCreate and return the original.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	// NOTE(review): on the failure paths below (allocation failure or
	// initWithOptions failure) memRef does not appear to be released
	// here — presumably either the destructor covers it or this leaks;
	// confirm against memoryReferenceRelease/free paths.
	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1867*a325d9c4SApple OSS Distributions
1868*a325d9c4SApple OSS Distributions #ifndef __LP64__
1869*a325d9c4SApple OSS Distributions bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1870*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(void * address,
1871*a325d9c4SApple OSS Distributions IOByteCount withLength,
1872*a325d9c4SApple OSS Distributions IODirection withDirection)
1873*a325d9c4SApple OSS Distributions {
1874*a325d9c4SApple OSS Distributions _singleRange.v.address = (vm_offset_t) address;
1875*a325d9c4SApple OSS Distributions _singleRange.v.length = withLength;
1876*a325d9c4SApple OSS Distributions
1877*a325d9c4SApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1878*a325d9c4SApple OSS Distributions }
1879*a325d9c4SApple OSS Distributions
1880*a325d9c4SApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1881*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1882*a325d9c4SApple OSS Distributions IOByteCount withLength,
1883*a325d9c4SApple OSS Distributions IODirection withDirection,
1884*a325d9c4SApple OSS Distributions task_t withTask)
1885*a325d9c4SApple OSS Distributions {
1886*a325d9c4SApple OSS Distributions _singleRange.v.address = address;
1887*a325d9c4SApple OSS Distributions _singleRange.v.length = withLength;
1888*a325d9c4SApple OSS Distributions
1889*a325d9c4SApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1890*a325d9c4SApple OSS Distributions }
1891*a325d9c4SApple OSS Distributions
1892*a325d9c4SApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1893*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1894*a325d9c4SApple OSS Distributions IOPhysicalAddress address,
1895*a325d9c4SApple OSS Distributions IOByteCount withLength,
1896*a325d9c4SApple OSS Distributions IODirection withDirection )
1897*a325d9c4SApple OSS Distributions {
1898*a325d9c4SApple OSS Distributions _singleRange.p.address = address;
1899*a325d9c4SApple OSS Distributions _singleRange.p.length = withLength;
1900*a325d9c4SApple OSS Distributions
1901*a325d9c4SApple OSS Distributions return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1902*a325d9c4SApple OSS Distributions }
1903*a325d9c4SApple OSS Distributions
1904*a325d9c4SApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1905*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1906*a325d9c4SApple OSS Distributions IOPhysicalRange * ranges,
1907*a325d9c4SApple OSS Distributions UInt32 count,
1908*a325d9c4SApple OSS Distributions IODirection direction,
1909*a325d9c4SApple OSS Distributions bool reference)
1910*a325d9c4SApple OSS Distributions {
1911*a325d9c4SApple OSS Distributions IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1912*a325d9c4SApple OSS Distributions
1913*a325d9c4SApple OSS Distributions if (reference) {
1914*a325d9c4SApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1915*a325d9c4SApple OSS Distributions }
1916*a325d9c4SApple OSS Distributions
1917*a325d9c4SApple OSS Distributions return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1918*a325d9c4SApple OSS Distributions }
1919*a325d9c4SApple OSS Distributions
1920*a325d9c4SApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1921*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1922*a325d9c4SApple OSS Distributions IOVirtualRange * ranges,
1923*a325d9c4SApple OSS Distributions UInt32 count,
1924*a325d9c4SApple OSS Distributions IODirection direction,
1925*a325d9c4SApple OSS Distributions task_t task,
1926*a325d9c4SApple OSS Distributions bool reference)
1927*a325d9c4SApple OSS Distributions {
1928*a325d9c4SApple OSS Distributions IOOptionBits mdOpts = direction;
1929*a325d9c4SApple OSS Distributions
1930*a325d9c4SApple OSS Distributions if (reference) {
1931*a325d9c4SApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1932*a325d9c4SApple OSS Distributions }
1933*a325d9c4SApple OSS Distributions
1934*a325d9c4SApple OSS Distributions if (task) {
1935*a325d9c4SApple OSS Distributions mdOpts |= kIOMemoryTypeVirtual;
1936*a325d9c4SApple OSS Distributions
1937*a325d9c4SApple OSS Distributions // Auto-prepare if this is a kernel memory descriptor as very few
1938*a325d9c4SApple OSS Distributions // clients bother to prepare() kernel memory.
1939*a325d9c4SApple OSS Distributions // But it was not enforced so what are you going to do?
1940*a325d9c4SApple OSS Distributions if (task == kernel_task) {
1941*a325d9c4SApple OSS Distributions mdOpts |= kIOMemoryAutoPrepare;
1942*a325d9c4SApple OSS Distributions }
1943*a325d9c4SApple OSS Distributions } else {
1944*a325d9c4SApple OSS Distributions mdOpts |= kIOMemoryTypePhysical;
1945*a325d9c4SApple OSS Distributions }
1946*a325d9c4SApple OSS Distributions
1947*a325d9c4SApple OSS Distributions return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1948*a325d9c4SApple OSS Distributions }
1949*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
1950*a325d9c4SApple OSS Distributions
1951*a325d9c4SApple OSS Distributions /*
1952*a325d9c4SApple OSS Distributions * initWithOptions:
1953*a325d9c4SApple OSS Distributions *
1954*a325d9c4SApple OSS Distributions * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1955*a325d9c4SApple OSS Distributions * from a given task, several physical ranges, an UPL from the ubc
1956*a325d9c4SApple OSS Distributions * system or a uio (may be 64bit) from the BSD subsystem.
1957*a325d9c4SApple OSS Distributions *
1958*a325d9c4SApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1959*a325d9c4SApple OSS Distributions *
1960*a325d9c4SApple OSS Distributions * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1961*a325d9c4SApple OSS Distributions * existing instance -- note this behavior is not commonly supported in other
1962*a325d9c4SApple OSS Distributions * I/O Kit classes, although it is supported here.
1963*a325d9c4SApple OSS Distributions */
1964*a325d9c4SApple OSS Distributions
1965*a325d9c4SApple OSS Distributions bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1966*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1967*a325d9c4SApple OSS Distributions UInt32 count,
1968*a325d9c4SApple OSS Distributions UInt32 offset,
1969*a325d9c4SApple OSS Distributions task_t task,
1970*a325d9c4SApple OSS Distributions IOOptionBits options,
1971*a325d9c4SApple OSS Distributions IOMapper * mapper)
1972*a325d9c4SApple OSS Distributions {
1973*a325d9c4SApple OSS Distributions IOOptionBits type = options & kIOMemoryTypeMask;
1974*a325d9c4SApple OSS Distributions
1975*a325d9c4SApple OSS Distributions #ifndef __LP64__
1976*a325d9c4SApple OSS Distributions if (task
1977*a325d9c4SApple OSS Distributions && (kIOMemoryTypeVirtual == type)
1978*a325d9c4SApple OSS Distributions && vm_map_is_64bit(get_task_map(task))
1979*a325d9c4SApple OSS Distributions && ((IOVirtualRange *) buffers)->address) {
1980*a325d9c4SApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1981*a325d9c4SApple OSS Distributions return false;
1982*a325d9c4SApple OSS Distributions }
1983*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
1984*a325d9c4SApple OSS Distributions
	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
1987*a325d9c4SApple OSS Distributions if (kIOMemoryTypePersistentMD == type) {
1988*a325d9c4SApple OSS Distributions IOMDPersistentInitData *initData = (typeof(initData))buffers;
1989*a325d9c4SApple OSS Distributions const IOGeneralMemoryDescriptor *orig = initData->fMD;
1990*a325d9c4SApple OSS Distributions ioGMDData *dataP = getDataP(orig->_memoryEntries);
1991*a325d9c4SApple OSS Distributions
1992*a325d9c4SApple OSS Distributions // Only accept persistent memory descriptors with valid dataP data.
1993*a325d9c4SApple OSS Distributions assert(orig->_rangesCount == 1);
1994*a325d9c4SApple OSS Distributions if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
1995*a325d9c4SApple OSS Distributions return false;
1996*a325d9c4SApple OSS Distributions }
1997*a325d9c4SApple OSS Distributions
1998*a325d9c4SApple OSS Distributions _memRef = initData->fMemRef; // Grab the new named entry
1999*a325d9c4SApple OSS Distributions options = orig->_flags & ~kIOMemoryAsReference;
2000*a325d9c4SApple OSS Distributions type = options & kIOMemoryTypeMask;
2001*a325d9c4SApple OSS Distributions buffers = orig->_ranges.v;
2002*a325d9c4SApple OSS Distributions count = orig->_rangesCount;
2003*a325d9c4SApple OSS Distributions
2004*a325d9c4SApple OSS Distributions // Now grab the original task and whatever mapper was previously used
2005*a325d9c4SApple OSS Distributions task = orig->_task;
2006*a325d9c4SApple OSS Distributions mapper = dataP->fMapper;
2007*a325d9c4SApple OSS Distributions
2008*a325d9c4SApple OSS Distributions // We are ready to go through the original initialisation now
2009*a325d9c4SApple OSS Distributions }
2010*a325d9c4SApple OSS Distributions
2011*a325d9c4SApple OSS Distributions switch (type) {
2012*a325d9c4SApple OSS Distributions case kIOMemoryTypeUIO:
2013*a325d9c4SApple OSS Distributions case kIOMemoryTypeVirtual:
2014*a325d9c4SApple OSS Distributions #ifndef __LP64__
2015*a325d9c4SApple OSS Distributions case kIOMemoryTypeVirtual64:
2016*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2017*a325d9c4SApple OSS Distributions assert(task);
2018*a325d9c4SApple OSS Distributions if (!task) {
2019*a325d9c4SApple OSS Distributions return false;
2020*a325d9c4SApple OSS Distributions }
2021*a325d9c4SApple OSS Distributions break;
2022*a325d9c4SApple OSS Distributions
2023*a325d9c4SApple OSS Distributions case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
2024*a325d9c4SApple OSS Distributions #ifndef __LP64__
2025*a325d9c4SApple OSS Distributions case kIOMemoryTypePhysical64:
2026*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2027*a325d9c4SApple OSS Distributions case kIOMemoryTypeUPL:
2028*a325d9c4SApple OSS Distributions assert(!task);
2029*a325d9c4SApple OSS Distributions break;
2030*a325d9c4SApple OSS Distributions default:
2031*a325d9c4SApple OSS Distributions return false; /* bad argument */
2032*a325d9c4SApple OSS Distributions }
2033*a325d9c4SApple OSS Distributions
2034*a325d9c4SApple OSS Distributions assert(buffers);
2035*a325d9c4SApple OSS Distributions assert(count);
2036*a325d9c4SApple OSS Distributions
2037*a325d9c4SApple OSS Distributions /*
2038*a325d9c4SApple OSS Distributions * We can check the _initialized instance variable before having ever set
2039*a325d9c4SApple OSS Distributions * it to an initial value because I/O Kit guarantees that all our instance
2040*a325d9c4SApple OSS Distributions * variables are zeroed on an object's allocation.
2041*a325d9c4SApple OSS Distributions */
2042*a325d9c4SApple OSS Distributions
2043*a325d9c4SApple OSS Distributions if (_initialized) {
2044*a325d9c4SApple OSS Distributions /*
2045*a325d9c4SApple OSS Distributions * An existing memory descriptor is being retargeted to point to
2046*a325d9c4SApple OSS Distributions * somewhere else. Clean up our present state.
2047*a325d9c4SApple OSS Distributions */
2048*a325d9c4SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
2049*a325d9c4SApple OSS Distributions if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2050*a325d9c4SApple OSS Distributions while (_wireCount) {
2051*a325d9c4SApple OSS Distributions complete();
2052*a325d9c4SApple OSS Distributions }
2053*a325d9c4SApple OSS Distributions }
2054*a325d9c4SApple OSS Distributions if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2055*a325d9c4SApple OSS Distributions if (kIOMemoryTypeUIO == type) {
2056*a325d9c4SApple OSS Distributions uio_free((uio_t) _ranges.v);
2057*a325d9c4SApple OSS Distributions }
2058*a325d9c4SApple OSS Distributions #ifndef __LP64__
2059*a325d9c4SApple OSS Distributions else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2060*a325d9c4SApple OSS Distributions IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2061*a325d9c4SApple OSS Distributions }
2062*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2063*a325d9c4SApple OSS Distributions else {
2064*a325d9c4SApple OSS Distributions IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2065*a325d9c4SApple OSS Distributions }
2066*a325d9c4SApple OSS Distributions }
2067*a325d9c4SApple OSS Distributions
2068*a325d9c4SApple OSS Distributions options |= (kIOMemoryRedirected & _flags);
2069*a325d9c4SApple OSS Distributions if (!(kIOMemoryRedirected & options)) {
2070*a325d9c4SApple OSS Distributions if (_memRef) {
2071*a325d9c4SApple OSS Distributions memoryReferenceRelease(_memRef);
2072*a325d9c4SApple OSS Distributions _memRef = NULL;
2073*a325d9c4SApple OSS Distributions }
2074*a325d9c4SApple OSS Distributions if (_mappings) {
2075*a325d9c4SApple OSS Distributions _mappings->flushCollection();
2076*a325d9c4SApple OSS Distributions }
2077*a325d9c4SApple OSS Distributions }
2078*a325d9c4SApple OSS Distributions } else {
2079*a325d9c4SApple OSS Distributions if (!super::init()) {
2080*a325d9c4SApple OSS Distributions return false;
2081*a325d9c4SApple OSS Distributions }
2082*a325d9c4SApple OSS Distributions _initialized = true;
2083*a325d9c4SApple OSS Distributions }
2084*a325d9c4SApple OSS Distributions
2085*a325d9c4SApple OSS Distributions // Grab the appropriate mapper
2086*a325d9c4SApple OSS Distributions if (kIOMemoryHostOrRemote & options) {
2087*a325d9c4SApple OSS Distributions options |= kIOMemoryMapperNone;
2088*a325d9c4SApple OSS Distributions }
2089*a325d9c4SApple OSS Distributions if (kIOMemoryMapperNone & options) {
2090*a325d9c4SApple OSS Distributions mapper = NULL; // No Mapper
2091*a325d9c4SApple OSS Distributions } else if (mapper == kIOMapperSystem) {
2092*a325d9c4SApple OSS Distributions IOMapper::checkForSystemMapper();
2093*a325d9c4SApple OSS Distributions gIOSystemMapper = mapper = IOMapper::gSystem;
2094*a325d9c4SApple OSS Distributions }
2095*a325d9c4SApple OSS Distributions
2096*a325d9c4SApple OSS Distributions // Remove the dynamic internal use flags from the initial setting
2097*a325d9c4SApple OSS Distributions options &= ~(kIOMemoryPreparedReadOnly);
2098*a325d9c4SApple OSS Distributions _flags = options;
2099*a325d9c4SApple OSS Distributions _task = task;
2100*a325d9c4SApple OSS Distributions
2101*a325d9c4SApple OSS Distributions #ifndef __LP64__
2102*a325d9c4SApple OSS Distributions _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
2103*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2104*a325d9c4SApple OSS Distributions
2105*a325d9c4SApple OSS Distributions _dmaReferences = 0;
2106*a325d9c4SApple OSS Distributions __iomd_reservedA = 0;
2107*a325d9c4SApple OSS Distributions __iomd_reservedB = 0;
2108*a325d9c4SApple OSS Distributions _highestPage = 0;
2109*a325d9c4SApple OSS Distributions
2110*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & options) {
2111*a325d9c4SApple OSS Distributions if (!_prepareLock) {
2112*a325d9c4SApple OSS Distributions _prepareLock = IOLockAlloc();
2113*a325d9c4SApple OSS Distributions }
2114*a325d9c4SApple OSS Distributions } else if (_prepareLock) {
2115*a325d9c4SApple OSS Distributions IOLockFree(_prepareLock);
2116*a325d9c4SApple OSS Distributions _prepareLock = NULL;
2117*a325d9c4SApple OSS Distributions }
2118*a325d9c4SApple OSS Distributions
2119*a325d9c4SApple OSS Distributions if (kIOMemoryTypeUPL == type) {
2120*a325d9c4SApple OSS Distributions ioGMDData *dataP;
2121*a325d9c4SApple OSS Distributions unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2122*a325d9c4SApple OSS Distributions
2123*a325d9c4SApple OSS Distributions if (!initMemoryEntries(dataSize, mapper)) {
2124*a325d9c4SApple OSS Distributions return false;
2125*a325d9c4SApple OSS Distributions }
2126*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
2127*a325d9c4SApple OSS Distributions dataP->fPageCnt = 0;
2128*a325d9c4SApple OSS Distributions switch (kIOMemoryDirectionMask & options) {
2129*a325d9c4SApple OSS Distributions case kIODirectionOut:
2130*a325d9c4SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess;
2131*a325d9c4SApple OSS Distributions break;
2132*a325d9c4SApple OSS Distributions case kIODirectionIn:
2133*a325d9c4SApple OSS Distributions dataP->fDMAAccess = kIODMAMapWriteAccess;
2134*a325d9c4SApple OSS Distributions break;
2135*a325d9c4SApple OSS Distributions case kIODirectionNone:
2136*a325d9c4SApple OSS Distributions case kIODirectionOutIn:
2137*a325d9c4SApple OSS Distributions default:
2138*a325d9c4SApple OSS Distributions panic("bad dir for upl 0x%x", (int) options);
2139*a325d9c4SApple OSS Distributions break;
2140*a325d9c4SApple OSS Distributions }
2141*a325d9c4SApple OSS Distributions // _wireCount++; // UPLs start out life wired
2142*a325d9c4SApple OSS Distributions
2143*a325d9c4SApple OSS Distributions _length = count;
2144*a325d9c4SApple OSS Distributions _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2145*a325d9c4SApple OSS Distributions
2146*a325d9c4SApple OSS Distributions ioPLBlock iopl;
2147*a325d9c4SApple OSS Distributions iopl.fIOPL = (upl_t) buffers;
2148*a325d9c4SApple OSS Distributions upl_set_referenced(iopl.fIOPL, true);
2149*a325d9c4SApple OSS Distributions upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2150*a325d9c4SApple OSS Distributions
2151*a325d9c4SApple OSS Distributions if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2152*a325d9c4SApple OSS Distributions panic("short external upl");
2153*a325d9c4SApple OSS Distributions }
2154*a325d9c4SApple OSS Distributions
2155*a325d9c4SApple OSS Distributions _highestPage = upl_get_highest_page(iopl.fIOPL);
2156*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2157*a325d9c4SApple OSS Distributions
2158*a325d9c4SApple OSS Distributions // Set the flag kIOPLOnDevice convieniently equal to 1
2159*a325d9c4SApple OSS Distributions iopl.fFlags = pageList->device | kIOPLExternUPL;
2160*a325d9c4SApple OSS Distributions if (!pageList->device) {
2161*a325d9c4SApple OSS Distributions // Pre-compute the offset into the UPL's page list
2162*a325d9c4SApple OSS Distributions pageList = &pageList[atop_32(offset)];
2163*a325d9c4SApple OSS Distributions offset &= PAGE_MASK;
2164*a325d9c4SApple OSS Distributions }
2165*a325d9c4SApple OSS Distributions iopl.fIOMDOffset = 0;
2166*a325d9c4SApple OSS Distributions iopl.fMappedPage = 0;
2167*a325d9c4SApple OSS Distributions iopl.fPageInfo = (vm_address_t) pageList;
2168*a325d9c4SApple OSS Distributions iopl.fPageOffset = offset;
2169*a325d9c4SApple OSS Distributions _memoryEntries->appendBytes(&iopl, sizeof(iopl));
2170*a325d9c4SApple OSS Distributions } else {
2171*a325d9c4SApple OSS Distributions // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2172*a325d9c4SApple OSS Distributions // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2173*a325d9c4SApple OSS Distributions
2174*a325d9c4SApple OSS Distributions // Initialize the memory descriptor
2175*a325d9c4SApple OSS Distributions if (options & kIOMemoryAsReference) {
2176*a325d9c4SApple OSS Distributions #ifndef __LP64__
2177*a325d9c4SApple OSS Distributions _rangesIsAllocated = false;
2178*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2179*a325d9c4SApple OSS Distributions
2180*a325d9c4SApple OSS Distributions // Hack assignment to get the buffer arg into _ranges.
2181*a325d9c4SApple OSS Distributions // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2182*a325d9c4SApple OSS Distributions // work, C++ sigh.
2183*a325d9c4SApple OSS Distributions // This also initialises the uio & physical ranges.
2184*a325d9c4SApple OSS Distributions _ranges.v = (IOVirtualRange *) buffers;
2185*a325d9c4SApple OSS Distributions } else {
2186*a325d9c4SApple OSS Distributions #ifndef __LP64__
2187*a325d9c4SApple OSS Distributions _rangesIsAllocated = true;
2188*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2189*a325d9c4SApple OSS Distributions switch (type) {
2190*a325d9c4SApple OSS Distributions case kIOMemoryTypeUIO:
2191*a325d9c4SApple OSS Distributions _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2192*a325d9c4SApple OSS Distributions break;
2193*a325d9c4SApple OSS Distributions
2194*a325d9c4SApple OSS Distributions #ifndef __LP64__
2195*a325d9c4SApple OSS Distributions case kIOMemoryTypeVirtual64:
2196*a325d9c4SApple OSS Distributions case kIOMemoryTypePhysical64:
2197*a325d9c4SApple OSS Distributions if (count == 1
2198*a325d9c4SApple OSS Distributions #ifndef __arm__
2199*a325d9c4SApple OSS Distributions && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2200*a325d9c4SApple OSS Distributions #endif
2201*a325d9c4SApple OSS Distributions ) {
2202*a325d9c4SApple OSS Distributions if (kIOMemoryTypeVirtual64 == type) {
2203*a325d9c4SApple OSS Distributions type = kIOMemoryTypeVirtual;
2204*a325d9c4SApple OSS Distributions } else {
2205*a325d9c4SApple OSS Distributions type = kIOMemoryTypePhysical;
2206*a325d9c4SApple OSS Distributions }
2207*a325d9c4SApple OSS Distributions _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2208*a325d9c4SApple OSS Distributions _rangesIsAllocated = false;
2209*a325d9c4SApple OSS Distributions _ranges.v = &_singleRange.v;
2210*a325d9c4SApple OSS Distributions _singleRange.v.address = ((IOAddressRange *) buffers)->address;
2211*a325d9c4SApple OSS Distributions _singleRange.v.length = ((IOAddressRange *) buffers)->length;
2212*a325d9c4SApple OSS Distributions break;
2213*a325d9c4SApple OSS Distributions }
2214*a325d9c4SApple OSS Distributions _ranges.v64 = IONew(IOAddressRange, count);
2215*a325d9c4SApple OSS Distributions if (!_ranges.v64) {
2216*a325d9c4SApple OSS Distributions return false;
2217*a325d9c4SApple OSS Distributions }
2218*a325d9c4SApple OSS Distributions bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2219*a325d9c4SApple OSS Distributions break;
2220*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2221*a325d9c4SApple OSS Distributions case kIOMemoryTypeVirtual:
2222*a325d9c4SApple OSS Distributions case kIOMemoryTypePhysical:
2223*a325d9c4SApple OSS Distributions if (count == 1) {
2224*a325d9c4SApple OSS Distributions _flags |= kIOMemoryAsReference;
2225*a325d9c4SApple OSS Distributions #ifndef __LP64__
2226*a325d9c4SApple OSS Distributions _rangesIsAllocated = false;
2227*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2228*a325d9c4SApple OSS Distributions _ranges.v = &_singleRange.v;
2229*a325d9c4SApple OSS Distributions } else {
2230*a325d9c4SApple OSS Distributions _ranges.v = IONew(IOVirtualRange, count);
2231*a325d9c4SApple OSS Distributions if (!_ranges.v) {
2232*a325d9c4SApple OSS Distributions return false;
2233*a325d9c4SApple OSS Distributions }
2234*a325d9c4SApple OSS Distributions }
2235*a325d9c4SApple OSS Distributions bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2236*a325d9c4SApple OSS Distributions break;
2237*a325d9c4SApple OSS Distributions }
2238*a325d9c4SApple OSS Distributions }
2239*a325d9c4SApple OSS Distributions _rangesCount = count;
2240*a325d9c4SApple OSS Distributions
2241*a325d9c4SApple OSS Distributions // Find starting address within the vector of ranges
2242*a325d9c4SApple OSS Distributions Ranges vec = _ranges;
2243*a325d9c4SApple OSS Distributions mach_vm_size_t totalLength = 0;
2244*a325d9c4SApple OSS Distributions unsigned int ind, pages = 0;
2245*a325d9c4SApple OSS Distributions for (ind = 0; ind < count; ind++) {
2246*a325d9c4SApple OSS Distributions mach_vm_address_t addr;
2247*a325d9c4SApple OSS Distributions mach_vm_address_t endAddr;
2248*a325d9c4SApple OSS Distributions mach_vm_size_t len;
2249*a325d9c4SApple OSS Distributions
2250*a325d9c4SApple OSS Distributions // addr & len are returned by this function
2251*a325d9c4SApple OSS Distributions getAddrLenForInd(addr, len, type, vec, ind);
2252*a325d9c4SApple OSS Distributions if (_task) {
2253*a325d9c4SApple OSS Distributions mach_vm_size_t phys_size;
2254*a325d9c4SApple OSS Distributions kern_return_t kret;
2255*a325d9c4SApple OSS Distributions kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2256*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != kret) {
2257*a325d9c4SApple OSS Distributions break;
2258*a325d9c4SApple OSS Distributions }
2259*a325d9c4SApple OSS Distributions if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2260*a325d9c4SApple OSS Distributions break;
2261*a325d9c4SApple OSS Distributions }
2262*a325d9c4SApple OSS Distributions } else {
2263*a325d9c4SApple OSS Distributions if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2264*a325d9c4SApple OSS Distributions break;
2265*a325d9c4SApple OSS Distributions }
2266*a325d9c4SApple OSS Distributions if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2267*a325d9c4SApple OSS Distributions break;
2268*a325d9c4SApple OSS Distributions }
2269*a325d9c4SApple OSS Distributions if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2270*a325d9c4SApple OSS Distributions break;
2271*a325d9c4SApple OSS Distributions }
2272*a325d9c4SApple OSS Distributions }
2273*a325d9c4SApple OSS Distributions if (os_add_overflow(totalLength, len, &totalLength)) {
2274*a325d9c4SApple OSS Distributions break;
2275*a325d9c4SApple OSS Distributions }
2276*a325d9c4SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2277*a325d9c4SApple OSS Distributions uint64_t highPage = atop_64(addr + len - 1);
2278*a325d9c4SApple OSS Distributions if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2279*a325d9c4SApple OSS Distributions _highestPage = (ppnum_t) highPage;
2280*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2281*a325d9c4SApple OSS Distributions }
2282*a325d9c4SApple OSS Distributions }
2283*a325d9c4SApple OSS Distributions }
2284*a325d9c4SApple OSS Distributions if ((ind < count)
2285*a325d9c4SApple OSS Distributions || (totalLength != ((IOByteCount) totalLength))) {
2286*a325d9c4SApple OSS Distributions return false; /* overflow */
2287*a325d9c4SApple OSS Distributions }
2288*a325d9c4SApple OSS Distributions _length = totalLength;
2289*a325d9c4SApple OSS Distributions _pages = pages;
2290*a325d9c4SApple OSS Distributions
2291*a325d9c4SApple OSS Distributions // Auto-prepare memory at creation time.
2292*a325d9c4SApple OSS Distributions // Implied completion when descriptor is free-ed
2293*a325d9c4SApple OSS Distributions
2294*a325d9c4SApple OSS Distributions
2295*a325d9c4SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2296*a325d9c4SApple OSS Distributions _wireCount++; // Physical MDs are, by definition, wired
2297*a325d9c4SApple OSS Distributions } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2298*a325d9c4SApple OSS Distributions ioGMDData *dataP;
2299*a325d9c4SApple OSS Distributions unsigned dataSize;
2300*a325d9c4SApple OSS Distributions
2301*a325d9c4SApple OSS Distributions if (_pages > atop_64(max_mem)) {
2302*a325d9c4SApple OSS Distributions return false;
2303*a325d9c4SApple OSS Distributions }
2304*a325d9c4SApple OSS Distributions
2305*a325d9c4SApple OSS Distributions dataSize = computeDataSize(_pages, /* upls */ count * 2);
2306*a325d9c4SApple OSS Distributions if (!initMemoryEntries(dataSize, mapper)) {
2307*a325d9c4SApple OSS Distributions return false;
2308*a325d9c4SApple OSS Distributions }
2309*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
2310*a325d9c4SApple OSS Distributions dataP->fPageCnt = _pages;
2311*a325d9c4SApple OSS Distributions
2312*a325d9c4SApple OSS Distributions if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2313*a325d9c4SApple OSS Distributions && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2314*a325d9c4SApple OSS Distributions _kernelTag = IOMemoryTag(kernel_map);
2315*a325d9c4SApple OSS Distributions if (_kernelTag == gIOSurfaceTag) {
2316*a325d9c4SApple OSS Distributions _userTag = VM_MEMORY_IOSURFACE;
2317*a325d9c4SApple OSS Distributions }
2318*a325d9c4SApple OSS Distributions }
2319*a325d9c4SApple OSS Distributions
2320*a325d9c4SApple OSS Distributions if ((kIOMemoryPersistent & _flags) && !_memRef) {
2321*a325d9c4SApple OSS Distributions IOReturn
2322*a325d9c4SApple OSS Distributions err = memoryReferenceCreate(0, &_memRef);
2323*a325d9c4SApple OSS Distributions if (kIOReturnSuccess != err) {
2324*a325d9c4SApple OSS Distributions return false;
2325*a325d9c4SApple OSS Distributions }
2326*a325d9c4SApple OSS Distributions }
2327*a325d9c4SApple OSS Distributions
2328*a325d9c4SApple OSS Distributions if ((_flags & kIOMemoryAutoPrepare)
2329*a325d9c4SApple OSS Distributions && prepare() != kIOReturnSuccess) {
2330*a325d9c4SApple OSS Distributions return false;
2331*a325d9c4SApple OSS Distributions }
2332*a325d9c4SApple OSS Distributions }
2333*a325d9c4SApple OSS Distributions }
2334*a325d9c4SApple OSS Distributions
2335*a325d9c4SApple OSS Distributions return true;
2336*a325d9c4SApple OSS Distributions }
2337*a325d9c4SApple OSS Distributions
2338*a325d9c4SApple OSS Distributions /*
2339*a325d9c4SApple OSS Distributions * free
2340*a325d9c4SApple OSS Distributions *
2341*a325d9c4SApple OSS Distributions * Free resources.
2342*a325d9c4SApple OSS Distributions */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach the device pager's back-reference to this descriptor under the
	// lock so a concurrent pager callback cannot reach a dying object.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical descriptors never go through complete(); just tear down
		// any DMA mapping still recorded in the internal data blob.
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance every outstanding prepare() so wired pages are released.
		while (_wireCount) {
			complete();
		}
	}

	// Drop the internal memory-entry data (OSData wrapper around ioGMDData).
	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Release the range vector only if this descriptor owns it
	// (kIOMemoryAsReference means the caller's buffer was used in place).
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	// Release the named-entry reference and the optional thread-safety lock.
	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2406*a325d9c4SApple OSS Distributions
2407*a325d9c4SApple OSS Distributions #ifndef __LP64__
// Deprecated pre-LP64 entry point; unconditionally panics if ever called.
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}
2413*a325d9c4SApple OSS Distributions
// Deprecated pre-LP64 entry point; unconditionally panics if ever called.
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
2419*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2420*a325d9c4SApple OSS Distributions
2421*a325d9c4SApple OSS Distributions /*
2422*a325d9c4SApple OSS Distributions * getDirection:
2423*a325d9c4SApple OSS Distributions *
2424*a325d9c4SApple OSS Distributions * Get the direction of the transfer.
2425*a325d9c4SApple OSS Distributions */
2426*a325d9c4SApple OSS Distributions IODirection
getDirection() const2427*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getDirection() const
2428*a325d9c4SApple OSS Distributions {
2429*a325d9c4SApple OSS Distributions #ifndef __LP64__
2430*a325d9c4SApple OSS Distributions if (_direction) {
2431*a325d9c4SApple OSS Distributions return _direction;
2432*a325d9c4SApple OSS Distributions }
2433*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2434*a325d9c4SApple OSS Distributions return (IODirection) (_flags & kIOMemoryDirectionMask);
2435*a325d9c4SApple OSS Distributions }
2436*a325d9c4SApple OSS Distributions
2437*a325d9c4SApple OSS Distributions /*
2438*a325d9c4SApple OSS Distributions * getLength:
2439*a325d9c4SApple OSS Distributions *
2440*a325d9c4SApple OSS Distributions * Get the length of the transfer (over all ranges).
2441*a325d9c4SApple OSS Distributions */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total byte count over all ranges, computed when the descriptor was
	// initialized.
	return _length;
}
2447*a325d9c4SApple OSS Distributions
// Store a client-supplied tag on the descriptor; the value is kept verbatim
// and returned by getTag().
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	_tag = tag;
}
2453*a325d9c4SApple OSS Distributions
// Return the tag last stored via setTag() (zero if never set).
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	return _tag;
}
2459*a325d9c4SApple OSS Distributions
// Return the descriptor's option/type flag bits (kIOMemory* values).
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	return _flags;
}
2465*a325d9c4SApple OSS Distributions
2466*a325d9c4SApple OSS Distributions OSObject *
copyContext(void) const2467*a325d9c4SApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2468*a325d9c4SApple OSS Distributions {
2469*a325d9c4SApple OSS Distributions if (reserved) {
2470*a325d9c4SApple OSS Distributions OSObject * context = reserved->contextObject;
2471*a325d9c4SApple OSS Distributions if (context) {
2472*a325d9c4SApple OSS Distributions context->retain();
2473*a325d9c4SApple OSS Distributions }
2474*a325d9c4SApple OSS Distributions return context;
2475*a325d9c4SApple OSS Distributions } else {
2476*a325d9c4SApple OSS Distributions return NULL;
2477*a325d9c4SApple OSS Distributions }
2478*a325d9c4SApple OSS Distributions }
2479*a325d9c4SApple OSS Distributions
// Attach (or clear, when obj is NULL) an opaque context object to this
// descriptor. The previous context, if any, is released; the new one is
// retained. Retrieve with copyContext().
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Allocate the reserved area on demand (may fail; then nothing is stored).
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		// Detach the old context with a compare-and-swap so that of two
		// racing callers only the one that actually swapped it out
		// releases the reference.
		OSObject * oldObject = reserved->contextObject;
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			// Store a retained reference for the descriptor.
			// NOTE(review): this plain store is not itself atomic with the
			// CAS above — concurrent setContext calls can interleave; verify
			// callers serialize if that matters.
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2500*a325d9c4SApple OSS Distributions
2501*a325d9c4SApple OSS Distributions #ifndef __LP64__
2502*a325d9c4SApple OSS Distributions #pragma clang diagnostic push
2503*a325d9c4SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2504*a325d9c4SApple OSS Distributions
2505*a325d9c4SApple OSS Distributions // @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
2506*a325d9c4SApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2507*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
2508*a325d9c4SApple OSS Distributions {
2509*a325d9c4SApple OSS Distributions addr64_t physAddr = 0;
2510*a325d9c4SApple OSS Distributions
2511*a325d9c4SApple OSS Distributions if (prepare() == kIOReturnSuccess) {
2512*a325d9c4SApple OSS Distributions physAddr = getPhysicalSegment64( offset, length );
2513*a325d9c4SApple OSS Distributions complete();
2514*a325d9c4SApple OSS Distributions }
2515*a325d9c4SApple OSS Distributions
2516*a325d9c4SApple OSS Distributions return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2517*a325d9c4SApple OSS Distributions }
2518*a325d9c4SApple OSS Distributions
2519*a325d9c4SApple OSS Distributions #pragma clang diagnostic pop
2520*a325d9c4SApple OSS Distributions
2521*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2522*a325d9c4SApple OSS Distributions
2523*a325d9c4SApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)2524*a325d9c4SApple OSS Distributions IOMemoryDescriptor::readBytes
2525*a325d9c4SApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
2526*a325d9c4SApple OSS Distributions {
2527*a325d9c4SApple OSS Distributions addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2528*a325d9c4SApple OSS Distributions IOByteCount endoffset;
2529*a325d9c4SApple OSS Distributions IOByteCount remaining;
2530*a325d9c4SApple OSS Distributions
2531*a325d9c4SApple OSS Distributions
2532*a325d9c4SApple OSS Distributions // Check that this entire I/O is within the available range
2533*a325d9c4SApple OSS Distributions if ((offset > _length)
2534*a325d9c4SApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2535*a325d9c4SApple OSS Distributions || (endoffset > _length)) {
2536*a325d9c4SApple OSS Distributions assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2537*a325d9c4SApple OSS Distributions return 0;
2538*a325d9c4SApple OSS Distributions }
2539*a325d9c4SApple OSS Distributions if (offset >= _length) {
2540*a325d9c4SApple OSS Distributions return 0;
2541*a325d9c4SApple OSS Distributions }
2542*a325d9c4SApple OSS Distributions
2543*a325d9c4SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2544*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
2545*a325d9c4SApple OSS Distributions return 0;
2546*a325d9c4SApple OSS Distributions }
2547*a325d9c4SApple OSS Distributions
2548*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2549*a325d9c4SApple OSS Distributions LOCK;
2550*a325d9c4SApple OSS Distributions }
2551*a325d9c4SApple OSS Distributions
2552*a325d9c4SApple OSS Distributions remaining = length = min(length, _length - offset);
2553*a325d9c4SApple OSS Distributions while (remaining) { // (process another target segment?)
2554*a325d9c4SApple OSS Distributions addr64_t srcAddr64;
2555*a325d9c4SApple OSS Distributions IOByteCount srcLen;
2556*a325d9c4SApple OSS Distributions
2557*a325d9c4SApple OSS Distributions srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2558*a325d9c4SApple OSS Distributions if (!srcAddr64) {
2559*a325d9c4SApple OSS Distributions break;
2560*a325d9c4SApple OSS Distributions }
2561*a325d9c4SApple OSS Distributions
2562*a325d9c4SApple OSS Distributions // Clip segment length to remaining
2563*a325d9c4SApple OSS Distributions if (srcLen > remaining) {
2564*a325d9c4SApple OSS Distributions srcLen = remaining;
2565*a325d9c4SApple OSS Distributions }
2566*a325d9c4SApple OSS Distributions
2567*a325d9c4SApple OSS Distributions if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2568*a325d9c4SApple OSS Distributions srcLen = (UINT_MAX - PAGE_SIZE + 1);
2569*a325d9c4SApple OSS Distributions }
2570*a325d9c4SApple OSS Distributions copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2571*a325d9c4SApple OSS Distributions cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2572*a325d9c4SApple OSS Distributions
2573*a325d9c4SApple OSS Distributions dstAddr += srcLen;
2574*a325d9c4SApple OSS Distributions offset += srcLen;
2575*a325d9c4SApple OSS Distributions remaining -= srcLen;
2576*a325d9c4SApple OSS Distributions }
2577*a325d9c4SApple OSS Distributions
2578*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2579*a325d9c4SApple OSS Distributions UNLOCK;
2580*a325d9c4SApple OSS Distributions }
2581*a325d9c4SApple OSS Distributions
2582*a325d9c4SApple OSS Distributions assert(!remaining);
2583*a325d9c4SApple OSS Distributions
2584*a325d9c4SApple OSS Distributions return length - remaining;
2585*a325d9c4SApple OSS Distributions }
2586*a325d9c4SApple OSS Distributions
2587*a325d9c4SApple OSS Distributions IOByteCount
writeBytes(IOByteCount inoffset,const void * bytes,IOByteCount length)2588*a325d9c4SApple OSS Distributions IOMemoryDescriptor::writeBytes
2589*a325d9c4SApple OSS Distributions (IOByteCount inoffset, const void *bytes, IOByteCount length)
2590*a325d9c4SApple OSS Distributions {
2591*a325d9c4SApple OSS Distributions addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2592*a325d9c4SApple OSS Distributions IOByteCount remaining;
2593*a325d9c4SApple OSS Distributions IOByteCount endoffset;
2594*a325d9c4SApple OSS Distributions IOByteCount offset = inoffset;
2595*a325d9c4SApple OSS Distributions
2596*a325d9c4SApple OSS Distributions assert( !(kIOMemoryPreparedReadOnly & _flags));
2597*a325d9c4SApple OSS Distributions
2598*a325d9c4SApple OSS Distributions // Check that this entire I/O is within the available range
2599*a325d9c4SApple OSS Distributions if ((offset > _length)
2600*a325d9c4SApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2601*a325d9c4SApple OSS Distributions || (endoffset > _length)) {
2602*a325d9c4SApple OSS Distributions assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2603*a325d9c4SApple OSS Distributions return 0;
2604*a325d9c4SApple OSS Distributions }
2605*a325d9c4SApple OSS Distributions if (kIOMemoryPreparedReadOnly & _flags) {
2606*a325d9c4SApple OSS Distributions return 0;
2607*a325d9c4SApple OSS Distributions }
2608*a325d9c4SApple OSS Distributions if (offset >= _length) {
2609*a325d9c4SApple OSS Distributions return 0;
2610*a325d9c4SApple OSS Distributions }
2611*a325d9c4SApple OSS Distributions
2612*a325d9c4SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2613*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
2614*a325d9c4SApple OSS Distributions return 0;
2615*a325d9c4SApple OSS Distributions }
2616*a325d9c4SApple OSS Distributions
2617*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2618*a325d9c4SApple OSS Distributions LOCK;
2619*a325d9c4SApple OSS Distributions }
2620*a325d9c4SApple OSS Distributions
2621*a325d9c4SApple OSS Distributions remaining = length = min(length, _length - offset);
2622*a325d9c4SApple OSS Distributions while (remaining) { // (process another target segment?)
2623*a325d9c4SApple OSS Distributions addr64_t dstAddr64;
2624*a325d9c4SApple OSS Distributions IOByteCount dstLen;
2625*a325d9c4SApple OSS Distributions
2626*a325d9c4SApple OSS Distributions dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2627*a325d9c4SApple OSS Distributions if (!dstAddr64) {
2628*a325d9c4SApple OSS Distributions break;
2629*a325d9c4SApple OSS Distributions }
2630*a325d9c4SApple OSS Distributions
2631*a325d9c4SApple OSS Distributions // Clip segment length to remaining
2632*a325d9c4SApple OSS Distributions if (dstLen > remaining) {
2633*a325d9c4SApple OSS Distributions dstLen = remaining;
2634*a325d9c4SApple OSS Distributions }
2635*a325d9c4SApple OSS Distributions
2636*a325d9c4SApple OSS Distributions if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2637*a325d9c4SApple OSS Distributions dstLen = (UINT_MAX - PAGE_SIZE + 1);
2638*a325d9c4SApple OSS Distributions }
2639*a325d9c4SApple OSS Distributions if (!srcAddr) {
2640*a325d9c4SApple OSS Distributions bzero_phys(dstAddr64, (unsigned int) dstLen);
2641*a325d9c4SApple OSS Distributions } else {
2642*a325d9c4SApple OSS Distributions copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2643*a325d9c4SApple OSS Distributions cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2644*a325d9c4SApple OSS Distributions srcAddr += dstLen;
2645*a325d9c4SApple OSS Distributions }
2646*a325d9c4SApple OSS Distributions offset += dstLen;
2647*a325d9c4SApple OSS Distributions remaining -= dstLen;
2648*a325d9c4SApple OSS Distributions }
2649*a325d9c4SApple OSS Distributions
2650*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2651*a325d9c4SApple OSS Distributions UNLOCK;
2652*a325d9c4SApple OSS Distributions }
2653*a325d9c4SApple OSS Distributions
2654*a325d9c4SApple OSS Distributions assert(!remaining);
2655*a325d9c4SApple OSS Distributions
2656*a325d9c4SApple OSS Distributions #if defined(__x86_64__)
2657*a325d9c4SApple OSS Distributions // copypv does not cppvFsnk on intel
2658*a325d9c4SApple OSS Distributions #else
2659*a325d9c4SApple OSS Distributions if (!srcAddr) {
2660*a325d9c4SApple OSS Distributions performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2661*a325d9c4SApple OSS Distributions }
2662*a325d9c4SApple OSS Distributions #endif
2663*a325d9c4SApple OSS Distributions
2664*a325d9c4SApple OSS Distributions return length - remaining;
2665*a325d9c4SApple OSS Distributions }
2666*a325d9c4SApple OSS Distributions
2667*a325d9c4SApple OSS Distributions #ifndef __LP64__
2668*a325d9c4SApple OSS Distributions void
setPosition(IOByteCount position)2669*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2670*a325d9c4SApple OSS Distributions {
2671*a325d9c4SApple OSS Distributions panic("IOGMD::setPosition deprecated");
2672*a325d9c4SApple OSS Distributions }
2673*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
2674*a325d9c4SApple OSS Distributions
2675*a325d9c4SApple OSS Distributions static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2676*a325d9c4SApple OSS Distributions static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2677*a325d9c4SApple OSS Distributions
2678*a325d9c4SApple OSS Distributions uint64_t
getPreparationID(void)2679*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::getPreparationID( void )
2680*a325d9c4SApple OSS Distributions {
2681*a325d9c4SApple OSS Distributions ioGMDData *dataP;
2682*a325d9c4SApple OSS Distributions
2683*a325d9c4SApple OSS Distributions if (!_wireCount) {
2684*a325d9c4SApple OSS Distributions return kIOPreparationIDUnprepared;
2685*a325d9c4SApple OSS Distributions }
2686*a325d9c4SApple OSS Distributions
2687*a325d9c4SApple OSS Distributions if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2688*a325d9c4SApple OSS Distributions || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2689*a325d9c4SApple OSS Distributions IOMemoryDescriptor::setPreparationID();
2690*a325d9c4SApple OSS Distributions return IOMemoryDescriptor::getPreparationID();
2691*a325d9c4SApple OSS Distributions }
2692*a325d9c4SApple OSS Distributions
2693*a325d9c4SApple OSS Distributions if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2694*a325d9c4SApple OSS Distributions return kIOPreparationIDUnprepared;
2695*a325d9c4SApple OSS Distributions }
2696*a325d9c4SApple OSS Distributions
2697*a325d9c4SApple OSS Distributions if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2698*a325d9c4SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2699*a325d9c4SApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2700*a325d9c4SApple OSS Distributions }
2701*a325d9c4SApple OSS Distributions return dataP->fPreparationID;
2702*a325d9c4SApple OSS Distributions }
2703*a325d9c4SApple OSS Distributions
2704*a325d9c4SApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2705*a325d9c4SApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2706*a325d9c4SApple OSS Distributions {
2707*a325d9c4SApple OSS Distributions if (reserved->creator) {
2708*a325d9c4SApple OSS Distributions task_deallocate(reserved->creator);
2709*a325d9c4SApple OSS Distributions reserved->creator = NULL;
2710*a325d9c4SApple OSS Distributions }
2711*a325d9c4SApple OSS Distributions
2712*a325d9c4SApple OSS Distributions if (reserved->contextObject) {
2713*a325d9c4SApple OSS Distributions reserved->contextObject->release();
2714*a325d9c4SApple OSS Distributions reserved->contextObject = NULL;
2715*a325d9c4SApple OSS Distributions }
2716*a325d9c4SApple OSS Distributions }
2717*a325d9c4SApple OSS Distributions
2718*a325d9c4SApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2719*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2720*a325d9c4SApple OSS Distributions {
2721*a325d9c4SApple OSS Distributions if (!reserved) {
2722*a325d9c4SApple OSS Distributions reserved = IOMallocType(IOMemoryDescriptorReserved);
2723*a325d9c4SApple OSS Distributions }
2724*a325d9c4SApple OSS Distributions return reserved;
2725*a325d9c4SApple OSS Distributions }
2726*a325d9c4SApple OSS Distributions
2727*a325d9c4SApple OSS Distributions void
setPreparationID(void)2728*a325d9c4SApple OSS Distributions IOMemoryDescriptor::setPreparationID( void )
2729*a325d9c4SApple OSS Distributions {
2730*a325d9c4SApple OSS Distributions if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2731*a325d9c4SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2732*a325d9c4SApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2733*a325d9c4SApple OSS Distributions }
2734*a325d9c4SApple OSS Distributions }
2735*a325d9c4SApple OSS Distributions
2736*a325d9c4SApple OSS Distributions uint64_t
getPreparationID(void)2737*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2738*a325d9c4SApple OSS Distributions {
2739*a325d9c4SApple OSS Distributions if (reserved) {
2740*a325d9c4SApple OSS Distributions return reserved->preparationID;
2741*a325d9c4SApple OSS Distributions } else {
2742*a325d9c4SApple OSS Distributions return kIOPreparationIDUnsupported;
2743*a325d9c4SApple OSS Distributions }
2744*a325d9c4SApple OSS Distributions }
2745*a325d9c4SApple OSS Distributions
2746*a325d9c4SApple OSS Distributions void
setDescriptorID(void)2747*a325d9c4SApple OSS Distributions IOMemoryDescriptor::setDescriptorID( void )
2748*a325d9c4SApple OSS Distributions {
2749*a325d9c4SApple OSS Distributions if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2750*a325d9c4SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2751*a325d9c4SApple OSS Distributions OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2752*a325d9c4SApple OSS Distributions }
2753*a325d9c4SApple OSS Distributions }
2754*a325d9c4SApple OSS Distributions
2755*a325d9c4SApple OSS Distributions uint64_t
getDescriptorID(void)2756*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2757*a325d9c4SApple OSS Distributions {
2758*a325d9c4SApple OSS Distributions setDescriptorID();
2759*a325d9c4SApple OSS Distributions
2760*a325d9c4SApple OSS Distributions if (reserved) {
2761*a325d9c4SApple OSS Distributions return reserved->descriptorID;
2762*a325d9c4SApple OSS Distributions } else {
2763*a325d9c4SApple OSS Distributions return kIODescriptorIDInvalid;
2764*a325d9c4SApple OSS Distributions }
2765*a325d9c4SApple OSS Distributions }
2766*a325d9c4SApple OSS Distributions
2767*a325d9c4SApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2768*a325d9c4SApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2769*a325d9c4SApple OSS Distributions {
2770*a325d9c4SApple OSS Distributions if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2771*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
2772*a325d9c4SApple OSS Distributions }
2773*a325d9c4SApple OSS Distributions
2774*a325d9c4SApple OSS Distributions assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2775*a325d9c4SApple OSS Distributions if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2776*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
2777*a325d9c4SApple OSS Distributions }
2778*a325d9c4SApple OSS Distributions
2779*a325d9c4SApple OSS Distributions uint64_t descriptorID = getDescriptorID();
2780*a325d9c4SApple OSS Distributions assert(descriptorID != kIODescriptorIDInvalid);
2781*a325d9c4SApple OSS Distributions if (getDescriptorID() == kIODescriptorIDInvalid) {
2782*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
2783*a325d9c4SApple OSS Distributions }
2784*a325d9c4SApple OSS Distributions
2785*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2786*a325d9c4SApple OSS Distributions
2787*a325d9c4SApple OSS Distributions #if __LP64__
2788*a325d9c4SApple OSS Distributions static const uint8_t num_segments_page = 8;
2789*a325d9c4SApple OSS Distributions #else
2790*a325d9c4SApple OSS Distributions static const uint8_t num_segments_page = 4;
2791*a325d9c4SApple OSS Distributions #endif
2792*a325d9c4SApple OSS Distributions static const uint8_t num_segments_long = 2;
2793*a325d9c4SApple OSS Distributions
2794*a325d9c4SApple OSS Distributions IOPhysicalAddress segments_page[num_segments_page];
2795*a325d9c4SApple OSS Distributions IOPhysicalRange segments_long[num_segments_long];
2796*a325d9c4SApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2797*a325d9c4SApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2798*a325d9c4SApple OSS Distributions
2799*a325d9c4SApple OSS Distributions uint8_t segment_page_idx = 0;
2800*a325d9c4SApple OSS Distributions uint8_t segment_long_idx = 0;
2801*a325d9c4SApple OSS Distributions
2802*a325d9c4SApple OSS Distributions IOPhysicalRange physical_segment;
2803*a325d9c4SApple OSS Distributions for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2804*a325d9c4SApple OSS Distributions physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2805*a325d9c4SApple OSS Distributions
2806*a325d9c4SApple OSS Distributions if (physical_segment.length == 0) {
2807*a325d9c4SApple OSS Distributions break;
2808*a325d9c4SApple OSS Distributions }
2809*a325d9c4SApple OSS Distributions
2810*a325d9c4SApple OSS Distributions /**
2811*a325d9c4SApple OSS Distributions * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2812*a325d9c4SApple OSS Distributions * buffer memory, pack segment events according to the following.
2813*a325d9c4SApple OSS Distributions *
2814*a325d9c4SApple OSS Distributions * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
2815*a325d9c4SApple OSS Distributions * IOMDPA_MAPPED event emitted on by the current thread_id.
2816*a325d9c4SApple OSS Distributions *
2817*a325d9c4SApple OSS Distributions * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2818*a325d9c4SApple OSS Distributions * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2819*a325d9c4SApple OSS Distributions * - unmapped pages will have a ppn of MAX_INT_32
2820*a325d9c4SApple OSS Distributions * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
2821*a325d9c4SApple OSS Distributions * - address_0, length_0, address_0, length_1
2822*a325d9c4SApple OSS Distributions * - unmapped pages will have an address of 0
2823*a325d9c4SApple OSS Distributions *
2824*a325d9c4SApple OSS Distributions * During each iteration do the following depending on the length of the mapping:
2825*a325d9c4SApple OSS Distributions * 1. add the current segment to the appropriate queue of pending segments
2826*a325d9c4SApple OSS Distributions * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2827*a325d9c4SApple OSS Distributions * 1a. if FALSE emit and reset all events in the previous queue
2828*a325d9c4SApple OSS Distributions * 2. check if we have filled up the current queue of pending events
2829*a325d9c4SApple OSS Distributions * 2a. if TRUE emit and reset all events in the pending queue
2830*a325d9c4SApple OSS Distributions * 3. after completing all iterations emit events in the current queue
2831*a325d9c4SApple OSS Distributions */
2832*a325d9c4SApple OSS Distributions
2833*a325d9c4SApple OSS Distributions bool emit_page = false;
2834*a325d9c4SApple OSS Distributions bool emit_long = false;
2835*a325d9c4SApple OSS Distributions if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2836*a325d9c4SApple OSS Distributions segments_page[segment_page_idx] = physical_segment.address;
2837*a325d9c4SApple OSS Distributions segment_page_idx++;
2838*a325d9c4SApple OSS Distributions
2839*a325d9c4SApple OSS Distributions emit_long = segment_long_idx != 0;
2840*a325d9c4SApple OSS Distributions emit_page = segment_page_idx == num_segments_page;
2841*a325d9c4SApple OSS Distributions
2842*a325d9c4SApple OSS Distributions if (os_unlikely(emit_long)) {
2843*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2844*a325d9c4SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2845*a325d9c4SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2846*a325d9c4SApple OSS Distributions }
2847*a325d9c4SApple OSS Distributions
2848*a325d9c4SApple OSS Distributions if (os_unlikely(emit_page)) {
2849*a325d9c4SApple OSS Distributions #if __LP64__
2850*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2851*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2852*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2853*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2854*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2855*a325d9c4SApple OSS Distributions #else
2856*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2857*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2858*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2859*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2860*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2861*a325d9c4SApple OSS Distributions #endif
2862*a325d9c4SApple OSS Distributions }
2863*a325d9c4SApple OSS Distributions } else {
2864*a325d9c4SApple OSS Distributions segments_long[segment_long_idx] = physical_segment;
2865*a325d9c4SApple OSS Distributions segment_long_idx++;
2866*a325d9c4SApple OSS Distributions
2867*a325d9c4SApple OSS Distributions emit_page = segment_page_idx != 0;
2868*a325d9c4SApple OSS Distributions emit_long = segment_long_idx == num_segments_long;
2869*a325d9c4SApple OSS Distributions
2870*a325d9c4SApple OSS Distributions if (os_unlikely(emit_page)) {
2871*a325d9c4SApple OSS Distributions #if __LP64__
2872*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2873*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2874*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2875*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2876*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2877*a325d9c4SApple OSS Distributions #else
2878*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2879*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2880*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2881*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2882*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2883*a325d9c4SApple OSS Distributions #endif
2884*a325d9c4SApple OSS Distributions }
2885*a325d9c4SApple OSS Distributions
2886*a325d9c4SApple OSS Distributions if (emit_long) {
2887*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2888*a325d9c4SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2889*a325d9c4SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2890*a325d9c4SApple OSS Distributions }
2891*a325d9c4SApple OSS Distributions }
2892*a325d9c4SApple OSS Distributions
2893*a325d9c4SApple OSS Distributions if (os_unlikely(emit_page)) {
2894*a325d9c4SApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2895*a325d9c4SApple OSS Distributions segment_page_idx = 0;
2896*a325d9c4SApple OSS Distributions }
2897*a325d9c4SApple OSS Distributions
2898*a325d9c4SApple OSS Distributions if (os_unlikely(emit_long)) {
2899*a325d9c4SApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2900*a325d9c4SApple OSS Distributions segment_long_idx = 0;
2901*a325d9c4SApple OSS Distributions }
2902*a325d9c4SApple OSS Distributions }
2903*a325d9c4SApple OSS Distributions
2904*a325d9c4SApple OSS Distributions if (segment_page_idx != 0) {
2905*a325d9c4SApple OSS Distributions assert(segment_long_idx == 0);
2906*a325d9c4SApple OSS Distributions #if __LP64__
2907*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2908*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2909*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2910*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2911*a325d9c4SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2912*a325d9c4SApple OSS Distributions #else
2913*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2914*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2915*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2916*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2917*a325d9c4SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2918*a325d9c4SApple OSS Distributions #endif
2919*a325d9c4SApple OSS Distributions } else if (segment_long_idx != 0) {
2920*a325d9c4SApple OSS Distributions assert(segment_page_idx == 0);
2921*a325d9c4SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2922*a325d9c4SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2923*a325d9c4SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2924*a325d9c4SApple OSS Distributions }
2925*a325d9c4SApple OSS Distributions
2926*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
2927*a325d9c4SApple OSS Distributions }
2928*a325d9c4SApple OSS Distributions
2929*a325d9c4SApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2930*a325d9c4SApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2931*a325d9c4SApple OSS Distributions {
2932*a325d9c4SApple OSS Distributions _kernelTag = (vm_tag_t) kernelTag;
2933*a325d9c4SApple OSS Distributions _userTag = (vm_tag_t) userTag;
2934*a325d9c4SApple OSS Distributions }
2935*a325d9c4SApple OSS Distributions
2936*a325d9c4SApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2937*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2938*a325d9c4SApple OSS Distributions {
2939*a325d9c4SApple OSS Distributions if (vm_kernel_map_is_kernel(map)) {
2940*a325d9c4SApple OSS Distributions if (VM_KERN_MEMORY_NONE != _kernelTag) {
2941*a325d9c4SApple OSS Distributions return (uint32_t) _kernelTag;
2942*a325d9c4SApple OSS Distributions }
2943*a325d9c4SApple OSS Distributions } else {
2944*a325d9c4SApple OSS Distributions if (VM_KERN_MEMORY_NONE != _userTag) {
2945*a325d9c4SApple OSS Distributions return (uint32_t) _userTag;
2946*a325d9c4SApple OSS Distributions }
2947*a325d9c4SApple OSS Distributions }
2948*a325d9c4SApple OSS Distributions return IOMemoryTag(map);
2949*a325d9c4SApple OSS Distributions }
2950*a325d9c4SApple OSS Distributions
2951*a325d9c4SApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2952*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2953*a325d9c4SApple OSS Distributions {
2954*a325d9c4SApple OSS Distributions IOReturn err = kIOReturnSuccess;
2955*a325d9c4SApple OSS Distributions DMACommandOps params;
2956*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2957*a325d9c4SApple OSS Distributions ioGMDData *dataP;
2958*a325d9c4SApple OSS Distributions
2959*a325d9c4SApple OSS Distributions params = (op & ~kIOMDDMACommandOperationMask & op);
2960*a325d9c4SApple OSS Distributions op &= kIOMDDMACommandOperationMask;
2961*a325d9c4SApple OSS Distributions
2962*a325d9c4SApple OSS Distributions if (kIOMDDMAMap == op) {
2963*a325d9c4SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
2964*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
2965*a325d9c4SApple OSS Distributions }
2966*a325d9c4SApple OSS Distributions
2967*a325d9c4SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2968*a325d9c4SApple OSS Distributions
2969*a325d9c4SApple OSS Distributions if (!_memoryEntries
2970*a325d9c4SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2971*a325d9c4SApple OSS Distributions return kIOReturnNoMemory;
2972*a325d9c4SApple OSS Distributions }
2973*a325d9c4SApple OSS Distributions
2974*a325d9c4SApple OSS Distributions if (_memoryEntries && data->fMapper) {
2975*a325d9c4SApple OSS Distributions bool remap, keepMap;
2976*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
2977*a325d9c4SApple OSS Distributions
2978*a325d9c4SApple OSS Distributions if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2979*a325d9c4SApple OSS Distributions dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2980*a325d9c4SApple OSS Distributions }
2981*a325d9c4SApple OSS Distributions if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2982*a325d9c4SApple OSS Distributions dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2983*a325d9c4SApple OSS Distributions }
2984*a325d9c4SApple OSS Distributions
2985*a325d9c4SApple OSS Distributions keepMap = (data->fMapper == gIOSystemMapper);
2986*a325d9c4SApple OSS Distributions keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2987*a325d9c4SApple OSS Distributions
2988*a325d9c4SApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2989*a325d9c4SApple OSS Distributions IOLockLock(_prepareLock);
2990*a325d9c4SApple OSS Distributions }
2991*a325d9c4SApple OSS Distributions
2992*a325d9c4SApple OSS Distributions remap = (!keepMap);
2993*a325d9c4SApple OSS Distributions remap |= (dataP->fDMAMapNumAddressBits < 64)
2994*a325d9c4SApple OSS Distributions && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2995*a325d9c4SApple OSS Distributions remap |= (dataP->fDMAMapAlignment > page_size);
2996*a325d9c4SApple OSS Distributions
2997*a325d9c4SApple OSS Distributions if (remap || !dataP->fMappedBaseValid) {
2998*a325d9c4SApple OSS Distributions err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2999*a325d9c4SApple OSS Distributions if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3000*a325d9c4SApple OSS Distributions dataP->fMappedBase = data->fAlloc;
3001*a325d9c4SApple OSS Distributions dataP->fMappedBaseValid = true;
3002*a325d9c4SApple OSS Distributions dataP->fMappedLength = data->fAllocLength;
3003*a325d9c4SApple OSS Distributions data->fAllocLength = 0; // IOMD owns the alloc now
3004*a325d9c4SApple OSS Distributions }
3005*a325d9c4SApple OSS Distributions } else {
3006*a325d9c4SApple OSS Distributions data->fAlloc = dataP->fMappedBase;
3007*a325d9c4SApple OSS Distributions data->fAllocLength = 0; // give out IOMD map
3008*a325d9c4SApple OSS Distributions md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3009*a325d9c4SApple OSS Distributions }
3010*a325d9c4SApple OSS Distributions
3011*a325d9c4SApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3012*a325d9c4SApple OSS Distributions IOLockUnlock(_prepareLock);
3013*a325d9c4SApple OSS Distributions }
3014*a325d9c4SApple OSS Distributions }
3015*a325d9c4SApple OSS Distributions return err;
3016*a325d9c4SApple OSS Distributions }
3017*a325d9c4SApple OSS Distributions if (kIOMDDMAUnmap == op) {
3018*a325d9c4SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3019*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
3020*a325d9c4SApple OSS Distributions }
3021*a325d9c4SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3022*a325d9c4SApple OSS Distributions
3023*a325d9c4SApple OSS Distributions err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3024*a325d9c4SApple OSS Distributions
3025*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
3026*a325d9c4SApple OSS Distributions }
3027*a325d9c4SApple OSS Distributions
3028*a325d9c4SApple OSS Distributions if (kIOMDAddDMAMapSpec == op) {
3029*a325d9c4SApple OSS Distributions if (dataSize < sizeof(IODMAMapSpecification)) {
3030*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
3031*a325d9c4SApple OSS Distributions }
3032*a325d9c4SApple OSS Distributions
3033*a325d9c4SApple OSS Distributions IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3034*a325d9c4SApple OSS Distributions
3035*a325d9c4SApple OSS Distributions if (!_memoryEntries
3036*a325d9c4SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3037*a325d9c4SApple OSS Distributions return kIOReturnNoMemory;
3038*a325d9c4SApple OSS Distributions }
3039*a325d9c4SApple OSS Distributions
3040*a325d9c4SApple OSS Distributions if (_memoryEntries) {
3041*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
3042*a325d9c4SApple OSS Distributions if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3043*a325d9c4SApple OSS Distributions dataP->fDMAMapNumAddressBits = data->numAddressBits;
3044*a325d9c4SApple OSS Distributions }
3045*a325d9c4SApple OSS Distributions if (data->alignment > dataP->fDMAMapAlignment) {
3046*a325d9c4SApple OSS Distributions dataP->fDMAMapAlignment = data->alignment;
3047*a325d9c4SApple OSS Distributions }
3048*a325d9c4SApple OSS Distributions }
3049*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
3050*a325d9c4SApple OSS Distributions }
3051*a325d9c4SApple OSS Distributions
3052*a325d9c4SApple OSS Distributions if (kIOMDGetCharacteristics == op) {
3053*a325d9c4SApple OSS Distributions if (dataSize < sizeof(IOMDDMACharacteristics)) {
3054*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
3055*a325d9c4SApple OSS Distributions }
3056*a325d9c4SApple OSS Distributions
3057*a325d9c4SApple OSS Distributions IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3058*a325d9c4SApple OSS Distributions data->fLength = _length;
3059*a325d9c4SApple OSS Distributions data->fSGCount = _rangesCount;
3060*a325d9c4SApple OSS Distributions data->fPages = _pages;
3061*a325d9c4SApple OSS Distributions data->fDirection = getDirection();
3062*a325d9c4SApple OSS Distributions if (!_wireCount) {
3063*a325d9c4SApple OSS Distributions data->fIsPrepared = false;
3064*a325d9c4SApple OSS Distributions } else {
3065*a325d9c4SApple OSS Distributions data->fIsPrepared = true;
3066*a325d9c4SApple OSS Distributions data->fHighestPage = _highestPage;
3067*a325d9c4SApple OSS Distributions if (_memoryEntries) {
3068*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
3069*a325d9c4SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
3070*a325d9c4SApple OSS Distributions UInt count = getNumIOPL(_memoryEntries, dataP);
3071*a325d9c4SApple OSS Distributions if (count == 1) {
3072*a325d9c4SApple OSS Distributions data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3073*a325d9c4SApple OSS Distributions }
3074*a325d9c4SApple OSS Distributions }
3075*a325d9c4SApple OSS Distributions }
3076*a325d9c4SApple OSS Distributions
3077*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
3078*a325d9c4SApple OSS Distributions } else if (kIOMDDMAActive == op) {
3079*a325d9c4SApple OSS Distributions if (params) {
3080*a325d9c4SApple OSS Distributions int16_t prior;
3081*a325d9c4SApple OSS Distributions prior = OSAddAtomic16(1, &md->_dmaReferences);
3082*a325d9c4SApple OSS Distributions if (!prior) {
3083*a325d9c4SApple OSS Distributions md->_mapName = NULL;
3084*a325d9c4SApple OSS Distributions }
3085*a325d9c4SApple OSS Distributions } else {
3086*a325d9c4SApple OSS Distributions if (md->_dmaReferences) {
3087*a325d9c4SApple OSS Distributions OSAddAtomic16(-1, &md->_dmaReferences);
3088*a325d9c4SApple OSS Distributions } else {
3089*a325d9c4SApple OSS Distributions panic("_dmaReferences underflow");
3090*a325d9c4SApple OSS Distributions }
3091*a325d9c4SApple OSS Distributions }
3092*a325d9c4SApple OSS Distributions } else if (kIOMDWalkSegments != op) {
3093*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
3094*a325d9c4SApple OSS Distributions }
3095*a325d9c4SApple OSS Distributions
3096*a325d9c4SApple OSS Distributions // Get the next segment
3097*a325d9c4SApple OSS Distributions struct InternalState {
3098*a325d9c4SApple OSS Distributions IOMDDMAWalkSegmentArgs fIO;
3099*a325d9c4SApple OSS Distributions mach_vm_size_t fOffset2Index;
3100*a325d9c4SApple OSS Distributions mach_vm_size_t fNextOffset;
3101*a325d9c4SApple OSS Distributions UInt fIndex;
3102*a325d9c4SApple OSS Distributions } *isP;
3103*a325d9c4SApple OSS Distributions
3104*a325d9c4SApple OSS Distributions // Find the next segment
3105*a325d9c4SApple OSS Distributions if (dataSize < sizeof(*isP)) {
3106*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
3107*a325d9c4SApple OSS Distributions }
3108*a325d9c4SApple OSS Distributions
3109*a325d9c4SApple OSS Distributions isP = (InternalState *) vData;
3110*a325d9c4SApple OSS Distributions uint64_t offset = isP->fIO.fOffset;
3111*a325d9c4SApple OSS Distributions uint8_t mapped = isP->fIO.fMapped;
3112*a325d9c4SApple OSS Distributions uint64_t mappedBase;
3113*a325d9c4SApple OSS Distributions
3114*a325d9c4SApple OSS Distributions if (mapped && (kIOMemoryRemote & _flags)) {
3115*a325d9c4SApple OSS Distributions return kIOReturnNotAttached;
3116*a325d9c4SApple OSS Distributions }
3117*a325d9c4SApple OSS Distributions
3118*a325d9c4SApple OSS Distributions if (IOMapper::gSystem && mapped
3119*a325d9c4SApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3120*a325d9c4SApple OSS Distributions && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3121*a325d9c4SApple OSS Distributions // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3122*a325d9c4SApple OSS Distributions if (!_memoryEntries
3123*a325d9c4SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3124*a325d9c4SApple OSS Distributions return kIOReturnNoMemory;
3125*a325d9c4SApple OSS Distributions }
3126*a325d9c4SApple OSS Distributions
3127*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
3128*a325d9c4SApple OSS Distributions if (dataP->fMapper) {
3129*a325d9c4SApple OSS Distributions IODMAMapSpecification mapSpec;
3130*a325d9c4SApple OSS Distributions bzero(&mapSpec, sizeof(mapSpec));
3131*a325d9c4SApple OSS Distributions mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3132*a325d9c4SApple OSS Distributions mapSpec.alignment = dataP->fDMAMapAlignment;
3133*a325d9c4SApple OSS Distributions err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3134*a325d9c4SApple OSS Distributions if (kIOReturnSuccess != err) {
3135*a325d9c4SApple OSS Distributions return err;
3136*a325d9c4SApple OSS Distributions }
3137*a325d9c4SApple OSS Distributions dataP->fMappedBaseValid = true;
3138*a325d9c4SApple OSS Distributions }
3139*a325d9c4SApple OSS Distributions }
3140*a325d9c4SApple OSS Distributions
3141*a325d9c4SApple OSS Distributions if (mapped) {
3142*a325d9c4SApple OSS Distributions if (IOMapper::gSystem
3143*a325d9c4SApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3144*a325d9c4SApple OSS Distributions && _memoryEntries
3145*a325d9c4SApple OSS Distributions && (dataP = getDataP(_memoryEntries))
3146*a325d9c4SApple OSS Distributions && dataP->fMappedBaseValid) {
3147*a325d9c4SApple OSS Distributions mappedBase = dataP->fMappedBase;
3148*a325d9c4SApple OSS Distributions } else {
3149*a325d9c4SApple OSS Distributions mapped = 0;
3150*a325d9c4SApple OSS Distributions }
3151*a325d9c4SApple OSS Distributions }
3152*a325d9c4SApple OSS Distributions
3153*a325d9c4SApple OSS Distributions if (offset >= _length) {
3154*a325d9c4SApple OSS Distributions return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3155*a325d9c4SApple OSS Distributions }
3156*a325d9c4SApple OSS Distributions
3157*a325d9c4SApple OSS Distributions // Validate the previous offset
3158*a325d9c4SApple OSS Distributions UInt ind;
3159*a325d9c4SApple OSS Distributions mach_vm_size_t off2Ind = isP->fOffset2Index;
3160*a325d9c4SApple OSS Distributions if (!params
3161*a325d9c4SApple OSS Distributions && offset
3162*a325d9c4SApple OSS Distributions && (offset == isP->fNextOffset || off2Ind <= offset)) {
3163*a325d9c4SApple OSS Distributions ind = isP->fIndex;
3164*a325d9c4SApple OSS Distributions } else {
3165*a325d9c4SApple OSS Distributions ind = off2Ind = 0; // Start from beginning
3166*a325d9c4SApple OSS Distributions }
3167*a325d9c4SApple OSS Distributions mach_vm_size_t length;
3168*a325d9c4SApple OSS Distributions UInt64 address;
3169*a325d9c4SApple OSS Distributions
3170*a325d9c4SApple OSS Distributions if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3171*a325d9c4SApple OSS Distributions // Physical address based memory descriptor
3172*a325d9c4SApple OSS Distributions const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3173*a325d9c4SApple OSS Distributions
3174*a325d9c4SApple OSS Distributions // Find the range after the one that contains the offset
3175*a325d9c4SApple OSS Distributions mach_vm_size_t len;
3176*a325d9c4SApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3177*a325d9c4SApple OSS Distributions len = physP[ind].length;
3178*a325d9c4SApple OSS Distributions off2Ind += len;
3179*a325d9c4SApple OSS Distributions }
3180*a325d9c4SApple OSS Distributions
3181*a325d9c4SApple OSS Distributions // Calculate length within range and starting address
3182*a325d9c4SApple OSS Distributions length = off2Ind - offset;
3183*a325d9c4SApple OSS Distributions address = physP[ind - 1].address + len - length;
3184*a325d9c4SApple OSS Distributions
3185*a325d9c4SApple OSS Distributions if (true && mapped) {
3186*a325d9c4SApple OSS Distributions address = mappedBase + offset;
3187*a325d9c4SApple OSS Distributions } else {
3188*a325d9c4SApple OSS Distributions // see how far we can coalesce ranges
3189*a325d9c4SApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3190*a325d9c4SApple OSS Distributions len = physP[ind].length;
3191*a325d9c4SApple OSS Distributions length += len;
3192*a325d9c4SApple OSS Distributions off2Ind += len;
3193*a325d9c4SApple OSS Distributions ind++;
3194*a325d9c4SApple OSS Distributions }
3195*a325d9c4SApple OSS Distributions }
3196*a325d9c4SApple OSS Distributions
3197*a325d9c4SApple OSS Distributions // correct contiguous check overshoot
3198*a325d9c4SApple OSS Distributions ind--;
3199*a325d9c4SApple OSS Distributions off2Ind -= len;
3200*a325d9c4SApple OSS Distributions }
3201*a325d9c4SApple OSS Distributions #ifndef __LP64__
3202*a325d9c4SApple OSS Distributions else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3203*a325d9c4SApple OSS Distributions // Physical address based memory descriptor
3204*a325d9c4SApple OSS Distributions const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3205*a325d9c4SApple OSS Distributions
3206*a325d9c4SApple OSS Distributions // Find the range after the one that contains the offset
3207*a325d9c4SApple OSS Distributions mach_vm_size_t len;
3208*a325d9c4SApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3209*a325d9c4SApple OSS Distributions len = physP[ind].length;
3210*a325d9c4SApple OSS Distributions off2Ind += len;
3211*a325d9c4SApple OSS Distributions }
3212*a325d9c4SApple OSS Distributions
3213*a325d9c4SApple OSS Distributions // Calculate length within range and starting address
3214*a325d9c4SApple OSS Distributions length = off2Ind - offset;
3215*a325d9c4SApple OSS Distributions address = physP[ind - 1].address + len - length;
3216*a325d9c4SApple OSS Distributions
3217*a325d9c4SApple OSS Distributions if (true && mapped) {
3218*a325d9c4SApple OSS Distributions address = mappedBase + offset;
3219*a325d9c4SApple OSS Distributions } else {
3220*a325d9c4SApple OSS Distributions // see how far we can coalesce ranges
3221*a325d9c4SApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3222*a325d9c4SApple OSS Distributions len = physP[ind].length;
3223*a325d9c4SApple OSS Distributions length += len;
3224*a325d9c4SApple OSS Distributions off2Ind += len;
3225*a325d9c4SApple OSS Distributions ind++;
3226*a325d9c4SApple OSS Distributions }
3227*a325d9c4SApple OSS Distributions }
3228*a325d9c4SApple OSS Distributions // correct contiguous check overshoot
3229*a325d9c4SApple OSS Distributions ind--;
3230*a325d9c4SApple OSS Distributions off2Ind -= len;
3231*a325d9c4SApple OSS Distributions }
3232*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
3233*a325d9c4SApple OSS Distributions else {
3234*a325d9c4SApple OSS Distributions do {
3235*a325d9c4SApple OSS Distributions if (!_wireCount) {
3236*a325d9c4SApple OSS Distributions panic("IOGMD: not wired for the IODMACommand");
3237*a325d9c4SApple OSS Distributions }
3238*a325d9c4SApple OSS Distributions
3239*a325d9c4SApple OSS Distributions assert(_memoryEntries);
3240*a325d9c4SApple OSS Distributions
3241*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
3242*a325d9c4SApple OSS Distributions const ioPLBlock *ioplList = getIOPLList(dataP);
3243*a325d9c4SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3244*a325d9c4SApple OSS Distributions upl_page_info_t *pageList = getPageList(dataP);
3245*a325d9c4SApple OSS Distributions
3246*a325d9c4SApple OSS Distributions assert(numIOPLs > 0);
3247*a325d9c4SApple OSS Distributions
3248*a325d9c4SApple OSS Distributions // Scan through iopl info blocks looking for block containing offset
3249*a325d9c4SApple OSS Distributions while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3250*a325d9c4SApple OSS Distributions ind++;
3251*a325d9c4SApple OSS Distributions }
3252*a325d9c4SApple OSS Distributions
3253*a325d9c4SApple OSS Distributions // Go back to actual range as search goes past it
3254*a325d9c4SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ind - 1];
3255*a325d9c4SApple OSS Distributions off2Ind = ioplInfo.fIOMDOffset;
3256*a325d9c4SApple OSS Distributions
3257*a325d9c4SApple OSS Distributions if (ind < numIOPLs) {
3258*a325d9c4SApple OSS Distributions length = ioplList[ind].fIOMDOffset;
3259*a325d9c4SApple OSS Distributions } else {
3260*a325d9c4SApple OSS Distributions length = _length;
3261*a325d9c4SApple OSS Distributions }
3262*a325d9c4SApple OSS Distributions length -= offset; // Remainder within iopl
3263*a325d9c4SApple OSS Distributions
3264*a325d9c4SApple OSS Distributions // Subtract offset till this iopl in total list
3265*a325d9c4SApple OSS Distributions offset -= off2Ind;
3266*a325d9c4SApple OSS Distributions
3267*a325d9c4SApple OSS Distributions // If a mapped address is requested and this is a pre-mapped IOPL
3268*a325d9c4SApple OSS Distributions // then just need to compute an offset relative to the mapped base.
3269*a325d9c4SApple OSS Distributions if (mapped) {
3270*a325d9c4SApple OSS Distributions offset += (ioplInfo.fPageOffset & PAGE_MASK);
3271*a325d9c4SApple OSS Distributions address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3272*a325d9c4SApple OSS Distributions continue; // Done leave do/while(false) now
3273*a325d9c4SApple OSS Distributions }
3274*a325d9c4SApple OSS Distributions
3275*a325d9c4SApple OSS Distributions // The offset is rebased into the current iopl.
3276*a325d9c4SApple OSS Distributions // Now add the iopl 1st page offset.
3277*a325d9c4SApple OSS Distributions offset += ioplInfo.fPageOffset;
3278*a325d9c4SApple OSS Distributions
3279*a325d9c4SApple OSS Distributions // For external UPLs the fPageInfo field points directly to
3280*a325d9c4SApple OSS Distributions // the upl's upl_page_info_t array.
3281*a325d9c4SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
3282*a325d9c4SApple OSS Distributions pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3283*a325d9c4SApple OSS Distributions } else {
3284*a325d9c4SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
3285*a325d9c4SApple OSS Distributions }
3286*a325d9c4SApple OSS Distributions
3287*a325d9c4SApple OSS Distributions // Check for direct device non-paged memory
3288*a325d9c4SApple OSS Distributions if (ioplInfo.fFlags & kIOPLOnDevice) {
3289*a325d9c4SApple OSS Distributions address = ptoa_64(pageList->phys_addr) + offset;
3290*a325d9c4SApple OSS Distributions continue; // Done leave do/while(false) now
3291*a325d9c4SApple OSS Distributions }
3292*a325d9c4SApple OSS Distributions
3293*a325d9c4SApple OSS Distributions // Now we need compute the index into the pageList
3294*a325d9c4SApple OSS Distributions UInt pageInd = atop_32(offset);
3295*a325d9c4SApple OSS Distributions offset &= PAGE_MASK;
3296*a325d9c4SApple OSS Distributions
3297*a325d9c4SApple OSS Distributions // Compute the starting address of this segment
3298*a325d9c4SApple OSS Distributions IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3299*a325d9c4SApple OSS Distributions if (!pageAddr) {
3300*a325d9c4SApple OSS Distributions panic("!pageList phys_addr");
3301*a325d9c4SApple OSS Distributions }
3302*a325d9c4SApple OSS Distributions
3303*a325d9c4SApple OSS Distributions address = ptoa_64(pageAddr) + offset;
3304*a325d9c4SApple OSS Distributions
3305*a325d9c4SApple OSS Distributions // length is currently set to the length of the remainider of the iopl.
3306*a325d9c4SApple OSS Distributions // We need to check that the remainder of the iopl is contiguous.
3307*a325d9c4SApple OSS Distributions // This is indicated by pageList[ind].phys_addr being sequential.
3308*a325d9c4SApple OSS Distributions IOByteCount contigLength = PAGE_SIZE - offset;
3309*a325d9c4SApple OSS Distributions while (contigLength < length
3310*a325d9c4SApple OSS Distributions && ++pageAddr == pageList[++pageInd].phys_addr) {
3311*a325d9c4SApple OSS Distributions contigLength += PAGE_SIZE;
3312*a325d9c4SApple OSS Distributions }
3313*a325d9c4SApple OSS Distributions
3314*a325d9c4SApple OSS Distributions if (contigLength < length) {
3315*a325d9c4SApple OSS Distributions length = contigLength;
3316*a325d9c4SApple OSS Distributions }
3317*a325d9c4SApple OSS Distributions
3318*a325d9c4SApple OSS Distributions
3319*a325d9c4SApple OSS Distributions assert(address);
3320*a325d9c4SApple OSS Distributions assert(length);
3321*a325d9c4SApple OSS Distributions } while (false);
3322*a325d9c4SApple OSS Distributions }
3323*a325d9c4SApple OSS Distributions
3324*a325d9c4SApple OSS Distributions // Update return values and state
3325*a325d9c4SApple OSS Distributions isP->fIO.fIOVMAddr = address;
3326*a325d9c4SApple OSS Distributions isP->fIO.fLength = length;
3327*a325d9c4SApple OSS Distributions isP->fIndex = ind;
3328*a325d9c4SApple OSS Distributions isP->fOffset2Index = off2Ind;
3329*a325d9c4SApple OSS Distributions isP->fNextOffset = isP->fIO.fOffset + length;
3330*a325d9c4SApple OSS Distributions
3331*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
3332*a325d9c4SApple OSS Distributions }
3333*a325d9c4SApple OSS Distributions
/*
 * Return the address of the memory at 'offset' within this descriptor and,
 * via 'lengthOfSegment' (may be NULL), the count of contiguous bytes that
 * follow it.  Returns 0 — and reports a zero length — when offset >= _length
 * or no segment could be produced.
 *
 * options:
 *   _kIOMemorySourceSegment - walk the caller-supplied source ranges directly
 *                             (supports the unwired case; see comment below).
 *   kIOMemoryMapperNone     - return a raw physical address instead of a
 *                             system-mapper (IOVM) address.
 */
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	// Past-the-end offsets produce no segment.
	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		// Source-segment path: scan the original ranges without requiring
		// the memory to be wired or DMA-mapped.
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr += offset;
		length -= offset;

		// Coalesce following ranges while they are virtually contiguous.
		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Normal path: ask the DMA walk-segments state machine for the first
		// segment starting at 'offset'.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		// Request a mapper-translated address unless the caller opted out or
		// the memory is host-only / remote.
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		// kIOReturnOverrun simply means offset == _length; anything else is logged.
		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				// Caller wants a raw physical address but the segment came
				// back as a mapper address: translate it, then extend the
				// length one page at a time while the translated pages stay
				// physically contiguous.
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				// Never report more than the original segment length.
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// No address means no segment: report a zero length as well.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3440*a325d9c4SApple OSS Distributions
3441*a325d9c4SApple OSS Distributions #ifndef __LP64__
3442*a325d9c4SApple OSS Distributions #pragma clang diagnostic push
3443*a325d9c4SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3444*a325d9c4SApple OSS Distributions
3445*a325d9c4SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3446*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3447*a325d9c4SApple OSS Distributions {
3448*a325d9c4SApple OSS Distributions addr64_t address = 0;
3449*a325d9c4SApple OSS Distributions
3450*a325d9c4SApple OSS Distributions if (options & _kIOMemorySourceSegment) {
3451*a325d9c4SApple OSS Distributions address = getSourceSegment(offset, lengthOfSegment);
3452*a325d9c4SApple OSS Distributions } else if (options & kIOMemoryMapperNone) {
3453*a325d9c4SApple OSS Distributions address = getPhysicalSegment64(offset, lengthOfSegment);
3454*a325d9c4SApple OSS Distributions } else {
3455*a325d9c4SApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment);
3456*a325d9c4SApple OSS Distributions }
3457*a325d9c4SApple OSS Distributions
3458*a325d9c4SApple OSS Distributions return address;
3459*a325d9c4SApple OSS Distributions }
3460*a325d9c4SApple OSS Distributions #pragma clang diagnostic pop
3461*a325d9c4SApple OSS Distributions
3462*a325d9c4SApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3463*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3464*a325d9c4SApple OSS Distributions {
3465*a325d9c4SApple OSS Distributions return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3466*a325d9c4SApple OSS Distributions }
3467*a325d9c4SApple OSS Distributions
3468*a325d9c4SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3469*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3470*a325d9c4SApple OSS Distributions {
3471*a325d9c4SApple OSS Distributions addr64_t address = 0;
3472*a325d9c4SApple OSS Distributions IOByteCount length = 0;
3473*a325d9c4SApple OSS Distributions
3474*a325d9c4SApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment, 0);
3475*a325d9c4SApple OSS Distributions
3476*a325d9c4SApple OSS Distributions if (lengthOfSegment) {
3477*a325d9c4SApple OSS Distributions length = *lengthOfSegment;
3478*a325d9c4SApple OSS Distributions }
3479*a325d9c4SApple OSS Distributions
3480*a325d9c4SApple OSS Distributions if ((address + length) > 0x100000000ULL) {
3481*a325d9c4SApple OSS Distributions panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3482*a325d9c4SApple OSS Distributions address, (long) length, (getMetaClass())->getClassName());
3483*a325d9c4SApple OSS Distributions }
3484*a325d9c4SApple OSS Distributions
3485*a325d9c4SApple OSS Distributions return (IOPhysicalAddress) address;
3486*a325d9c4SApple OSS Distributions }
3487*a325d9c4SApple OSS Distributions
3488*a325d9c4SApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3489*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3490*a325d9c4SApple OSS Distributions {
3491*a325d9c4SApple OSS Distributions IOPhysicalAddress phys32;
3492*a325d9c4SApple OSS Distributions IOByteCount length;
3493*a325d9c4SApple OSS Distributions addr64_t phys64;
3494*a325d9c4SApple OSS Distributions IOMapper * mapper = NULL;
3495*a325d9c4SApple OSS Distributions
3496*a325d9c4SApple OSS Distributions phys32 = getPhysicalSegment(offset, lengthOfSegment);
3497*a325d9c4SApple OSS Distributions if (!phys32) {
3498*a325d9c4SApple OSS Distributions return 0;
3499*a325d9c4SApple OSS Distributions }
3500*a325d9c4SApple OSS Distributions
3501*a325d9c4SApple OSS Distributions if (gIOSystemMapper) {
3502*a325d9c4SApple OSS Distributions mapper = gIOSystemMapper;
3503*a325d9c4SApple OSS Distributions }
3504*a325d9c4SApple OSS Distributions
3505*a325d9c4SApple OSS Distributions if (mapper) {
3506*a325d9c4SApple OSS Distributions IOByteCount origLen;
3507*a325d9c4SApple OSS Distributions
3508*a325d9c4SApple OSS Distributions phys64 = mapper->mapToPhysicalAddress(phys32);
3509*a325d9c4SApple OSS Distributions origLen = *lengthOfSegment;
3510*a325d9c4SApple OSS Distributions length = page_size - (phys64 & (page_size - 1));
3511*a325d9c4SApple OSS Distributions while ((length < origLen)
3512*a325d9c4SApple OSS Distributions && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3513*a325d9c4SApple OSS Distributions length += page_size;
3514*a325d9c4SApple OSS Distributions }
3515*a325d9c4SApple OSS Distributions if (length > origLen) {
3516*a325d9c4SApple OSS Distributions length = origLen;
3517*a325d9c4SApple OSS Distributions }
3518*a325d9c4SApple OSS Distributions
3519*a325d9c4SApple OSS Distributions *lengthOfSegment = length;
3520*a325d9c4SApple OSS Distributions } else {
3521*a325d9c4SApple OSS Distributions phys64 = (addr64_t) phys32;
3522*a325d9c4SApple OSS Distributions }
3523*a325d9c4SApple OSS Distributions
3524*a325d9c4SApple OSS Distributions return phys64;
3525*a325d9c4SApple OSS Distributions }
3526*a325d9c4SApple OSS Distributions
3527*a325d9c4SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3528*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3529*a325d9c4SApple OSS Distributions {
3530*a325d9c4SApple OSS Distributions return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3531*a325d9c4SApple OSS Distributions }
3532*a325d9c4SApple OSS Distributions
3533*a325d9c4SApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3534*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3535*a325d9c4SApple OSS Distributions {
3536*a325d9c4SApple OSS Distributions return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3537*a325d9c4SApple OSS Distributions }
3538*a325d9c4SApple OSS Distributions
3539*a325d9c4SApple OSS Distributions #pragma clang diagnostic push
3540*a325d9c4SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3541*a325d9c4SApple OSS Distributions
3542*a325d9c4SApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3543*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3544*a325d9c4SApple OSS Distributions IOByteCount * lengthOfSegment)
3545*a325d9c4SApple OSS Distributions {
3546*a325d9c4SApple OSS Distributions if (_task == kernel_task) {
3547*a325d9c4SApple OSS Distributions return (void *) getSourceSegment(offset, lengthOfSegment);
3548*a325d9c4SApple OSS Distributions } else {
3549*a325d9c4SApple OSS Distributions panic("IOGMD::getVirtualSegment deprecated");
3550*a325d9c4SApple OSS Distributions }
3551*a325d9c4SApple OSS Distributions
3552*a325d9c4SApple OSS Distributions return NULL;
3553*a325d9c4SApple OSS Distributions }
3554*a325d9c4SApple OSS Distributions #pragma clang diagnostic pop
3555*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
3556*a325d9c4SApple OSS Distributions
3557*a325d9c4SApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const3558*a325d9c4SApple OSS Distributions IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3559*a325d9c4SApple OSS Distributions {
3560*a325d9c4SApple OSS Distributions IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
3561*a325d9c4SApple OSS Distributions DMACommandOps params;
3562*a325d9c4SApple OSS Distributions IOReturn err;
3563*a325d9c4SApple OSS Distributions
3564*a325d9c4SApple OSS Distributions params = (op & ~kIOMDDMACommandOperationMask & op);
3565*a325d9c4SApple OSS Distributions op &= kIOMDDMACommandOperationMask;
3566*a325d9c4SApple OSS Distributions
3567*a325d9c4SApple OSS Distributions if (kIOMDGetCharacteristics == op) {
3568*a325d9c4SApple OSS Distributions if (dataSize < sizeof(IOMDDMACharacteristics)) {
3569*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
3570*a325d9c4SApple OSS Distributions }
3571*a325d9c4SApple OSS Distributions
3572*a325d9c4SApple OSS Distributions IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3573*a325d9c4SApple OSS Distributions data->fLength = getLength();
3574*a325d9c4SApple OSS Distributions data->fSGCount = 0;
3575*a325d9c4SApple OSS Distributions data->fDirection = getDirection();
3576*a325d9c4SApple OSS Distributions data->fIsPrepared = true; // Assume prepared - fails safe
3577*a325d9c4SApple OSS Distributions } else if (kIOMDWalkSegments == op) {
3578*a325d9c4SApple OSS Distributions if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
3579*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
3580*a325d9c4SApple OSS Distributions }
3581*a325d9c4SApple OSS Distributions
3582*a325d9c4SApple OSS Distributions IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
3583*a325d9c4SApple OSS Distributions IOByteCount offset = (IOByteCount) data->fOffset;
3584*a325d9c4SApple OSS Distributions IOPhysicalLength length, nextLength;
3585*a325d9c4SApple OSS Distributions addr64_t addr, nextAddr;
3586*a325d9c4SApple OSS Distributions
3587*a325d9c4SApple OSS Distributions if (data->fMapped) {
3588*a325d9c4SApple OSS Distributions panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
3589*a325d9c4SApple OSS Distributions }
3590*a325d9c4SApple OSS Distributions addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
3591*a325d9c4SApple OSS Distributions offset += length;
3592*a325d9c4SApple OSS Distributions while (offset < getLength()) {
3593*a325d9c4SApple OSS Distributions nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
3594*a325d9c4SApple OSS Distributions if ((addr + length) != nextAddr) {
3595*a325d9c4SApple OSS Distributions break;
3596*a325d9c4SApple OSS Distributions }
3597*a325d9c4SApple OSS Distributions length += nextLength;
3598*a325d9c4SApple OSS Distributions offset += nextLength;
3599*a325d9c4SApple OSS Distributions }
3600*a325d9c4SApple OSS Distributions data->fIOVMAddr = addr;
3601*a325d9c4SApple OSS Distributions data->fLength = length;
3602*a325d9c4SApple OSS Distributions } else if (kIOMDAddDMAMapSpec == op) {
3603*a325d9c4SApple OSS Distributions return kIOReturnUnsupported;
3604*a325d9c4SApple OSS Distributions } else if (kIOMDDMAMap == op) {
3605*a325d9c4SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3606*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
3607*a325d9c4SApple OSS Distributions }
3608*a325d9c4SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3609*a325d9c4SApple OSS Distributions
3610*a325d9c4SApple OSS Distributions err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3611*a325d9c4SApple OSS Distributions
3612*a325d9c4SApple OSS Distributions return err;
3613*a325d9c4SApple OSS Distributions } else if (kIOMDDMAUnmap == op) {
3614*a325d9c4SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3615*a325d9c4SApple OSS Distributions return kIOReturnUnderrun;
3616*a325d9c4SApple OSS Distributions }
3617*a325d9c4SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3618*a325d9c4SApple OSS Distributions
3619*a325d9c4SApple OSS Distributions err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3620*a325d9c4SApple OSS Distributions
3621*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
3622*a325d9c4SApple OSS Distributions } else {
3623*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
3624*a325d9c4SApple OSS Distributions }
3625*a325d9c4SApple OSS Distributions
3626*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
3627*a325d9c4SApple OSS Distributions }
3628*a325d9c4SApple OSS Distributions
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	// Change the purgeable state of the memory backing this descriptor.
	// Returns the previous state through *oldState when requested.
	IOReturn err = kIOReturnSuccess;

	vm_purgable_t control;
	int state;

	// Purgeable control is meaningless for remote (non-local) descriptors.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		// Backed by a memory-entry reference: the superclass handles it.
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable kernel buffers must go through _memRef path above.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				// Physical-address descriptors have no owning task/map.
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					// NOTE(review): returns a kern_return_t value through an
					// IOReturn; kept as-is per existing convention here.
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, 0);

			// Translate the IOKit purgeable request into VM control/state.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			// Apply the state change; 'state' is updated to the prior state.
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Convert the VM's previous state back to IOKit encoding.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3692*a325d9c4SApple OSS Distributions
3693*a325d9c4SApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3694*a325d9c4SApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3695*a325d9c4SApple OSS Distributions IOOptionBits * oldState )
3696*a325d9c4SApple OSS Distributions {
3697*a325d9c4SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3698*a325d9c4SApple OSS Distributions
3699*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3700*a325d9c4SApple OSS Distributions LOCK;
3701*a325d9c4SApple OSS Distributions }
3702*a325d9c4SApple OSS Distributions if (_memRef) {
3703*a325d9c4SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3704*a325d9c4SApple OSS Distributions }
3705*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3706*a325d9c4SApple OSS Distributions UNLOCK;
3707*a325d9c4SApple OSS Distributions }
3708*a325d9c4SApple OSS Distributions
3709*a325d9c4SApple OSS Distributions return err;
3710*a325d9c4SApple OSS Distributions }
3711*a325d9c4SApple OSS Distributions
3712*a325d9c4SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3713*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3714*a325d9c4SApple OSS Distributions int newLedgerTag,
3715*a325d9c4SApple OSS Distributions IOOptionBits newLedgerOptions )
3716*a325d9c4SApple OSS Distributions {
3717*a325d9c4SApple OSS Distributions IOReturn err = kIOReturnSuccess;
3718*a325d9c4SApple OSS Distributions
3719*a325d9c4SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3720*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3721*a325d9c4SApple OSS Distributions return kIOReturnNotAttached;
3722*a325d9c4SApple OSS Distributions }
3723*a325d9c4SApple OSS Distributions
3724*a325d9c4SApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3725*a325d9c4SApple OSS Distributions return kIOReturnUnsupported;
3726*a325d9c4SApple OSS Distributions }
3727*a325d9c4SApple OSS Distributions
3728*a325d9c4SApple OSS Distributions if (_memRef) {
3729*a325d9c4SApple OSS Distributions err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3730*a325d9c4SApple OSS Distributions } else {
3731*a325d9c4SApple OSS Distributions err = kIOReturnUnsupported;
3732*a325d9c4SApple OSS Distributions }
3733*a325d9c4SApple OSS Distributions
3734*a325d9c4SApple OSS Distributions return err;
3735*a325d9c4SApple OSS Distributions }
3736*a325d9c4SApple OSS Distributions
3737*a325d9c4SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3738*a325d9c4SApple OSS Distributions IOMemoryDescriptor::setOwnership( task_t newOwner,
3739*a325d9c4SApple OSS Distributions int newLedgerTag,
3740*a325d9c4SApple OSS Distributions IOOptionBits newLedgerOptions )
3741*a325d9c4SApple OSS Distributions {
3742*a325d9c4SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3743*a325d9c4SApple OSS Distributions
3744*a325d9c4SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3745*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3746*a325d9c4SApple OSS Distributions return kIOReturnNotAttached;
3747*a325d9c4SApple OSS Distributions }
3748*a325d9c4SApple OSS Distributions
3749*a325d9c4SApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3750*a325d9c4SApple OSS Distributions return kIOReturnUnsupported;
3751*a325d9c4SApple OSS Distributions }
3752*a325d9c4SApple OSS Distributions
3753*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3754*a325d9c4SApple OSS Distributions LOCK;
3755*a325d9c4SApple OSS Distributions }
3756*a325d9c4SApple OSS Distributions if (_memRef) {
3757*a325d9c4SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3758*a325d9c4SApple OSS Distributions } else {
3759*a325d9c4SApple OSS Distributions IOMultiMemoryDescriptor * mmd;
3760*a325d9c4SApple OSS Distributions IOSubMemoryDescriptor * smd;
3761*a325d9c4SApple OSS Distributions if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3762*a325d9c4SApple OSS Distributions err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3763*a325d9c4SApple OSS Distributions } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3764*a325d9c4SApple OSS Distributions err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3765*a325d9c4SApple OSS Distributions }
3766*a325d9c4SApple OSS Distributions }
3767*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3768*a325d9c4SApple OSS Distributions UNLOCK;
3769*a325d9c4SApple OSS Distributions }
3770*a325d9c4SApple OSS Distributions
3771*a325d9c4SApple OSS Distributions return err;
3772*a325d9c4SApple OSS Distributions }
3773*a325d9c4SApple OSS Distributions
3774*a325d9c4SApple OSS Distributions
3775*a325d9c4SApple OSS Distributions uint64_t
getDMAMapLength(uint64_t * offset)3776*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3777*a325d9c4SApple OSS Distributions {
3778*a325d9c4SApple OSS Distributions uint64_t length;
3779*a325d9c4SApple OSS Distributions
3780*a325d9c4SApple OSS Distributions if (_memRef) {
3781*a325d9c4SApple OSS Distributions length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3782*a325d9c4SApple OSS Distributions } else {
3783*a325d9c4SApple OSS Distributions IOByteCount iterate, segLen;
3784*a325d9c4SApple OSS Distributions IOPhysicalAddress sourceAddr, sourceAlign;
3785*a325d9c4SApple OSS Distributions
3786*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3787*a325d9c4SApple OSS Distributions LOCK;
3788*a325d9c4SApple OSS Distributions }
3789*a325d9c4SApple OSS Distributions length = 0;
3790*a325d9c4SApple OSS Distributions iterate = 0;
3791*a325d9c4SApple OSS Distributions while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3792*a325d9c4SApple OSS Distributions sourceAlign = (sourceAddr & page_mask);
3793*a325d9c4SApple OSS Distributions if (offset && !iterate) {
3794*a325d9c4SApple OSS Distributions *offset = sourceAlign;
3795*a325d9c4SApple OSS Distributions }
3796*a325d9c4SApple OSS Distributions length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3797*a325d9c4SApple OSS Distributions iterate += segLen;
3798*a325d9c4SApple OSS Distributions }
3799*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3800*a325d9c4SApple OSS Distributions UNLOCK;
3801*a325d9c4SApple OSS Distributions }
3802*a325d9c4SApple OSS Distributions }
3803*a325d9c4SApple OSS Distributions
3804*a325d9c4SApple OSS Distributions return length;
3805*a325d9c4SApple OSS Distributions }
3806*a325d9c4SApple OSS Distributions
3807*a325d9c4SApple OSS Distributions
3808*a325d9c4SApple OSS Distributions IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3809*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3810*a325d9c4SApple OSS Distributions IOByteCount * dirtyPageCount )
3811*a325d9c4SApple OSS Distributions {
3812*a325d9c4SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3813*a325d9c4SApple OSS Distributions
3814*a325d9c4SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3815*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3816*a325d9c4SApple OSS Distributions return kIOReturnNotAttached;
3817*a325d9c4SApple OSS Distributions }
3818*a325d9c4SApple OSS Distributions
3819*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3820*a325d9c4SApple OSS Distributions LOCK;
3821*a325d9c4SApple OSS Distributions }
3822*a325d9c4SApple OSS Distributions if (_memRef) {
3823*a325d9c4SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3824*a325d9c4SApple OSS Distributions } else {
3825*a325d9c4SApple OSS Distributions IOMultiMemoryDescriptor * mmd;
3826*a325d9c4SApple OSS Distributions IOSubMemoryDescriptor * smd;
3827*a325d9c4SApple OSS Distributions if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3828*a325d9c4SApple OSS Distributions err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3829*a325d9c4SApple OSS Distributions } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3830*a325d9c4SApple OSS Distributions err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3831*a325d9c4SApple OSS Distributions }
3832*a325d9c4SApple OSS Distributions }
3833*a325d9c4SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3834*a325d9c4SApple OSS Distributions UNLOCK;
3835*a325d9c4SApple OSS Distributions }
3836*a325d9c4SApple OSS Distributions
3837*a325d9c4SApple OSS Distributions return err;
3838*a325d9c4SApple OSS Distributions }
3839*a325d9c4SApple OSS Distributions
3840*a325d9c4SApple OSS Distributions
3841*a325d9c4SApple OSS Distributions #if defined(__arm__) || defined(__arm64__)
3842*a325d9c4SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3843*a325d9c4SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3844*a325d9c4SApple OSS Distributions #else /* defined(__arm__) || defined(__arm64__) */
3845*a325d9c4SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3846*a325d9c4SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3847*a325d9c4SApple OSS Distributions #endif /* defined(__arm__) || defined(__arm64__) */
3848*a325d9c4SApple OSS Distributions
3849*a325d9c4SApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3850*a325d9c4SApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3851*a325d9c4SApple OSS Distributions {
3852*a325d9c4SApple OSS Distributions ppnum_t page, end;
3853*a325d9c4SApple OSS Distributions
3854*a325d9c4SApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3855*a325d9c4SApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3856*a325d9c4SApple OSS Distributions for (; page < end; page++) {
3857*a325d9c4SApple OSS Distributions pmap_clear_noencrypt(page);
3858*a325d9c4SApple OSS Distributions }
3859*a325d9c4SApple OSS Distributions }
3860*a325d9c4SApple OSS Distributions
3861*a325d9c4SApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3862*a325d9c4SApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3863*a325d9c4SApple OSS Distributions {
3864*a325d9c4SApple OSS Distributions ppnum_t page, end;
3865*a325d9c4SApple OSS Distributions
3866*a325d9c4SApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3867*a325d9c4SApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3868*a325d9c4SApple OSS Distributions for (; page < end; page++) {
3869*a325d9c4SApple OSS Distributions pmap_set_noencrypt(page);
3870*a325d9c4SApple OSS Distributions }
3871*a325d9c4SApple OSS Distributions }
3872*a325d9c4SApple OSS Distributions
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	// Apply a physical-page operation (cache flush/store, encrypt attribute
	// set/clear) across the descriptor's segments in [offset, offset+length).
	IOByteCount remaining;
	unsigned int res;
	// Simple per-segment operation (x86, and the encrypt ops everywhere).
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm__) || defined(__arm64__)
	// ARM cache ops take the remaining byte count and report a result
	// through *result; a nonzero result terminates the walk early.
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	// Remote (non-local) descriptors have no local pages to operate on.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm__) || defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Coherent-I/O configuration: a single (0,0,0) call suffices,
		// no per-segment walk is performed.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm__) || defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm__) || defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// Coherent-I/O configuration: see flush case above.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm__) || defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	// Unknown option: neither function pointer was set.
#if defined(__arm__) || defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm__) || defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clamp the request to the descriptor's actual length.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// Clamp values so the (unsigned int) casts below cannot truncate
		// to a misleadingly small or page-unaligned quantity.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm__) || defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// Nonzero result: the routine handled (or aborted) the rest.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm__) || defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm__) || defined(__arm64__) */

		offset += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// Anything left over means a segment lookup failed mid-walk.
	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
3987*a325d9c4SApple OSS Distributions
3988*a325d9c4SApple OSS Distributions /*
3989*a325d9c4SApple OSS Distributions *
3990*a325d9c4SApple OSS Distributions */
3991*a325d9c4SApple OSS Distributions
3992*a325d9c4SApple OSS Distributions #if defined(__i386__) || defined(__x86_64__)
3993*a325d9c4SApple OSS Distributions
3994*a325d9c4SApple OSS Distributions extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
3995*a325d9c4SApple OSS Distributions
3996*a325d9c4SApple OSS Distributions /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
3997*a325d9c4SApple OSS Distributions * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
3998*a325d9c4SApple OSS Distributions * kernel non-text data -- should we just add another range instead?
3999*a325d9c4SApple OSS Distributions */
4000*a325d9c4SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4001*a325d9c4SApple OSS Distributions #define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4002*a325d9c4SApple OSS Distributions
4003*a325d9c4SApple OSS Distributions #elif defined(__arm__) || defined(__arm64__)
4004*a325d9c4SApple OSS Distributions
4005*a325d9c4SApple OSS Distributions extern vm_offset_t static_memory_end;
4006*a325d9c4SApple OSS Distributions
4007*a325d9c4SApple OSS Distributions #if defined(__arm64__)
4008*a325d9c4SApple OSS Distributions #define io_kernel_static_start vm_kext_base
4009*a325d9c4SApple OSS Distributions #else /* defined(__arm64__) */
4010*a325d9c4SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4011*a325d9c4SApple OSS Distributions #endif /* defined(__arm64__) */
4012*a325d9c4SApple OSS Distributions
4013*a325d9c4SApple OSS Distributions #define io_kernel_static_end static_memory_end
4014*a325d9c4SApple OSS Distributions
4015*a325d9c4SApple OSS Distributions #else
4016*a325d9c4SApple OSS Distributions #error io_kernel_static_end is undefined for this architecture
4017*a325d9c4SApple OSS Distributions #endif
4018*a325d9c4SApple OSS Distributions
4019*a325d9c4SApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4020*a325d9c4SApple OSS Distributions io_get_kernel_static_upl(
4021*a325d9c4SApple OSS Distributions vm_map_t /* map */,
4022*a325d9c4SApple OSS Distributions uintptr_t offset,
4023*a325d9c4SApple OSS Distributions upl_size_t *upl_size,
4024*a325d9c4SApple OSS Distributions unsigned int *page_offset,
4025*a325d9c4SApple OSS Distributions upl_t *upl,
4026*a325d9c4SApple OSS Distributions upl_page_info_array_t page_list,
4027*a325d9c4SApple OSS Distributions unsigned int *count,
4028*a325d9c4SApple OSS Distributions ppnum_t *highest_page)
4029*a325d9c4SApple OSS Distributions {
4030*a325d9c4SApple OSS Distributions unsigned int pageCount, page;
4031*a325d9c4SApple OSS Distributions ppnum_t phys;
4032*a325d9c4SApple OSS Distributions ppnum_t highestPage = 0;
4033*a325d9c4SApple OSS Distributions
4034*a325d9c4SApple OSS Distributions pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4035*a325d9c4SApple OSS Distributions if (pageCount > *count) {
4036*a325d9c4SApple OSS Distributions pageCount = *count;
4037*a325d9c4SApple OSS Distributions }
4038*a325d9c4SApple OSS Distributions *upl_size = (upl_size_t) ptoa_64(pageCount);
4039*a325d9c4SApple OSS Distributions
4040*a325d9c4SApple OSS Distributions *upl = NULL;
4041*a325d9c4SApple OSS Distributions *page_offset = ((unsigned int) page_mask & offset);
4042*a325d9c4SApple OSS Distributions
4043*a325d9c4SApple OSS Distributions for (page = 0; page < pageCount; page++) {
4044*a325d9c4SApple OSS Distributions phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4045*a325d9c4SApple OSS Distributions if (!phys) {
4046*a325d9c4SApple OSS Distributions break;
4047*a325d9c4SApple OSS Distributions }
4048*a325d9c4SApple OSS Distributions page_list[page].phys_addr = phys;
4049*a325d9c4SApple OSS Distributions page_list[page].free_when_done = 0;
4050*a325d9c4SApple OSS Distributions page_list[page].absent = 0;
4051*a325d9c4SApple OSS Distributions page_list[page].dirty = 0;
4052*a325d9c4SApple OSS Distributions page_list[page].precious = 0;
4053*a325d9c4SApple OSS Distributions page_list[page].device = 0;
4054*a325d9c4SApple OSS Distributions if (phys > highestPage) {
4055*a325d9c4SApple OSS Distributions highestPage = phys;
4056*a325d9c4SApple OSS Distributions }
4057*a325d9c4SApple OSS Distributions }
4058*a325d9c4SApple OSS Distributions
4059*a325d9c4SApple OSS Distributions *highest_page = highestPage;
4060*a325d9c4SApple OSS Distributions
4061*a325d9c4SApple OSS Distributions return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4062*a325d9c4SApple OSS Distributions }
4063*a325d9c4SApple OSS Distributions
4064*a325d9c4SApple OSS Distributions IOReturn
wireVirtual(IODirection forDirection)4065*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4066*a325d9c4SApple OSS Distributions {
4067*a325d9c4SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4068*a325d9c4SApple OSS Distributions IOReturn error = kIOReturnSuccess;
4069*a325d9c4SApple OSS Distributions ioGMDData *dataP;
4070*a325d9c4SApple OSS Distributions upl_page_info_array_t pageInfo;
4071*a325d9c4SApple OSS Distributions ppnum_t mapBase;
4072*a325d9c4SApple OSS Distributions vm_tag_t tag = VM_KERN_MEMORY_NONE;
4073*a325d9c4SApple OSS Distributions mach_vm_size_t numBytesWired = 0;
4074*a325d9c4SApple OSS Distributions
4075*a325d9c4SApple OSS Distributions assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4076*a325d9c4SApple OSS Distributions
4077*a325d9c4SApple OSS Distributions if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4078*a325d9c4SApple OSS Distributions forDirection = (IODirection) (forDirection | getDirection());
4079*a325d9c4SApple OSS Distributions }
4080*a325d9c4SApple OSS Distributions
4081*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
4082*a325d9c4SApple OSS Distributions upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4083*a325d9c4SApple OSS Distributions switch (kIODirectionOutIn & forDirection) {
4084*a325d9c4SApple OSS Distributions case kIODirectionOut:
4085*a325d9c4SApple OSS Distributions // Pages do not need to be marked as dirty on commit
4086*a325d9c4SApple OSS Distributions uplFlags = UPL_COPYOUT_FROM;
4087*a325d9c4SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess;
4088*a325d9c4SApple OSS Distributions break;
4089*a325d9c4SApple OSS Distributions
4090*a325d9c4SApple OSS Distributions case kIODirectionIn:
4091*a325d9c4SApple OSS Distributions dataP->fDMAAccess = kIODMAMapWriteAccess;
4092*a325d9c4SApple OSS Distributions uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4093*a325d9c4SApple OSS Distributions break;
4094*a325d9c4SApple OSS Distributions
4095*a325d9c4SApple OSS Distributions default:
4096*a325d9c4SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4097*a325d9c4SApple OSS Distributions uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4098*a325d9c4SApple OSS Distributions break;
4099*a325d9c4SApple OSS Distributions }
4100*a325d9c4SApple OSS Distributions
4101*a325d9c4SApple OSS Distributions if (_wireCount) {
4102*a325d9c4SApple OSS Distributions if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4103*a325d9c4SApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
4104*a325d9c4SApple OSS Distributions error = kIOReturnNotWritable;
4105*a325d9c4SApple OSS Distributions }
4106*a325d9c4SApple OSS Distributions } else {
4107*a325d9c4SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4108*a325d9c4SApple OSS Distributions IOMapper *mapper;
4109*a325d9c4SApple OSS Distributions
4110*a325d9c4SApple OSS Distributions mapper = dataP->fMapper;
4111*a325d9c4SApple OSS Distributions dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4112*a325d9c4SApple OSS Distributions
4113*a325d9c4SApple OSS Distributions uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4114*a325d9c4SApple OSS Distributions tag = _kernelTag;
4115*a325d9c4SApple OSS Distributions if (VM_KERN_MEMORY_NONE == tag) {
4116*a325d9c4SApple OSS Distributions tag = IOMemoryTag(kernel_map);
4117*a325d9c4SApple OSS Distributions }
4118*a325d9c4SApple OSS Distributions
4119*a325d9c4SApple OSS Distributions if (kIODirectionPrepareToPhys32 & forDirection) {
4120*a325d9c4SApple OSS Distributions if (!mapper) {
4121*a325d9c4SApple OSS Distributions uplFlags |= UPL_NEED_32BIT_ADDR;
4122*a325d9c4SApple OSS Distributions }
4123*a325d9c4SApple OSS Distributions if (dataP->fDMAMapNumAddressBits > 32) {
4124*a325d9c4SApple OSS Distributions dataP->fDMAMapNumAddressBits = 32;
4125*a325d9c4SApple OSS Distributions }
4126*a325d9c4SApple OSS Distributions }
4127*a325d9c4SApple OSS Distributions if (kIODirectionPrepareNoFault & forDirection) {
4128*a325d9c4SApple OSS Distributions uplFlags |= UPL_REQUEST_NO_FAULT;
4129*a325d9c4SApple OSS Distributions }
4130*a325d9c4SApple OSS Distributions if (kIODirectionPrepareNoZeroFill & forDirection) {
4131*a325d9c4SApple OSS Distributions uplFlags |= UPL_NOZEROFILLIO;
4132*a325d9c4SApple OSS Distributions }
4133*a325d9c4SApple OSS Distributions if (kIODirectionPrepareNonCoherent & forDirection) {
4134*a325d9c4SApple OSS Distributions uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4135*a325d9c4SApple OSS Distributions }
4136*a325d9c4SApple OSS Distributions
4137*a325d9c4SApple OSS Distributions mapBase = 0;
4138*a325d9c4SApple OSS Distributions
4139*a325d9c4SApple OSS Distributions // Note that appendBytes(NULL) zeros the data up to the desired length
4140*a325d9c4SApple OSS Distributions size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4141*a325d9c4SApple OSS Distributions if (uplPageSize > ((unsigned int)uplPageSize)) {
4142*a325d9c4SApple OSS Distributions error = kIOReturnNoMemory;
4143*a325d9c4SApple OSS Distributions traceInterval.setEndArg2(error);
4144*a325d9c4SApple OSS Distributions return error;
4145*a325d9c4SApple OSS Distributions }
4146*a325d9c4SApple OSS Distributions if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4147*a325d9c4SApple OSS Distributions error = kIOReturnNoMemory;
4148*a325d9c4SApple OSS Distributions traceInterval.setEndArg2(error);
4149*a325d9c4SApple OSS Distributions return error;
4150*a325d9c4SApple OSS Distributions }
4151*a325d9c4SApple OSS Distributions dataP = NULL;
4152*a325d9c4SApple OSS Distributions
4153*a325d9c4SApple OSS Distributions // Find the appropriate vm_map for the given task
4154*a325d9c4SApple OSS Distributions vm_map_t curMap;
4155*a325d9c4SApple OSS Distributions if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4156*a325d9c4SApple OSS Distributions curMap = NULL;
4157*a325d9c4SApple OSS Distributions } else {
4158*a325d9c4SApple OSS Distributions curMap = get_task_map(_task);
4159*a325d9c4SApple OSS Distributions }
4160*a325d9c4SApple OSS Distributions
4161*a325d9c4SApple OSS Distributions // Iterate over the vector of virtual ranges
4162*a325d9c4SApple OSS Distributions Ranges vec = _ranges;
4163*a325d9c4SApple OSS Distributions unsigned int pageIndex = 0;
4164*a325d9c4SApple OSS Distributions IOByteCount mdOffset = 0;
4165*a325d9c4SApple OSS Distributions ppnum_t highestPage = 0;
4166*a325d9c4SApple OSS Distributions bool byteAlignUPL;
4167*a325d9c4SApple OSS Distributions
4168*a325d9c4SApple OSS Distributions IOMemoryEntry * memRefEntry = NULL;
4169*a325d9c4SApple OSS Distributions if (_memRef) {
4170*a325d9c4SApple OSS Distributions memRefEntry = &_memRef->entries[0];
4171*a325d9c4SApple OSS Distributions byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4172*a325d9c4SApple OSS Distributions } else {
4173*a325d9c4SApple OSS Distributions byteAlignUPL = true;
4174*a325d9c4SApple OSS Distributions }
4175*a325d9c4SApple OSS Distributions
4176*a325d9c4SApple OSS Distributions for (UInt range = 0; mdOffset < _length; range++) {
4177*a325d9c4SApple OSS Distributions ioPLBlock iopl;
4178*a325d9c4SApple OSS Distributions mach_vm_address_t startPage, startPageOffset;
4179*a325d9c4SApple OSS Distributions mach_vm_size_t numBytes;
4180*a325d9c4SApple OSS Distributions ppnum_t highPage = 0;
4181*a325d9c4SApple OSS Distributions
4182*a325d9c4SApple OSS Distributions if (_memRef) {
4183*a325d9c4SApple OSS Distributions if (range >= _memRef->count) {
4184*a325d9c4SApple OSS Distributions panic("memRefEntry");
4185*a325d9c4SApple OSS Distributions }
4186*a325d9c4SApple OSS Distributions memRefEntry = &_memRef->entries[range];
4187*a325d9c4SApple OSS Distributions numBytes = memRefEntry->size;
4188*a325d9c4SApple OSS Distributions startPage = -1ULL;
4189*a325d9c4SApple OSS Distributions if (byteAlignUPL) {
4190*a325d9c4SApple OSS Distributions startPageOffset = 0;
4191*a325d9c4SApple OSS Distributions } else {
4192*a325d9c4SApple OSS Distributions startPageOffset = (memRefEntry->start & PAGE_MASK);
4193*a325d9c4SApple OSS Distributions }
4194*a325d9c4SApple OSS Distributions } else {
4195*a325d9c4SApple OSS Distributions // Get the startPage address and length of vec[range]
4196*a325d9c4SApple OSS Distributions getAddrLenForInd(startPage, numBytes, type, vec, range);
4197*a325d9c4SApple OSS Distributions if (byteAlignUPL) {
4198*a325d9c4SApple OSS Distributions startPageOffset = 0;
4199*a325d9c4SApple OSS Distributions } else {
4200*a325d9c4SApple OSS Distributions startPageOffset = startPage & PAGE_MASK;
4201*a325d9c4SApple OSS Distributions startPage = trunc_page_64(startPage);
4202*a325d9c4SApple OSS Distributions }
4203*a325d9c4SApple OSS Distributions }
4204*a325d9c4SApple OSS Distributions iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4205*a325d9c4SApple OSS Distributions numBytes += startPageOffset;
4206*a325d9c4SApple OSS Distributions
4207*a325d9c4SApple OSS Distributions if (mapper) {
4208*a325d9c4SApple OSS Distributions iopl.fMappedPage = mapBase + pageIndex;
4209*a325d9c4SApple OSS Distributions } else {
4210*a325d9c4SApple OSS Distributions iopl.fMappedPage = 0;
4211*a325d9c4SApple OSS Distributions }
4212*a325d9c4SApple OSS Distributions
4213*a325d9c4SApple OSS Distributions // Iterate over the current range, creating UPLs
4214*a325d9c4SApple OSS Distributions while (numBytes) {
4215*a325d9c4SApple OSS Distributions vm_address_t kernelStart = (vm_address_t) startPage;
4216*a325d9c4SApple OSS Distributions vm_map_t theMap;
4217*a325d9c4SApple OSS Distributions if (curMap) {
4218*a325d9c4SApple OSS Distributions theMap = curMap;
4219*a325d9c4SApple OSS Distributions } else if (_memRef) {
4220*a325d9c4SApple OSS Distributions theMap = NULL;
4221*a325d9c4SApple OSS Distributions } else {
4222*a325d9c4SApple OSS Distributions assert(_task == kernel_task);
4223*a325d9c4SApple OSS Distributions theMap = IOPageableMapForAddress(kernelStart);
4224*a325d9c4SApple OSS Distributions }
4225*a325d9c4SApple OSS Distributions
4226*a325d9c4SApple OSS Distributions // ioplFlags is an in/out parameter
4227*a325d9c4SApple OSS Distributions upl_control_flags_t ioplFlags = uplFlags;
4228*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
4229*a325d9c4SApple OSS Distributions pageInfo = getPageList(dataP);
4230*a325d9c4SApple OSS Distributions upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4231*a325d9c4SApple OSS Distributions
4232*a325d9c4SApple OSS Distributions mach_vm_size_t ioplPhysSize;
4233*a325d9c4SApple OSS Distributions upl_size_t ioplSize;
4234*a325d9c4SApple OSS Distributions unsigned int numPageInfo;
4235*a325d9c4SApple OSS Distributions
4236*a325d9c4SApple OSS Distributions if (_memRef) {
4237*a325d9c4SApple OSS Distributions error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4238*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4239*a325d9c4SApple OSS Distributions } else {
4240*a325d9c4SApple OSS Distributions error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4241*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4242*a325d9c4SApple OSS Distributions }
4243*a325d9c4SApple OSS Distributions if (error != KERN_SUCCESS) {
4244*a325d9c4SApple OSS Distributions if (_memRef) {
4245*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4246*a325d9c4SApple OSS Distributions } else {
4247*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4248*a325d9c4SApple OSS Distributions }
4249*a325d9c4SApple OSS Distributions printf("entry size error %d\n", error);
4250*a325d9c4SApple OSS Distributions goto abortExit;
4251*a325d9c4SApple OSS Distributions }
4252*a325d9c4SApple OSS Distributions ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4253*a325d9c4SApple OSS Distributions numPageInfo = atop_32(ioplPhysSize);
4254*a325d9c4SApple OSS Distributions if (byteAlignUPL) {
4255*a325d9c4SApple OSS Distributions if (numBytes > ioplPhysSize) {
4256*a325d9c4SApple OSS Distributions ioplSize = ((typeof(ioplSize))ioplPhysSize);
4257*a325d9c4SApple OSS Distributions } else {
4258*a325d9c4SApple OSS Distributions ioplSize = ((typeof(ioplSize))numBytes);
4259*a325d9c4SApple OSS Distributions }
4260*a325d9c4SApple OSS Distributions } else {
4261*a325d9c4SApple OSS Distributions ioplSize = ((typeof(ioplSize))ioplPhysSize);
4262*a325d9c4SApple OSS Distributions }
4263*a325d9c4SApple OSS Distributions
4264*a325d9c4SApple OSS Distributions if (_memRef) {
4265*a325d9c4SApple OSS Distributions memory_object_offset_t entryOffset;
4266*a325d9c4SApple OSS Distributions
4267*a325d9c4SApple OSS Distributions entryOffset = mdOffset;
4268*a325d9c4SApple OSS Distributions if (byteAlignUPL) {
4269*a325d9c4SApple OSS Distributions entryOffset = (entryOffset - memRefEntry->offset);
4270*a325d9c4SApple OSS Distributions } else {
4271*a325d9c4SApple OSS Distributions entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4272*a325d9c4SApple OSS Distributions }
4273*a325d9c4SApple OSS Distributions if (ioplSize > (memRefEntry->size - entryOffset)) {
4274*a325d9c4SApple OSS Distributions ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4275*a325d9c4SApple OSS Distributions }
4276*a325d9c4SApple OSS Distributions error = memory_object_iopl_request(memRefEntry->entry,
4277*a325d9c4SApple OSS Distributions entryOffset,
4278*a325d9c4SApple OSS Distributions &ioplSize,
4279*a325d9c4SApple OSS Distributions &iopl.fIOPL,
4280*a325d9c4SApple OSS Distributions baseInfo,
4281*a325d9c4SApple OSS Distributions &numPageInfo,
4282*a325d9c4SApple OSS Distributions &ioplFlags,
4283*a325d9c4SApple OSS Distributions tag);
4284*a325d9c4SApple OSS Distributions } else if ((theMap == kernel_map)
4285*a325d9c4SApple OSS Distributions && (kernelStart >= io_kernel_static_start)
4286*a325d9c4SApple OSS Distributions && (kernelStart < io_kernel_static_end)) {
4287*a325d9c4SApple OSS Distributions error = io_get_kernel_static_upl(theMap,
4288*a325d9c4SApple OSS Distributions kernelStart,
4289*a325d9c4SApple OSS Distributions &ioplSize,
4290*a325d9c4SApple OSS Distributions &iopl.fPageOffset,
4291*a325d9c4SApple OSS Distributions &iopl.fIOPL,
4292*a325d9c4SApple OSS Distributions baseInfo,
4293*a325d9c4SApple OSS Distributions &numPageInfo,
4294*a325d9c4SApple OSS Distributions &highPage);
4295*a325d9c4SApple OSS Distributions } else {
4296*a325d9c4SApple OSS Distributions assert(theMap);
4297*a325d9c4SApple OSS Distributions error = vm_map_create_upl(theMap,
4298*a325d9c4SApple OSS Distributions startPage,
4299*a325d9c4SApple OSS Distributions (upl_size_t*)&ioplSize,
4300*a325d9c4SApple OSS Distributions &iopl.fIOPL,
4301*a325d9c4SApple OSS Distributions baseInfo,
4302*a325d9c4SApple OSS Distributions &numPageInfo,
4303*a325d9c4SApple OSS Distributions &ioplFlags,
4304*a325d9c4SApple OSS Distributions tag);
4305*a325d9c4SApple OSS Distributions }
4306*a325d9c4SApple OSS Distributions
4307*a325d9c4SApple OSS Distributions if (error != KERN_SUCCESS) {
4308*a325d9c4SApple OSS Distributions traceInterval.setEndArg2(error);
4309*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4310*a325d9c4SApple OSS Distributions goto abortExit;
4311*a325d9c4SApple OSS Distributions }
4312*a325d9c4SApple OSS Distributions
4313*a325d9c4SApple OSS Distributions assert(ioplSize);
4314*a325d9c4SApple OSS Distributions
4315*a325d9c4SApple OSS Distributions if (iopl.fIOPL) {
4316*a325d9c4SApple OSS Distributions highPage = upl_get_highest_page(iopl.fIOPL);
4317*a325d9c4SApple OSS Distributions }
4318*a325d9c4SApple OSS Distributions if (highPage > highestPage) {
4319*a325d9c4SApple OSS Distributions highestPage = highPage;
4320*a325d9c4SApple OSS Distributions }
4321*a325d9c4SApple OSS Distributions
4322*a325d9c4SApple OSS Distributions if (baseInfo->device) {
4323*a325d9c4SApple OSS Distributions numPageInfo = 1;
4324*a325d9c4SApple OSS Distributions iopl.fFlags = kIOPLOnDevice;
4325*a325d9c4SApple OSS Distributions } else {
4326*a325d9c4SApple OSS Distributions iopl.fFlags = 0;
4327*a325d9c4SApple OSS Distributions }
4328*a325d9c4SApple OSS Distributions
4329*a325d9c4SApple OSS Distributions if (byteAlignUPL) {
4330*a325d9c4SApple OSS Distributions if (iopl.fIOPL) {
4331*a325d9c4SApple OSS Distributions DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4332*a325d9c4SApple OSS Distributions iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4333*a325d9c4SApple OSS Distributions }
4334*a325d9c4SApple OSS Distributions if (startPage != (mach_vm_address_t)-1) {
4335*a325d9c4SApple OSS Distributions // assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4336*a325d9c4SApple OSS Distributions startPage -= iopl.fPageOffset;
4337*a325d9c4SApple OSS Distributions }
4338*a325d9c4SApple OSS Distributions ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4339*a325d9c4SApple OSS Distributions numBytes += iopl.fPageOffset;
4340*a325d9c4SApple OSS Distributions }
4341*a325d9c4SApple OSS Distributions
4342*a325d9c4SApple OSS Distributions iopl.fIOMDOffset = mdOffset;
4343*a325d9c4SApple OSS Distributions iopl.fPageInfo = pageIndex;
4344*a325d9c4SApple OSS Distributions
4345*a325d9c4SApple OSS Distributions if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4346*a325d9c4SApple OSS Distributions // Clean up partial created and unsaved iopl
4347*a325d9c4SApple OSS Distributions if (iopl.fIOPL) {
4348*a325d9c4SApple OSS Distributions upl_abort(iopl.fIOPL, 0);
4349*a325d9c4SApple OSS Distributions upl_deallocate(iopl.fIOPL);
4350*a325d9c4SApple OSS Distributions }
4351*a325d9c4SApple OSS Distributions error = kIOReturnNoMemory;
4352*a325d9c4SApple OSS Distributions traceInterval.setEndArg2(error);
4353*a325d9c4SApple OSS Distributions goto abortExit;
4354*a325d9c4SApple OSS Distributions }
4355*a325d9c4SApple OSS Distributions dataP = NULL;
4356*a325d9c4SApple OSS Distributions
4357*a325d9c4SApple OSS Distributions // Check for a multiple iopl's in one virtual range
4358*a325d9c4SApple OSS Distributions pageIndex += numPageInfo;
4359*a325d9c4SApple OSS Distributions mdOffset -= iopl.fPageOffset;
4360*a325d9c4SApple OSS Distributions numBytesWired += ioplSize;
4361*a325d9c4SApple OSS Distributions if (ioplSize < numBytes) {
4362*a325d9c4SApple OSS Distributions numBytes -= ioplSize;
4363*a325d9c4SApple OSS Distributions if (startPage != (mach_vm_address_t)-1) {
4364*a325d9c4SApple OSS Distributions startPage += ioplSize;
4365*a325d9c4SApple OSS Distributions }
4366*a325d9c4SApple OSS Distributions mdOffset += ioplSize;
4367*a325d9c4SApple OSS Distributions iopl.fPageOffset = 0;
4368*a325d9c4SApple OSS Distributions if (mapper) {
4369*a325d9c4SApple OSS Distributions iopl.fMappedPage = mapBase + pageIndex;
4370*a325d9c4SApple OSS Distributions }
4371*a325d9c4SApple OSS Distributions } else {
4372*a325d9c4SApple OSS Distributions mdOffset += numBytes;
4373*a325d9c4SApple OSS Distributions break;
4374*a325d9c4SApple OSS Distributions }
4375*a325d9c4SApple OSS Distributions }
4376*a325d9c4SApple OSS Distributions }
4377*a325d9c4SApple OSS Distributions
4378*a325d9c4SApple OSS Distributions _highestPage = highestPage;
4379*a325d9c4SApple OSS Distributions DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4380*a325d9c4SApple OSS Distributions
4381*a325d9c4SApple OSS Distributions if (UPL_COPYOUT_FROM & uplFlags) {
4382*a325d9c4SApple OSS Distributions _flags |= kIOMemoryPreparedReadOnly;
4383*a325d9c4SApple OSS Distributions }
4384*a325d9c4SApple OSS Distributions traceInterval.setEndCodes(numBytesWired, error);
4385*a325d9c4SApple OSS Distributions }
4386*a325d9c4SApple OSS Distributions
4387*a325d9c4SApple OSS Distributions #if IOTRACKING
4388*a325d9c4SApple OSS Distributions if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4389*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
4390*a325d9c4SApple OSS Distributions if (!dataP->fWireTracking.link.next) {
4391*a325d9c4SApple OSS Distributions IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4392*a325d9c4SApple OSS Distributions }
4393*a325d9c4SApple OSS Distributions }
4394*a325d9c4SApple OSS Distributions #endif /* IOTRACKING */
4395*a325d9c4SApple OSS Distributions
4396*a325d9c4SApple OSS Distributions return error;
4397*a325d9c4SApple OSS Distributions
4398*a325d9c4SApple OSS Distributions abortExit:
4399*a325d9c4SApple OSS Distributions {
4400*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
4401*a325d9c4SApple OSS Distributions UInt done = getNumIOPL(_memoryEntries, dataP);
4402*a325d9c4SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
4403*a325d9c4SApple OSS Distributions
4404*a325d9c4SApple OSS Distributions for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4405*a325d9c4SApple OSS Distributions if (ioplList[ioplIdx].fIOPL) {
4406*a325d9c4SApple OSS Distributions upl_abort(ioplList[ioplIdx].fIOPL, 0);
4407*a325d9c4SApple OSS Distributions upl_deallocate(ioplList[ioplIdx].fIOPL);
4408*a325d9c4SApple OSS Distributions }
4409*a325d9c4SApple OSS Distributions }
4410*a325d9c4SApple OSS Distributions _memoryEntries->setLength(computeDataSize(0, 0));
4411*a325d9c4SApple OSS Distributions }
4412*a325d9c4SApple OSS Distributions
4413*a325d9c4SApple OSS Distributions if (error == KERN_FAILURE) {
4414*a325d9c4SApple OSS Distributions error = kIOReturnCannotWire;
4415*a325d9c4SApple OSS Distributions } else if (error == KERN_MEMORY_ERROR) {
4416*a325d9c4SApple OSS Distributions error = kIOReturnNoResources;
4417*a325d9c4SApple OSS Distributions }
4418*a325d9c4SApple OSS Distributions
4419*a325d9c4SApple OSS Distributions return error;
4420*a325d9c4SApple OSS Distributions }
4421*a325d9c4SApple OSS Distributions
4422*a325d9c4SApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4423*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4424*a325d9c4SApple OSS Distributions {
4425*a325d9c4SApple OSS Distributions ioGMDData * dataP;
4426*a325d9c4SApple OSS Distributions
4427*a325d9c4SApple OSS Distributions if (size > UINT_MAX) {
4428*a325d9c4SApple OSS Distributions return false;
4429*a325d9c4SApple OSS Distributions }
4430*a325d9c4SApple OSS Distributions if (!_memoryEntries) {
4431*a325d9c4SApple OSS Distributions _memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4432*a325d9c4SApple OSS Distributions if (!_memoryEntries) {
4433*a325d9c4SApple OSS Distributions return false;
4434*a325d9c4SApple OSS Distributions }
4435*a325d9c4SApple OSS Distributions } else if (!_memoryEntries->initWithCapacity(size)) {
4436*a325d9c4SApple OSS Distributions return false;
4437*a325d9c4SApple OSS Distributions }
4438*a325d9c4SApple OSS Distributions
4439*a325d9c4SApple OSS Distributions _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4440*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
4441*a325d9c4SApple OSS Distributions
4442*a325d9c4SApple OSS Distributions if (mapper == kIOMapperWaitSystem) {
4443*a325d9c4SApple OSS Distributions IOMapper::checkForSystemMapper();
4444*a325d9c4SApple OSS Distributions mapper = IOMapper::gSystem;
4445*a325d9c4SApple OSS Distributions }
4446*a325d9c4SApple OSS Distributions dataP->fMapper = mapper;
4447*a325d9c4SApple OSS Distributions dataP->fPageCnt = 0;
4448*a325d9c4SApple OSS Distributions dataP->fMappedBase = 0;
4449*a325d9c4SApple OSS Distributions dataP->fDMAMapNumAddressBits = 64;
4450*a325d9c4SApple OSS Distributions dataP->fDMAMapAlignment = 0;
4451*a325d9c4SApple OSS Distributions dataP->fPreparationID = kIOPreparationIDUnprepared;
4452*a325d9c4SApple OSS Distributions dataP->fCompletionError = false;
4453*a325d9c4SApple OSS Distributions dataP->fMappedBaseValid = false;
4454*a325d9c4SApple OSS Distributions
4455*a325d9c4SApple OSS Distributions return true;
4456*a325d9c4SApple OSS Distributions }
4457*a325d9c4SApple OSS Distributions
4458*a325d9c4SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4459*a325d9c4SApple OSS Distributions IOMemoryDescriptor::dmaMap(
4460*a325d9c4SApple OSS Distributions IOMapper * mapper,
4461*a325d9c4SApple OSS Distributions IOMemoryDescriptor * memory,
4462*a325d9c4SApple OSS Distributions IODMACommand * command,
4463*a325d9c4SApple OSS Distributions const IODMAMapSpecification * mapSpec,
4464*a325d9c4SApple OSS Distributions uint64_t offset,
4465*a325d9c4SApple OSS Distributions uint64_t length,
4466*a325d9c4SApple OSS Distributions uint64_t * mapAddress,
4467*a325d9c4SApple OSS Distributions uint64_t * mapLength)
4468*a325d9c4SApple OSS Distributions {
4469*a325d9c4SApple OSS Distributions IOReturn err;
4470*a325d9c4SApple OSS Distributions uint32_t mapOptions;
4471*a325d9c4SApple OSS Distributions
4472*a325d9c4SApple OSS Distributions mapOptions = 0;
4473*a325d9c4SApple OSS Distributions mapOptions |= kIODMAMapReadAccess;
4474*a325d9c4SApple OSS Distributions if (!(kIOMemoryPreparedReadOnly & _flags)) {
4475*a325d9c4SApple OSS Distributions mapOptions |= kIODMAMapWriteAccess;
4476*a325d9c4SApple OSS Distributions }
4477*a325d9c4SApple OSS Distributions
4478*a325d9c4SApple OSS Distributions err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4479*a325d9c4SApple OSS Distributions mapSpec, command, NULL, mapAddress, mapLength);
4480*a325d9c4SApple OSS Distributions
4481*a325d9c4SApple OSS Distributions if (kIOReturnSuccess == err) {
4482*a325d9c4SApple OSS Distributions dmaMapRecord(mapper, command, *mapLength);
4483*a325d9c4SApple OSS Distributions }
4484*a325d9c4SApple OSS Distributions
4485*a325d9c4SApple OSS Distributions return err;
4486*a325d9c4SApple OSS Distributions }
4487*a325d9c4SApple OSS Distributions
4488*a325d9c4SApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4489*a325d9c4SApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4490*a325d9c4SApple OSS Distributions IOMapper * mapper,
4491*a325d9c4SApple OSS Distributions IODMACommand * command,
4492*a325d9c4SApple OSS Distributions uint64_t mapLength)
4493*a325d9c4SApple OSS Distributions {
4494*a325d9c4SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4495*a325d9c4SApple OSS Distributions kern_allocation_name_t alloc;
4496*a325d9c4SApple OSS Distributions int16_t prior;
4497*a325d9c4SApple OSS Distributions
4498*a325d9c4SApple OSS Distributions if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4499*a325d9c4SApple OSS Distributions kern_allocation_update_size(mapper->fAllocName, mapLength);
4500*a325d9c4SApple OSS Distributions }
4501*a325d9c4SApple OSS Distributions
4502*a325d9c4SApple OSS Distributions if (!command) {
4503*a325d9c4SApple OSS Distributions return;
4504*a325d9c4SApple OSS Distributions }
4505*a325d9c4SApple OSS Distributions prior = OSAddAtomic16(1, &_dmaReferences);
4506*a325d9c4SApple OSS Distributions if (!prior) {
4507*a325d9c4SApple OSS Distributions if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4508*a325d9c4SApple OSS Distributions _mapName = alloc;
4509*a325d9c4SApple OSS Distributions mapLength = _length;
4510*a325d9c4SApple OSS Distributions kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4511*a325d9c4SApple OSS Distributions } else {
4512*a325d9c4SApple OSS Distributions _mapName = NULL;
4513*a325d9c4SApple OSS Distributions }
4514*a325d9c4SApple OSS Distributions }
4515*a325d9c4SApple OSS Distributions }
4516*a325d9c4SApple OSS Distributions
4517*a325d9c4SApple OSS Distributions IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4518*a325d9c4SApple OSS Distributions IOMemoryDescriptor::dmaUnmap(
4519*a325d9c4SApple OSS Distributions IOMapper * mapper,
4520*a325d9c4SApple OSS Distributions IODMACommand * command,
4521*a325d9c4SApple OSS Distributions uint64_t offset,
4522*a325d9c4SApple OSS Distributions uint64_t mapAddress,
4523*a325d9c4SApple OSS Distributions uint64_t mapLength)
4524*a325d9c4SApple OSS Distributions {
4525*a325d9c4SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4526*a325d9c4SApple OSS Distributions IOReturn ret;
4527*a325d9c4SApple OSS Distributions kern_allocation_name_t alloc;
4528*a325d9c4SApple OSS Distributions kern_allocation_name_t mapName;
4529*a325d9c4SApple OSS Distributions int16_t prior;
4530*a325d9c4SApple OSS Distributions
4531*a325d9c4SApple OSS Distributions mapName = NULL;
4532*a325d9c4SApple OSS Distributions prior = 0;
4533*a325d9c4SApple OSS Distributions if (command) {
4534*a325d9c4SApple OSS Distributions mapName = _mapName;
4535*a325d9c4SApple OSS Distributions if (_dmaReferences) {
4536*a325d9c4SApple OSS Distributions prior = OSAddAtomic16(-1, &_dmaReferences);
4537*a325d9c4SApple OSS Distributions } else {
4538*a325d9c4SApple OSS Distributions panic("_dmaReferences underflow");
4539*a325d9c4SApple OSS Distributions }
4540*a325d9c4SApple OSS Distributions }
4541*a325d9c4SApple OSS Distributions
4542*a325d9c4SApple OSS Distributions if (!mapLength) {
4543*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4544*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
4545*a325d9c4SApple OSS Distributions }
4546*a325d9c4SApple OSS Distributions
4547*a325d9c4SApple OSS Distributions ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4548*a325d9c4SApple OSS Distributions
4549*a325d9c4SApple OSS Distributions if ((alloc = mapper->fAllocName)) {
4550*a325d9c4SApple OSS Distributions kern_allocation_update_size(alloc, -mapLength);
4551*a325d9c4SApple OSS Distributions if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4552*a325d9c4SApple OSS Distributions mapLength = _length;
4553*a325d9c4SApple OSS Distributions kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4554*a325d9c4SApple OSS Distributions }
4555*a325d9c4SApple OSS Distributions }
4556*a325d9c4SApple OSS Distributions
4557*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(ret);
4558*a325d9c4SApple OSS Distributions return ret;
4559*a325d9c4SApple OSS Distributions }
4560*a325d9c4SApple OSS Distributions
4561*a325d9c4SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4562*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::dmaMap(
4563*a325d9c4SApple OSS Distributions IOMapper * mapper,
4564*a325d9c4SApple OSS Distributions IOMemoryDescriptor * memory,
4565*a325d9c4SApple OSS Distributions IODMACommand * command,
4566*a325d9c4SApple OSS Distributions const IODMAMapSpecification * mapSpec,
4567*a325d9c4SApple OSS Distributions uint64_t offset,
4568*a325d9c4SApple OSS Distributions uint64_t length,
4569*a325d9c4SApple OSS Distributions uint64_t * mapAddress,
4570*a325d9c4SApple OSS Distributions uint64_t * mapLength)
4571*a325d9c4SApple OSS Distributions {
4572*a325d9c4SApple OSS Distributions IOReturn err = kIOReturnSuccess;
4573*a325d9c4SApple OSS Distributions ioGMDData * dataP;
4574*a325d9c4SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4575*a325d9c4SApple OSS Distributions
4576*a325d9c4SApple OSS Distributions *mapAddress = 0;
4577*a325d9c4SApple OSS Distributions if (kIOMemoryHostOnly & _flags) {
4578*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
4579*a325d9c4SApple OSS Distributions }
4580*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4581*a325d9c4SApple OSS Distributions return kIOReturnNotAttached;
4582*a325d9c4SApple OSS Distributions }
4583*a325d9c4SApple OSS Distributions
4584*a325d9c4SApple OSS Distributions if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4585*a325d9c4SApple OSS Distributions || offset || (length != _length)) {
4586*a325d9c4SApple OSS Distributions err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4587*a325d9c4SApple OSS Distributions } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4588*a325d9c4SApple OSS Distributions const ioPLBlock * ioplList = getIOPLList(dataP);
4589*a325d9c4SApple OSS Distributions upl_page_info_t * pageList;
4590*a325d9c4SApple OSS Distributions uint32_t mapOptions = 0;
4591*a325d9c4SApple OSS Distributions
4592*a325d9c4SApple OSS Distributions IODMAMapSpecification mapSpec;
4593*a325d9c4SApple OSS Distributions bzero(&mapSpec, sizeof(mapSpec));
4594*a325d9c4SApple OSS Distributions mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4595*a325d9c4SApple OSS Distributions mapSpec.alignment = dataP->fDMAMapAlignment;
4596*a325d9c4SApple OSS Distributions
4597*a325d9c4SApple OSS Distributions // For external UPLs the fPageInfo field points directly to
4598*a325d9c4SApple OSS Distributions // the upl's upl_page_info_t array.
4599*a325d9c4SApple OSS Distributions if (ioplList->fFlags & kIOPLExternUPL) {
4600*a325d9c4SApple OSS Distributions pageList = (upl_page_info_t *) ioplList->fPageInfo;
4601*a325d9c4SApple OSS Distributions mapOptions |= kIODMAMapPagingPath;
4602*a325d9c4SApple OSS Distributions } else {
4603*a325d9c4SApple OSS Distributions pageList = getPageList(dataP);
4604*a325d9c4SApple OSS Distributions }
4605*a325d9c4SApple OSS Distributions
4606*a325d9c4SApple OSS Distributions if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4607*a325d9c4SApple OSS Distributions mapOptions |= kIODMAMapPageListFullyOccupied;
4608*a325d9c4SApple OSS Distributions }
4609*a325d9c4SApple OSS Distributions
4610*a325d9c4SApple OSS Distributions assert(dataP->fDMAAccess);
4611*a325d9c4SApple OSS Distributions mapOptions |= dataP->fDMAAccess;
4612*a325d9c4SApple OSS Distributions
4613*a325d9c4SApple OSS Distributions // Check for direct device non-paged memory
4614*a325d9c4SApple OSS Distributions if (ioplList->fFlags & kIOPLOnDevice) {
4615*a325d9c4SApple OSS Distributions mapOptions |= kIODMAMapPhysicallyContiguous;
4616*a325d9c4SApple OSS Distributions }
4617*a325d9c4SApple OSS Distributions
4618*a325d9c4SApple OSS Distributions IODMAMapPageList dmaPageList =
4619*a325d9c4SApple OSS Distributions {
4620*a325d9c4SApple OSS Distributions .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
4621*a325d9c4SApple OSS Distributions .pageListCount = _pages,
4622*a325d9c4SApple OSS Distributions .pageList = &pageList[0]
4623*a325d9c4SApple OSS Distributions };
4624*a325d9c4SApple OSS Distributions err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4625*a325d9c4SApple OSS Distributions command, &dmaPageList, mapAddress, mapLength);
4626*a325d9c4SApple OSS Distributions
4627*a325d9c4SApple OSS Distributions if (kIOReturnSuccess == err) {
4628*a325d9c4SApple OSS Distributions dmaMapRecord(mapper, command, *mapLength);
4629*a325d9c4SApple OSS Distributions }
4630*a325d9c4SApple OSS Distributions }
4631*a325d9c4SApple OSS Distributions
4632*a325d9c4SApple OSS Distributions return err;
4633*a325d9c4SApple OSS Distributions }
4634*a325d9c4SApple OSS Distributions
4635*a325d9c4SApple OSS Distributions /*
4636*a325d9c4SApple OSS Distributions * prepare
4637*a325d9c4SApple OSS Distributions *
4638*a325d9c4SApple OSS Distributions * Prepare the memory for an I/O transfer. This involves paging in
4639*a325d9c4SApple OSS Distributions * the memory, if necessary, and wiring it down for the duration of
4640*a325d9c4SApple OSS Distributions * the transfer. The complete() method completes the processing of
4641*a325d9c4SApple OSS Distributions * the memory after the I/O transfer finishes. This method needn't
4642*a325d9c4SApple OSS Distributions * called for non-pageable memory.
4643*a325d9c4SApple OSS Distributions */
4644*a325d9c4SApple OSS Distributions
4645*a325d9c4SApple OSS Distributions IOReturn
prepare(IODirection forDirection)4646*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4647*a325d9c4SApple OSS Distributions {
4648*a325d9c4SApple OSS Distributions IOReturn error = kIOReturnSuccess;
4649*a325d9c4SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4650*a325d9c4SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4651*a325d9c4SApple OSS Distributions
4652*a325d9c4SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4653*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4654*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
4655*a325d9c4SApple OSS Distributions }
4656*a325d9c4SApple OSS Distributions
4657*a325d9c4SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4658*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4659*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(kIOReturnNotAttached);
4660*a325d9c4SApple OSS Distributions return kIOReturnNotAttached;
4661*a325d9c4SApple OSS Distributions }
4662*a325d9c4SApple OSS Distributions
4663*a325d9c4SApple OSS Distributions if (_prepareLock) {
4664*a325d9c4SApple OSS Distributions IOLockLock(_prepareLock);
4665*a325d9c4SApple OSS Distributions }
4666*a325d9c4SApple OSS Distributions
4667*a325d9c4SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4668*a325d9c4SApple OSS Distributions if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4669*a325d9c4SApple OSS Distributions error = kIOReturnNotReady;
4670*a325d9c4SApple OSS Distributions goto finish;
4671*a325d9c4SApple OSS Distributions }
4672*a325d9c4SApple OSS Distributions error = wireVirtual(forDirection);
4673*a325d9c4SApple OSS Distributions }
4674*a325d9c4SApple OSS Distributions
4675*a325d9c4SApple OSS Distributions if (kIOReturnSuccess == error) {
4676*a325d9c4SApple OSS Distributions if (1 == ++_wireCount) {
4677*a325d9c4SApple OSS Distributions if (kIOMemoryClearEncrypt & _flags) {
4678*a325d9c4SApple OSS Distributions performOperation(kIOMemoryClearEncrypted, 0, _length);
4679*a325d9c4SApple OSS Distributions }
4680*a325d9c4SApple OSS Distributions
4681*a325d9c4SApple OSS Distributions ktraceEmitPhysicalSegments();
4682*a325d9c4SApple OSS Distributions }
4683*a325d9c4SApple OSS Distributions }
4684*a325d9c4SApple OSS Distributions
4685*a325d9c4SApple OSS Distributions finish:
4686*a325d9c4SApple OSS Distributions
4687*a325d9c4SApple OSS Distributions if (_prepareLock) {
4688*a325d9c4SApple OSS Distributions IOLockUnlock(_prepareLock);
4689*a325d9c4SApple OSS Distributions }
4690*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(error);
4691*a325d9c4SApple OSS Distributions
4692*a325d9c4SApple OSS Distributions return error;
4693*a325d9c4SApple OSS Distributions }
4694*a325d9c4SApple OSS Distributions
4695*a325d9c4SApple OSS Distributions /*
4696*a325d9c4SApple OSS Distributions * complete
4697*a325d9c4SApple OSS Distributions *
4698*a325d9c4SApple OSS Distributions * Complete processing of the memory after an I/O transfer finishes.
4699*a325d9c4SApple OSS Distributions * This method should not be called unless a prepare was previously
4700*a325d9c4SApple OSS Distributions * issued; the prepare() and complete() must occur in pairs, before
4701*a325d9c4SApple OSS Distributions * before and after an I/O transfer involving pageable memory.
4702*a325d9c4SApple OSS Distributions */
4703*a325d9c4SApple OSS Distributions
4704*a325d9c4SApple OSS Distributions IOReturn
complete(IODirection forDirection)4705*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4706*a325d9c4SApple OSS Distributions {
4707*a325d9c4SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4708*a325d9c4SApple OSS Distributions ioGMDData * dataP;
4709*a325d9c4SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4710*a325d9c4SApple OSS Distributions
4711*a325d9c4SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4712*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4713*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
4714*a325d9c4SApple OSS Distributions }
4715*a325d9c4SApple OSS Distributions
4716*a325d9c4SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4717*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4718*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(kIOReturnNotAttached);
4719*a325d9c4SApple OSS Distributions return kIOReturnNotAttached;
4720*a325d9c4SApple OSS Distributions }
4721*a325d9c4SApple OSS Distributions
4722*a325d9c4SApple OSS Distributions if (_prepareLock) {
4723*a325d9c4SApple OSS Distributions IOLockLock(_prepareLock);
4724*a325d9c4SApple OSS Distributions }
4725*a325d9c4SApple OSS Distributions do{
4726*a325d9c4SApple OSS Distributions assert(_wireCount);
4727*a325d9c4SApple OSS Distributions if (!_wireCount) {
4728*a325d9c4SApple OSS Distributions break;
4729*a325d9c4SApple OSS Distributions }
4730*a325d9c4SApple OSS Distributions dataP = getDataP(_memoryEntries);
4731*a325d9c4SApple OSS Distributions if (!dataP) {
4732*a325d9c4SApple OSS Distributions break;
4733*a325d9c4SApple OSS Distributions }
4734*a325d9c4SApple OSS Distributions
4735*a325d9c4SApple OSS Distributions if (kIODirectionCompleteWithError & forDirection) {
4736*a325d9c4SApple OSS Distributions dataP->fCompletionError = true;
4737*a325d9c4SApple OSS Distributions }
4738*a325d9c4SApple OSS Distributions
4739*a325d9c4SApple OSS Distributions if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4740*a325d9c4SApple OSS Distributions performOperation(kIOMemorySetEncrypted, 0, _length);
4741*a325d9c4SApple OSS Distributions }
4742*a325d9c4SApple OSS Distributions
4743*a325d9c4SApple OSS Distributions _wireCount--;
4744*a325d9c4SApple OSS Distributions if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4745*a325d9c4SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
4746*a325d9c4SApple OSS Distributions UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4747*a325d9c4SApple OSS Distributions
4748*a325d9c4SApple OSS Distributions if (_wireCount) {
4749*a325d9c4SApple OSS Distributions // kIODirectionCompleteWithDataValid & forDirection
4750*a325d9c4SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4751*a325d9c4SApple OSS Distributions vm_tag_t tag;
4752*a325d9c4SApple OSS Distributions tag = (typeof(tag))getVMTag(kernel_map);
4753*a325d9c4SApple OSS Distributions for (ind = 0; ind < count; ind++) {
4754*a325d9c4SApple OSS Distributions if (ioplList[ind].fIOPL) {
4755*a325d9c4SApple OSS Distributions iopl_valid_data(ioplList[ind].fIOPL, tag);
4756*a325d9c4SApple OSS Distributions }
4757*a325d9c4SApple OSS Distributions }
4758*a325d9c4SApple OSS Distributions }
4759*a325d9c4SApple OSS Distributions } else {
4760*a325d9c4SApple OSS Distributions if (_dmaReferences) {
4761*a325d9c4SApple OSS Distributions panic("complete() while dma active");
4762*a325d9c4SApple OSS Distributions }
4763*a325d9c4SApple OSS Distributions
4764*a325d9c4SApple OSS Distributions if (dataP->fMappedBaseValid) {
4765*a325d9c4SApple OSS Distributions dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4766*a325d9c4SApple OSS Distributions dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4767*a325d9c4SApple OSS Distributions }
4768*a325d9c4SApple OSS Distributions #if IOTRACKING
4769*a325d9c4SApple OSS Distributions if (dataP->fWireTracking.link.next) {
4770*a325d9c4SApple OSS Distributions IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4771*a325d9c4SApple OSS Distributions }
4772*a325d9c4SApple OSS Distributions #endif /* IOTRACKING */
4773*a325d9c4SApple OSS Distributions // Only complete iopls that we created which are for TypeVirtual
4774*a325d9c4SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4775*a325d9c4SApple OSS Distributions for (ind = 0; ind < count; ind++) {
4776*a325d9c4SApple OSS Distributions if (ioplList[ind].fIOPL) {
4777*a325d9c4SApple OSS Distributions if (dataP->fCompletionError) {
4778*a325d9c4SApple OSS Distributions upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4779*a325d9c4SApple OSS Distributions } else {
4780*a325d9c4SApple OSS Distributions upl_commit(ioplList[ind].fIOPL, NULL, 0);
4781*a325d9c4SApple OSS Distributions }
4782*a325d9c4SApple OSS Distributions upl_deallocate(ioplList[ind].fIOPL);
4783*a325d9c4SApple OSS Distributions }
4784*a325d9c4SApple OSS Distributions }
4785*a325d9c4SApple OSS Distributions } else if (kIOMemoryTypeUPL == type) {
4786*a325d9c4SApple OSS Distributions upl_set_referenced(ioplList[0].fIOPL, false);
4787*a325d9c4SApple OSS Distributions }
4788*a325d9c4SApple OSS Distributions
4789*a325d9c4SApple OSS Distributions _memoryEntries->setLength(computeDataSize(0, 0));
4790*a325d9c4SApple OSS Distributions
4791*a325d9c4SApple OSS Distributions dataP->fPreparationID = kIOPreparationIDUnprepared;
4792*a325d9c4SApple OSS Distributions _flags &= ~kIOMemoryPreparedReadOnly;
4793*a325d9c4SApple OSS Distributions
4794*a325d9c4SApple OSS Distributions if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4795*a325d9c4SApple OSS Distributions IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4796*a325d9c4SApple OSS Distributions }
4797*a325d9c4SApple OSS Distributions }
4798*a325d9c4SApple OSS Distributions }
4799*a325d9c4SApple OSS Distributions }while (false);
4800*a325d9c4SApple OSS Distributions
4801*a325d9c4SApple OSS Distributions if (_prepareLock) {
4802*a325d9c4SApple OSS Distributions IOLockUnlock(_prepareLock);
4803*a325d9c4SApple OSS Distributions }
4804*a325d9c4SApple OSS Distributions
4805*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4806*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
4807*a325d9c4SApple OSS Distributions }
4808*a325d9c4SApple OSS Distributions
4809*a325d9c4SApple OSS Distributions IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4810*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::doMap(
4811*a325d9c4SApple OSS Distributions vm_map_t __addressMap,
4812*a325d9c4SApple OSS Distributions IOVirtualAddress * __address,
4813*a325d9c4SApple OSS Distributions IOOptionBits options,
4814*a325d9c4SApple OSS Distributions IOByteCount __offset,
4815*a325d9c4SApple OSS Distributions IOByteCount __length )
4816*a325d9c4SApple OSS Distributions {
4817*a325d9c4SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4818*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4819*a325d9c4SApple OSS Distributions #ifndef __LP64__
4820*a325d9c4SApple OSS Distributions if (!(kIOMap64Bit & options)) {
4821*a325d9c4SApple OSS Distributions panic("IOGeneralMemoryDescriptor::doMap !64bit");
4822*a325d9c4SApple OSS Distributions }
4823*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
4824*a325d9c4SApple OSS Distributions
4825*a325d9c4SApple OSS Distributions kern_return_t err;
4826*a325d9c4SApple OSS Distributions
4827*a325d9c4SApple OSS Distributions IOMemoryMap * mapping = (IOMemoryMap *) *__address;
4828*a325d9c4SApple OSS Distributions mach_vm_size_t offset = mapping->fOffset + __offset;
4829*a325d9c4SApple OSS Distributions mach_vm_size_t length = mapping->fLength;
4830*a325d9c4SApple OSS Distributions
4831*a325d9c4SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4832*a325d9c4SApple OSS Distributions Ranges vec = _ranges;
4833*a325d9c4SApple OSS Distributions
4834*a325d9c4SApple OSS Distributions mach_vm_address_t range0Addr = 0;
4835*a325d9c4SApple OSS Distributions mach_vm_size_t range0Len = 0;
4836*a325d9c4SApple OSS Distributions
4837*a325d9c4SApple OSS Distributions if ((offset >= _length) || ((offset + length) > _length)) {
4838*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(kIOReturnBadArgument);
4839*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4840*a325d9c4SApple OSS Distributions // assert(offset == 0 && _length == 0 && length == 0);
4841*a325d9c4SApple OSS Distributions return kIOReturnBadArgument;
4842*a325d9c4SApple OSS Distributions }
4843*a325d9c4SApple OSS Distributions
4844*a325d9c4SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4845*a325d9c4SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4846*a325d9c4SApple OSS Distributions return 0;
4847*a325d9c4SApple OSS Distributions }
4848*a325d9c4SApple OSS Distributions
4849*a325d9c4SApple OSS Distributions if (vec.v) {
4850*a325d9c4SApple OSS Distributions getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
4851*a325d9c4SApple OSS Distributions }
4852*a325d9c4SApple OSS Distributions
4853*a325d9c4SApple OSS Distributions // mapping source == dest? (could be much better)
4854*a325d9c4SApple OSS Distributions if (_task
4855*a325d9c4SApple OSS Distributions && (mapping->fAddressTask == _task)
4856*a325d9c4SApple OSS Distributions && (mapping->fAddressMap == get_task_map(_task))
4857*a325d9c4SApple OSS Distributions && (options & kIOMapAnywhere)
4858*a325d9c4SApple OSS Distributions && (!(kIOMapUnique & options))
4859*a325d9c4SApple OSS Distributions && (!(kIOMapGuardedMask & options))
4860*a325d9c4SApple OSS Distributions && (1 == _rangesCount)
4861*a325d9c4SApple OSS Distributions && (0 == offset)
4862*a325d9c4SApple OSS Distributions && range0Addr
4863*a325d9c4SApple OSS Distributions && (length <= range0Len)) {
4864*a325d9c4SApple OSS Distributions mapping->fAddress = range0Addr;
4865*a325d9c4SApple OSS Distributions mapping->fOptions |= kIOMapStatic;
4866*a325d9c4SApple OSS Distributions
4867*a325d9c4SApple OSS Distributions return kIOReturnSuccess;
4868*a325d9c4SApple OSS Distributions }
4869*a325d9c4SApple OSS Distributions
4870*a325d9c4SApple OSS Distributions if (!_memRef) {
4871*a325d9c4SApple OSS Distributions IOOptionBits createOptions = 0;
4872*a325d9c4SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
4873*a325d9c4SApple OSS Distributions createOptions |= kIOMemoryReferenceWrite;
4874*a325d9c4SApple OSS Distributions #if DEVELOPMENT || DEBUG
4875*a325d9c4SApple OSS Distributions if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4876*a325d9c4SApple OSS Distributions && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4877*a325d9c4SApple OSS Distributions OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4878*a325d9c4SApple OSS Distributions }
4879*a325d9c4SApple OSS Distributions #endif
4880*a325d9c4SApple OSS Distributions }
4881*a325d9c4SApple OSS Distributions err = memoryReferenceCreate(createOptions, &_memRef);
4882*a325d9c4SApple OSS Distributions if (kIOReturnSuccess != err) {
4883*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(err);
4884*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4885*a325d9c4SApple OSS Distributions return err;
4886*a325d9c4SApple OSS Distributions }
4887*a325d9c4SApple OSS Distributions }
4888*a325d9c4SApple OSS Distributions
4889*a325d9c4SApple OSS Distributions memory_object_t pager;
4890*a325d9c4SApple OSS Distributions pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4891*a325d9c4SApple OSS Distributions
4892*a325d9c4SApple OSS Distributions // <upl_transpose //
4893*a325d9c4SApple OSS Distributions if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4894*a325d9c4SApple OSS Distributions do{
4895*a325d9c4SApple OSS Distributions upl_t redirUPL2;
4896*a325d9c4SApple OSS Distributions upl_size_t size;
4897*a325d9c4SApple OSS Distributions upl_control_flags_t flags;
4898*a325d9c4SApple OSS Distributions unsigned int lock_count;
4899*a325d9c4SApple OSS Distributions
4900*a325d9c4SApple OSS Distributions if (!_memRef || (1 != _memRef->count)) {
4901*a325d9c4SApple OSS Distributions err = kIOReturnNotReadable;
4902*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4903*a325d9c4SApple OSS Distributions break;
4904*a325d9c4SApple OSS Distributions }
4905*a325d9c4SApple OSS Distributions
4906*a325d9c4SApple OSS Distributions size = (upl_size_t) round_page(mapping->fLength);
4907*a325d9c4SApple OSS Distributions flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4908*a325d9c4SApple OSS Distributions | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4909*a325d9c4SApple OSS Distributions
4910*a325d9c4SApple OSS Distributions if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
4911*a325d9c4SApple OSS Distributions NULL, NULL,
4912*a325d9c4SApple OSS Distributions &flags, (vm_tag_t) getVMTag(kernel_map))) {
4913*a325d9c4SApple OSS Distributions redirUPL2 = NULL;
4914*a325d9c4SApple OSS Distributions }
4915*a325d9c4SApple OSS Distributions
4916*a325d9c4SApple OSS Distributions for (lock_count = 0;
4917*a325d9c4SApple OSS Distributions IORecursiveLockHaveLock(gIOMemoryLock);
4918*a325d9c4SApple OSS Distributions lock_count++) {
4919*a325d9c4SApple OSS Distributions UNLOCK;
4920*a325d9c4SApple OSS Distributions }
4921*a325d9c4SApple OSS Distributions err = upl_transpose(redirUPL2, mapping->fRedirUPL);
4922*a325d9c4SApple OSS Distributions for (;
4923*a325d9c4SApple OSS Distributions lock_count;
4924*a325d9c4SApple OSS Distributions lock_count--) {
4925*a325d9c4SApple OSS Distributions LOCK;
4926*a325d9c4SApple OSS Distributions }
4927*a325d9c4SApple OSS Distributions
4928*a325d9c4SApple OSS Distributions if (kIOReturnSuccess != err) {
4929*a325d9c4SApple OSS Distributions IOLog("upl_transpose(%x)\n", err);
4930*a325d9c4SApple OSS Distributions err = kIOReturnSuccess;
4931*a325d9c4SApple OSS Distributions }
4932*a325d9c4SApple OSS Distributions
4933*a325d9c4SApple OSS Distributions if (redirUPL2) {
4934*a325d9c4SApple OSS Distributions upl_commit(redirUPL2, NULL, 0);
4935*a325d9c4SApple OSS Distributions upl_deallocate(redirUPL2);
4936*a325d9c4SApple OSS Distributions redirUPL2 = NULL;
4937*a325d9c4SApple OSS Distributions }
4938*a325d9c4SApple OSS Distributions {
4939*a325d9c4SApple OSS Distributions // swap the memEntries since they now refer to different vm_objects
4940*a325d9c4SApple OSS Distributions IOMemoryReference * me = _memRef;
4941*a325d9c4SApple OSS Distributions _memRef = mapping->fMemory->_memRef;
4942*a325d9c4SApple OSS Distributions mapping->fMemory->_memRef = me;
4943*a325d9c4SApple OSS Distributions }
4944*a325d9c4SApple OSS Distributions if (pager) {
4945*a325d9c4SApple OSS Distributions err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
4946*a325d9c4SApple OSS Distributions }
4947*a325d9c4SApple OSS Distributions }while (false);
4948*a325d9c4SApple OSS Distributions }
4949*a325d9c4SApple OSS Distributions // upl_transpose> //
4950*a325d9c4SApple OSS Distributions else {
4951*a325d9c4SApple OSS Distributions err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4952*a325d9c4SApple OSS Distributions if (err) {
4953*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
4954*a325d9c4SApple OSS Distributions }
4955*a325d9c4SApple OSS Distributions #if IOTRACKING
4956*a325d9c4SApple OSS Distributions if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
4957*a325d9c4SApple OSS Distributions // only dram maps in the default on developement case
4958*a325d9c4SApple OSS Distributions IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4959*a325d9c4SApple OSS Distributions }
4960*a325d9c4SApple OSS Distributions #endif /* IOTRACKING */
4961*a325d9c4SApple OSS Distributions if ((err == KERN_SUCCESS) && pager) {
4962*a325d9c4SApple OSS Distributions err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4963*a325d9c4SApple OSS Distributions
4964*a325d9c4SApple OSS Distributions if (err != KERN_SUCCESS) {
4965*a325d9c4SApple OSS Distributions doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4966*a325d9c4SApple OSS Distributions } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4967*a325d9c4SApple OSS Distributions mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4968*a325d9c4SApple OSS Distributions }
4969*a325d9c4SApple OSS Distributions }
4970*a325d9c4SApple OSS Distributions }
4971*a325d9c4SApple OSS Distributions
4972*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(err);
4973*a325d9c4SApple OSS Distributions if (err) {
4974*a325d9c4SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4975*a325d9c4SApple OSS Distributions }
4976*a325d9c4SApple OSS Distributions return err;
4977*a325d9c4SApple OSS Distributions }
4978*a325d9c4SApple OSS Distributions
#if IOTRACKING
// Resolve an IOTrackingUser record back to the IOMemoryMap that embeds it
// and report the map's owning task, start address, and length.
// Returns kIOReturnNotReady when the map has no VM map or its VM map no
// longer matches the owning task's (e.g. the mapping was torn down).
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
	// Hand-rolled offsetof, used to recover the containing IOMemoryMap
	// from a pointer to its embedded fTracking member (container_of idiom).
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task = map->fAddressTask;
	*address = map->fAddress;
	*size = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
4999*a325d9c4SApple OSS Distributions
5000*a325d9c4SApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5001*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5002*a325d9c4SApple OSS Distributions vm_map_t addressMap,
5003*a325d9c4SApple OSS Distributions IOVirtualAddress __address,
5004*a325d9c4SApple OSS Distributions IOByteCount __length )
5005*a325d9c4SApple OSS Distributions {
5006*a325d9c4SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5007*a325d9c4SApple OSS Distributions IOReturn ret;
5008*a325d9c4SApple OSS Distributions ret = super::doUnmap(addressMap, __address, __length);
5009*a325d9c4SApple OSS Distributions traceInterval.setEndArg1(ret);
5010*a325d9c4SApple OSS Distributions return ret;
5011*a325d9c4SApple OSS Distributions }
5012*a325d9c4SApple OSS Distributions
5013*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5014*a325d9c4SApple OSS Distributions
#undef super
#define super OSObject

// IOMemoryMap is a direct OSObject subclass; reserve a pad of unused
// vtable slots so future additions stay binary compatible.
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5028*a325d9c4SApple OSS Distributions
5029*a325d9c4SApple OSS Distributions /* ex-inline function implementation */
5030*a325d9c4SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()5031*a325d9c4SApple OSS Distributions IOMemoryMap::getPhysicalAddress()
5032*a325d9c4SApple OSS Distributions {
5033*a325d9c4SApple OSS Distributions return getPhysicalSegment( 0, NULL );
5034*a325d9c4SApple OSS Distributions }
5035*a325d9c4SApple OSS Distributions
5036*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5037*a325d9c4SApple OSS Distributions
5038*a325d9c4SApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5039*a325d9c4SApple OSS Distributions IOMemoryMap::init(
5040*a325d9c4SApple OSS Distributions task_t intoTask,
5041*a325d9c4SApple OSS Distributions mach_vm_address_t toAddress,
5042*a325d9c4SApple OSS Distributions IOOptionBits _options,
5043*a325d9c4SApple OSS Distributions mach_vm_size_t _offset,
5044*a325d9c4SApple OSS Distributions mach_vm_size_t _length )
5045*a325d9c4SApple OSS Distributions {
5046*a325d9c4SApple OSS Distributions if (!intoTask) {
5047*a325d9c4SApple OSS Distributions return false;
5048*a325d9c4SApple OSS Distributions }
5049*a325d9c4SApple OSS Distributions
5050*a325d9c4SApple OSS Distributions if (!super::init()) {
5051*a325d9c4SApple OSS Distributions return false;
5052*a325d9c4SApple OSS Distributions }
5053*a325d9c4SApple OSS Distributions
5054*a325d9c4SApple OSS Distributions fAddressMap = get_task_map(intoTask);
5055*a325d9c4SApple OSS Distributions if (!fAddressMap) {
5056*a325d9c4SApple OSS Distributions return false;
5057*a325d9c4SApple OSS Distributions }
5058*a325d9c4SApple OSS Distributions vm_map_reference(fAddressMap);
5059*a325d9c4SApple OSS Distributions
5060*a325d9c4SApple OSS Distributions fAddressTask = intoTask;
5061*a325d9c4SApple OSS Distributions fOptions = _options;
5062*a325d9c4SApple OSS Distributions fLength = _length;
5063*a325d9c4SApple OSS Distributions fOffset = _offset;
5064*a325d9c4SApple OSS Distributions fAddress = toAddress;
5065*a325d9c4SApple OSS Distributions
5066*a325d9c4SApple OSS Distributions return true;
5067*a325d9c4SApple OSS Distributions }
5068*a325d9c4SApple OSS Distributions
5069*a325d9c4SApple OSS Distributions bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5070*a325d9c4SApple OSS Distributions IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5071*a325d9c4SApple OSS Distributions {
5072*a325d9c4SApple OSS Distributions if (!_memory) {
5073*a325d9c4SApple OSS Distributions return false;
5074*a325d9c4SApple OSS Distributions }
5075*a325d9c4SApple OSS Distributions
5076*a325d9c4SApple OSS Distributions if (!fSuperMap) {
5077*a325d9c4SApple OSS Distributions if ((_offset + fLength) > _memory->getLength()) {
5078*a325d9c4SApple OSS Distributions return false;
5079*a325d9c4SApple OSS Distributions }
5080*a325d9c4SApple OSS Distributions fOffset = _offset;
5081*a325d9c4SApple OSS Distributions }
5082*a325d9c4SApple OSS Distributions
5083*a325d9c4SApple OSS Distributions
5084*a325d9c4SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5085*a325d9c4SApple OSS Distributions if (fMemory) {
5086*a325d9c4SApple OSS Distributions if (fMemory != _memory) {
5087*a325d9c4SApple OSS Distributions fMemory->removeMapping(this);
5088*a325d9c4SApple OSS Distributions }
5089*a325d9c4SApple OSS Distributions }
5090*a325d9c4SApple OSS Distributions fMemory = os::move(tempval);
5091*a325d9c4SApple OSS Distributions
5092*a325d9c4SApple OSS Distributions return true;
5093*a325d9c4SApple OSS Distributions }
5094*a325d9c4SApple OSS Distributions
// Base-class mapping hook: IOMemoryDescriptor itself cannot create
// mappings; concrete subclasses override doMap() to do the real work.
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	return kIOReturnUnsupported;
}
5105*a325d9c4SApple OSS Distributions
// Gate a device-pager fault while this descriptor is redirected (see
// redirect()): the faulting thread sleeps until redirection is cleared
// and WAKEUP is issued.
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Re-check after every wakeup: redirection may be re-asserted
		// before this thread runs.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5122*a325d9c4SApple OSS Distributions
// Walk the descriptor's physical segments covering
// [sourceOffset, sourceOffset + length) and hand each page to the device
// pager, so faults on the range mapped at `address` in `addressMap` can
// be satisfied. Kernel mappings are additionally pre-faulted, since
// faulting them in later from interrupt level is not possible.
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A contiguous pager is always populated from its start.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the first segment down to a page boundary; pageOffset is the
	// sub-page slack that only applies to this first segment.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Device (unmanaged) memory must not overlap VM-managed pages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Populate the pager: the whole (page-rounded) segment at once
		// for contiguous pagers, otherwise one page at a time.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next physical segment; only the first segment
		// carried a sub-page offset.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Bytes left over means the descriptor ran out of segments early.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5224*a325d9c4SApple OSS Distributions
// Tear down a mapping. Note the unusual calling convention: __address is
// actually the IOMemoryMap being destroyed (the map object carries the
// real address/length), and __length must be zero.
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	// Legacy callers passed a real address/length pair; that path is gone.
	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address = mapping->fAddress;
	length = mapping->fLength;

	// Overwrite mappings share a range owned by another allocation and
	// must not be deallocated here.
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffers live in one of the pageable submaps, not
		// directly in kernel_map.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5269*a325d9c4SApple OSS Distributions
// Redirect (or restore) every mapping of this descriptor. While
// redirected, faults block in handleFault(); clearing redirection wakes
// those waiters and, for kernel mappings with a device pager, repopulates
// the pager mappings.
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn                err = kIOReturnSuccess;
	IOMemoryMap *           mapping = NULL;
	OSSharedPtr<OSIterator> iter;

	LOCK;

	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// Re-establish pager-backed kernel mappings when
				// redirection is lifted globally (no safeTask).
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	// Wake any threads sleeping in handleFault().
	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5324*a325d9c4SApple OSS Distributions
// Redirect or restore this single mapping. Redirecting unmaps its pages
// (unless the mapping belongs to safeTask or is static) so later accesses
// fault; for write-combined mappings the cache mode is toggled instead.
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// Submaps are handled via their parent mapping.
//		err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Unmap unless this mapping belongs to the task being
			// protected (safeTask) or is a static mapping.
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// Toggle write-combine <-> cache-inhibit rather than
				// tearing the mapping down.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// For physical descriptors, propagate per-task redirection state to
	// the descriptor itself when it differs from the requested state.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5368*a325d9c4SApple OSS Distributions
5369*a325d9c4SApple OSS Distributions IOReturn
unmap(void)5370*a325d9c4SApple OSS Distributions IOMemoryMap::unmap( void )
5371*a325d9c4SApple OSS Distributions {
5372*a325d9c4SApple OSS Distributions IOReturn err;
5373*a325d9c4SApple OSS Distributions
5374*a325d9c4SApple OSS Distributions LOCK;
5375*a325d9c4SApple OSS Distributions
5376*a325d9c4SApple OSS Distributions if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5377*a325d9c4SApple OSS Distributions && (0 == (kIOMapStatic & fOptions))) {
5378*a325d9c4SApple OSS Distributions err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5379*a325d9c4SApple OSS Distributions } else {
5380*a325d9c4SApple OSS Distributions err = kIOReturnSuccess;
5381*a325d9c4SApple OSS Distributions }
5382*a325d9c4SApple OSS Distributions
5383*a325d9c4SApple OSS Distributions if (fAddressMap) {
5384*a325d9c4SApple OSS Distributions vm_map_deallocate(fAddressMap);
5385*a325d9c4SApple OSS Distributions fAddressMap = NULL;
5386*a325d9c4SApple OSS Distributions }
5387*a325d9c4SApple OSS Distributions
5388*a325d9c4SApple OSS Distributions fAddress = 0;
5389*a325d9c4SApple OSS Distributions
5390*a325d9c4SApple OSS Distributions UNLOCK;
5391*a325d9c4SApple OSS Distributions
5392*a325d9c4SApple OSS Distributions return err;
5393*a325d9c4SApple OSS Distributions }
5394*a325d9c4SApple OSS Distributions
// Called when the owning task terminates: unmap if the user client asked
// for unmap-on-death, then drop the VM map reference and clear task state.
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		// Presumably the unmap() path drops the tracking entry itself;
		// when we skip unmap, remove it here — TODO confirm.
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress = 0;
	UNLOCK;
}
5416*a325d9c4SApple OSS Distributions
// Request that this mapping be unmapped when its owning task dies
// (consumed by taskDied()).
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5423*a325d9c4SApple OSS Distributions
// Overload the release mechanism. All mappings must be a member
// of a memory descriptors _mappings set. This means that we
// always have 2 references on a mapping. When either of these mappings
// are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// Free at a floor of 2 references (self + the descriptor's _mappings
	// set), serialized under the mapping lock.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5435*a325d9c4SApple OSS Distributions
// Final teardown: release the VM mapping, detach from the backing
// descriptor, and commit/free any redirection UPL before destruction.
void
IOMemoryMap::free()
{
	// Unmap first, while fMemory and fAddressMap are still valid.
	unmap();

	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// A leftover redirection UPL must be committed before deallocation.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5459*a325d9c4SApple OSS Distributions
// Length, in bytes, of the mapped range.
IOByteCount
IOMemoryMap::getLength()
{
	return fLength;
}
5465*a325d9c4SApple OSS Distributions
// Legacy accessor for the mapping's virtual address. On non-LP64 builds
// it warns when the address would be truncated by this narrow API.
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	// NOTE(review): the submap's return value is discarded here; the call
	// appears to exist only for its diagnostic side effect — confirm.
	if (fSuperMap) {
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		// A 64-bit map address cannot fit a 32-bit IOVirtualAddress.
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5481*a325d9c4SApple OSS Distributions
#ifndef __LP64__
// Full-width accessors for the mapping's address and size, provided
// out-of-line only on non-LP64 builds.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5495*a325d9c4SApple OSS Distributions
5496*a325d9c4SApple OSS Distributions
5497*a325d9c4SApple OSS Distributions task_t
getAddressTask()5498*a325d9c4SApple OSS Distributions IOMemoryMap::getAddressTask()
5499*a325d9c4SApple OSS Distributions {
5500*a325d9c4SApple OSS Distributions if (fSuperMap) {
5501*a325d9c4SApple OSS Distributions return fSuperMap->getAddressTask();
5502*a325d9c4SApple OSS Distributions } else {
5503*a325d9c4SApple OSS Distributions return fAddressTask;
5504*a325d9c4SApple OSS Distributions }
5505*a325d9c4SApple OSS Distributions }
5506*a325d9c4SApple OSS Distributions
// Options this mapping was created with (cache mode, read-only, etc.).
IOOptionBits
IOMemoryMap::getMapOptions()
{
	return fOptions;
}
5512*a325d9c4SApple OSS Distributions
// Backing memory descriptor. Borrowed pointer: no reference is
// transferred to the caller.
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
5518*a325d9c4SApple OSS Distributions
// Test whether this existing mapping can satisfy the request described by
// newMapping.  On an exact match this mapping is retained and returned; on
// a strict sub-range, newMapping is chained onto this one via fSuperMap and
// returned; NULL means incompatible.
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t task = newMapping->getAddressTask();
	mach_vm_address_t toAddress = newMapping->fAddress;
	IOOptionBits _options = newMapping->fOptions;
	mach_vm_size_t _offset = newMapping->fOffset;
	mach_vm_size_t _length = newMapping->fLength;

	// Must target the same task / VM map as this mapping.
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// Read-only attribute must agree.
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	// Guarded attribute must agree.
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	// Cache mode must agree, unless the request accepts the default cache.
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must coincide with this mapping's address.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// The requested range must lie entirely within this mapping.
	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		// Exact match: hand back this mapping with an extra retain.
		retain();
		newMapping = this;
	} else {
		// Sub-range: make the new mapping a child of this one.
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset = fOffset + _offset;
		newMapping->fAddress = fAddress + _offset;
	}

	return newMapping;
}
5568*a325d9c4SApple OSS Distributions
// Wire or unwire the pages backing [offset, offset + length) of this
// mapping.  The kIODirectionOutIn bits of 'options' select the wire
// protection; if none are set the range is unwired instead.  The range is
// expanded to whole-page boundaries before the VM call.
IOReturn
IOMemoryMap::wireRange(
	uint32_t options,
	mach_vm_size_t offset,
	mach_vm_size_t length)
{
	IOReturn kr;
	mach_vm_address_t start = trunc_page_64(fAddress + offset);
	mach_vm_address_t end = round_page_64(fAddress + offset + length);
	vm_prot_t prot;

	prot = (kIODirectionOutIn & options);
	if (prot) {
		// Wire with the descriptor's VM tag for accounting.
		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
	} else {
		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
	}

	return kr;
}
5589*a325d9c4SApple OSS Distributions
5590*a325d9c4SApple OSS Distributions
// Translate a mapping-relative byte offset into a physical address by
// delegating to the backing descriptor at (fOffset + _offset), under the
// global memory lock.  *_length receives the contiguous segment length.
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5610*a325d9c4SApple OSS Distributions
5611*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5612*a325d9c4SApple OSS Distributions
5613*a325d9c4SApple OSS Distributions #undef super
5614*a325d9c4SApple OSS Distributions #define super OSObject
5615*a325d9c4SApple OSS Distributions
5616*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5617*a325d9c4SApple OSS Distributions
// One-time class initialization: allocate the global recursive memory lock
// (if not already present) and cache the highest physical page number.
void
IOMemoryDescriptor::initialize( void )
{
	if (NULL == gIOMemoryLock) {
		gIOMemoryLock = IORecursiveLockAlloc();
	}

	gIOLastPage = IOGetLastPageNumber();
}
5627*a325d9c4SApple OSS Distributions
5628*a325d9c4SApple OSS Distributions void
free(void)5629*a325d9c4SApple OSS Distributions IOMemoryDescriptor::free( void )
5630*a325d9c4SApple OSS Distributions {
5631*a325d9c4SApple OSS Distributions if (_mappings) {
5632*a325d9c4SApple OSS Distributions _mappings.reset();
5633*a325d9c4SApple OSS Distributions }
5634*a325d9c4SApple OSS Distributions
5635*a325d9c4SApple OSS Distributions if (reserved) {
5636*a325d9c4SApple OSS Distributions cleanKernelReserved(reserved);
5637*a325d9c4SApple OSS Distributions IOFreeType(reserved, IOMemoryDescriptorReserved);
5638*a325d9c4SApple OSS Distributions reserved = NULL;
5639*a325d9c4SApple OSS Distributions }
5640*a325d9c4SApple OSS Distributions super::free();
5641*a325d9c4SApple OSS Distributions }
5642*a325d9c4SApple OSS Distributions
// Record a pre-existing (already established) mapping of this descriptor at
// mapAddress in intoTask; kIOMapStatic tells createMappingInTask not to
// perform any VM work.  Covers the descriptor's full length from offset 0.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t intoTask,
	IOVirtualAddress mapAddress,
	IOOptionBits options )
{
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}
5653*a325d9c4SApple OSS Distributions
// Convenience: map the whole descriptor anywhere in the kernel task.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}
5662*a325d9c4SApple OSS Distributions
5663*a325d9c4SApple OSS Distributions #ifndef __LP64__
// Legacy (non-LP64) entry point.  A fixed-address mapping into a 64-bit
// task cannot be expressed through a 32-bit IOVirtualAddress, so such
// requests are refused with a backtrace pointing callers at
// createMappingInTask().
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t intoTask,
	IOVirtualAddress atAddress,
	IOOptionBits options,
	IOByteCount offset,
	IOByteCount length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
5680*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
5681*a325d9c4SApple OSS Distributions
// Create a mapping of [offset, offset + length) of this descriptor in
// intoTask.  A zero length means "map the whole descriptor".  A candidate
// IOMemoryMap is constructed here and handed to makeMapping() (smuggled
// through the address argument, flagged by kIOMap64Bit), which either
// adopts it, substitutes an existing compatible mapping, or fails.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t intoTask,
	mach_vm_address_t atAddress,
	IOOptionBits options,
	mach_vm_size_t offset,
	mach_vm_size_t length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
5724*a325d9c4SApple OSS Distributions
5725*a325d9c4SApple OSS Distributions #ifndef __LP64__ // there is only a 64 bit version for LP64
// Legacy 32-bit wrapper: widen the offset and forward to the
// mach_vm_size_t overload.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    IOByteCount offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
5733*a325d9c4SApple OSS Distributions #endif
5734*a325d9c4SApple OSS Distributions
// Redirect this mapping onto a new backing memory descriptor.  While the
// old backing has a single outstanding memory reference, a blocking UPL
// (UPL_BLOCK_ACCESS) is taken so accesses stall during the switch; once the
// new backing is wired in, the UPL is committed and released.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    mach_vm_size_t offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			// Hold a reference to physical-type backing memory across
			// the transition.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// Only safe to block access when we hold the sole memory
			// reference (count == 1) and no redirect UPL exists yet.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) { // branch deliberately dead
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// Rebind this same IOMemoryMap object onto the new
					// descriptor (kIOMapUnique | kIOMapReference).
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Unblock: commit and drop the redirect UPL.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) { // branch deliberately dead
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5795*a325d9c4SApple OSS Distributions
// Core mapping factory.  With kIOMap64Bit set, __address actually carries a
// candidate IOMemoryMap* (built by createMappingInTask); this routine
// either adopts it, replaces it with an existing compatible mapping, or
// releases it on failure.  Returns a retained mapping, or NULL.
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor * owner,
	task_t __intoTask,
	IOVirtualAddress __address,
	IOOptionBits options,
	IOByteCount __offset,
	IOByteCount __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result = NULL;

	IOMemoryMap * mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset = mapping->fOffset + __offset;
	mach_vm_size_t length = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Static mapping: just record it, no VM work needed.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount physLen;

			// if (owner != this) continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Physical descriptor: wrap the contiguous physical
				// range in a fresh descriptor and map that instead.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// Look for a compatible existing mapping.
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
						addMapping(result);
						result->setMemoryDescriptor(this, offset);
						return true;
					}
					return false;
				});
			}
			// Found one, or a reference-only request that found none:
			// drop the candidate mapping (unless it IS the result).
			if (result || (options & kIOMapReference)) {
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
5893*a325d9c4SApple OSS Distributions
5894*a325d9c4SApple OSS Distributions void
addMapping(IOMemoryMap * mapping)5895*a325d9c4SApple OSS Distributions IOMemoryDescriptor::addMapping(
5896*a325d9c4SApple OSS Distributions IOMemoryMap * mapping )
5897*a325d9c4SApple OSS Distributions {
5898*a325d9c4SApple OSS Distributions if (mapping) {
5899*a325d9c4SApple OSS Distributions if (NULL == _mappings) {
5900*a325d9c4SApple OSS Distributions _mappings = OSSet::withCapacity(1);
5901*a325d9c4SApple OSS Distributions }
5902*a325d9c4SApple OSS Distributions if (_mappings) {
5903*a325d9c4SApple OSS Distributions _mappings->setObject( mapping );
5904*a325d9c4SApple OSS Distributions }
5905*a325d9c4SApple OSS Distributions }
5906*a325d9c4SApple OSS Distributions }
5907*a325d9c4SApple OSS Distributions
// Stop tracking a mapping of this descriptor (no-op if the set was never
// created).
void
IOMemoryDescriptor::removeMapping(
	IOMemoryMap * mapping )
{
	if (_mappings) {
		_mappings->removeObject( mapping);
	}
}
5916*a325d9c4SApple OSS Distributions
5917*a325d9c4SApple OSS Distributions #ifndef __LP64__
5918*a325d9c4SApple OSS Distributions // obsolete initializers
5919*a325d9c4SApple OSS Distributions // - initWithOptions is the designated initializer
// Obsolete initializer (initWithOptions is the designated one); always fails.
bool
IOMemoryDescriptor::initWithAddress(void * address,
    IOByteCount length,
    IODirection direction)
{
	return false;
}
5927*a325d9c4SApple OSS Distributions
// Obsolete initializer (initWithOptions is the designated one); always fails.
bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t task)
{
	return false;
}
5936*a325d9c4SApple OSS Distributions
// Obsolete initializer (initWithOptions is the designated one); always fails.
bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount length,
	IODirection direction )
{
	return false;
}
5945*a325d9c4SApple OSS Distributions
// Obsolete initializer (initWithOptions is the designated one); always fails.
bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32 withCount,
	IODirection direction,
	task_t task,
	bool asReference)
{
	return false;
}
5956*a325d9c4SApple OSS Distributions
// Obsolete initializer (initWithOptions is the designated one); always fails.
bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
    UInt32 withCount,
    IODirection direction,
    bool asReference)
{
	return false;
}
5965*a325d9c4SApple OSS Distributions
// Obsolete accessor; always returns NULL in this base implementation.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
5972*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
5973*a325d9c4SApple OSS Distributions
5974*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5975*a325d9c4SApple OSS Distributions
5976*a325d9c4SApple OSS Distributions bool
serialize(OSSerialize * s) const5977*a325d9c4SApple OSS Distributions IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
5978*a325d9c4SApple OSS Distributions {
5979*a325d9c4SApple OSS Distributions OSSharedPtr<OSSymbol const> keys[2] = {NULL};
5980*a325d9c4SApple OSS Distributions OSSharedPtr<OSObject> values[2] = {NULL};
5981*a325d9c4SApple OSS Distributions OSSharedPtr<OSArray> array;
5982*a325d9c4SApple OSS Distributions
5983*a325d9c4SApple OSS Distributions struct SerData {
5984*a325d9c4SApple OSS Distributions user_addr_t address;
5985*a325d9c4SApple OSS Distributions user_size_t length;
5986*a325d9c4SApple OSS Distributions };
5987*a325d9c4SApple OSS Distributions
5988*a325d9c4SApple OSS Distributions unsigned int index;
5989*a325d9c4SApple OSS Distributions
5990*a325d9c4SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
5991*a325d9c4SApple OSS Distributions
5992*a325d9c4SApple OSS Distributions if (s == NULL) {
5993*a325d9c4SApple OSS Distributions return false;
5994*a325d9c4SApple OSS Distributions }
5995*a325d9c4SApple OSS Distributions
5996*a325d9c4SApple OSS Distributions array = OSArray::withCapacity(4);
5997*a325d9c4SApple OSS Distributions if (!array) {
5998*a325d9c4SApple OSS Distributions return false;
5999*a325d9c4SApple OSS Distributions }
6000*a325d9c4SApple OSS Distributions
6001*a325d9c4SApple OSS Distributions OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6002*a325d9c4SApple OSS Distributions if (!vcopy) {
6003*a325d9c4SApple OSS Distributions return false;
6004*a325d9c4SApple OSS Distributions }
6005*a325d9c4SApple OSS Distributions
6006*a325d9c4SApple OSS Distributions keys[0] = OSSymbol::withCString("address");
6007*a325d9c4SApple OSS Distributions keys[1] = OSSymbol::withCString("length");
6008*a325d9c4SApple OSS Distributions
6009*a325d9c4SApple OSS Distributions // Copy the volatile data so we don't have to allocate memory
6010*a325d9c4SApple OSS Distributions // while the lock is held.
6011*a325d9c4SApple OSS Distributions LOCK;
6012*a325d9c4SApple OSS Distributions if (vcopy.size() == _rangesCount) {
6013*a325d9c4SApple OSS Distributions Ranges vec = _ranges;
6014*a325d9c4SApple OSS Distributions for (index = 0; index < vcopy.size(); index++) {
6015*a325d9c4SApple OSS Distributions mach_vm_address_t addr; mach_vm_size_t len;
6016*a325d9c4SApple OSS Distributions getAddrLenForInd(addr, len, type, vec, index);
6017*a325d9c4SApple OSS Distributions vcopy[index].address = addr;
6018*a325d9c4SApple OSS Distributions vcopy[index].length = len;
6019*a325d9c4SApple OSS Distributions }
6020*a325d9c4SApple OSS Distributions } else {
6021*a325d9c4SApple OSS Distributions // The descriptor changed out from under us. Give up.
6022*a325d9c4SApple OSS Distributions UNLOCK;
6023*a325d9c4SApple OSS Distributions return false;
6024*a325d9c4SApple OSS Distributions }
6025*a325d9c4SApple OSS Distributions UNLOCK;
6026*a325d9c4SApple OSS Distributions
6027*a325d9c4SApple OSS Distributions for (index = 0; index < vcopy.size(); index++) {
6028*a325d9c4SApple OSS Distributions user_addr_t addr = vcopy[index].address;
6029*a325d9c4SApple OSS Distributions IOByteCount len = (IOByteCount) vcopy[index].length;
6030*a325d9c4SApple OSS Distributions values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6031*a325d9c4SApple OSS Distributions if (values[0] == NULL) {
6032*a325d9c4SApple OSS Distributions return false;
6033*a325d9c4SApple OSS Distributions }
6034*a325d9c4SApple OSS Distributions values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6035*a325d9c4SApple OSS Distributions if (values[1] == NULL) {
6036*a325d9c4SApple OSS Distributions return false;
6037*a325d9c4SApple OSS Distributions }
6038*a325d9c4SApple OSS Distributions OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6039*a325d9c4SApple OSS Distributions if (dict == NULL) {
6040*a325d9c4SApple OSS Distributions return false;
6041*a325d9c4SApple OSS Distributions }
6042*a325d9c4SApple OSS Distributions array->setObject(dict.get());
6043*a325d9c4SApple OSS Distributions dict.reset();
6044*a325d9c4SApple OSS Distributions values[0].reset();
6045*a325d9c4SApple OSS Distributions values[1].reset();
6046*a325d9c4SApple OSS Distributions }
6047*a325d9c4SApple OSS Distributions
6048*a325d9c4SApple OSS Distributions return array->serialize(s);
6049*a325d9c4SApple OSS Distributions }
6050*a325d9c4SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6051*a325d9c4SApple OSS Distributions
6052*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
6053*a325d9c4SApple OSS Distributions #ifdef __LP64__
6054*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
6055*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
6056*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
6057*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
6058*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
6059*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
6060*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
6061*a325d9c4SApple OSS Distributions #else /* !__LP64__ */
6062*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
6063*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
6064*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
6065*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
6066*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
6067*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
6068*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
6069*a325d9c4SApple OSS Distributions #endif /* !__LP64__ */
6070*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
6071*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
6072*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
6073*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
6074*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
6075*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
6076*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
6077*a325d9c4SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6078*a325d9c4SApple OSS Distributions
6079*a325d9c4SApple OSS Distributions /* ex-inline function implementation */
6080*a325d9c4SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()6081*a325d9c4SApple OSS Distributions IOMemoryDescriptor::getPhysicalAddress()
6082*a325d9c4SApple OSS Distributions {
6083*a325d9c4SApple OSS Distributions return getPhysicalSegment( 0, NULL );
6084*a325d9c4SApple OSS Distributions }
6085*a325d9c4SApple OSS Distributions
// Registers _IOMemoryDescriptorMixedData with the libkern OSMetaClass
// runtime as a direct subclass of OSObject (default ctor/dtor glue).
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6087*a325d9c4SApple OSS Distributions
6088*a325d9c4SApple OSS Distributions OSPtr<_IOMemoryDescriptorMixedData>
6089*a325d9c4SApple OSS Distributions _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6090*a325d9c4SApple OSS Distributions {
6091*a325d9c4SApple OSS Distributions OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6092*a325d9c4SApple OSS Distributions if (me && !me->initWithCapacity(capacity)) {
6093*a325d9c4SApple OSS Distributions return nullptr;
6094*a325d9c4SApple OSS Distributions }
6095*a325d9c4SApple OSS Distributions return me;
6096*a325d9c4SApple OSS Distributions }
6097*a325d9c4SApple OSS Distributions
6098*a325d9c4SApple OSS Distributions bool
initWithCapacity(size_t capacity)6099*a325d9c4SApple OSS Distributions _IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
6100*a325d9c4SApple OSS Distributions {
6101*a325d9c4SApple OSS Distributions if (_data && (!capacity || (_capacity < capacity))) {
6102*a325d9c4SApple OSS Distributions freeMemory();
6103*a325d9c4SApple OSS Distributions }
6104*a325d9c4SApple OSS Distributions
6105*a325d9c4SApple OSS Distributions if (!OSObject::init()) {
6106*a325d9c4SApple OSS Distributions return false;
6107*a325d9c4SApple OSS Distributions }
6108*a325d9c4SApple OSS Distributions
6109*a325d9c4SApple OSS Distributions if (!_data && capacity) {
6110*a325d9c4SApple OSS Distributions _data = IOMalloc(capacity);
6111*a325d9c4SApple OSS Distributions if (!_data) {
6112*a325d9c4SApple OSS Distributions return false;
6113*a325d9c4SApple OSS Distributions }
6114*a325d9c4SApple OSS Distributions _capacity = capacity;
6115*a325d9c4SApple OSS Distributions }
6116*a325d9c4SApple OSS Distributions
6117*a325d9c4SApple OSS Distributions _length = 0;
6118*a325d9c4SApple OSS Distributions
6119*a325d9c4SApple OSS Distributions return true;
6120*a325d9c4SApple OSS Distributions }
6121*a325d9c4SApple OSS Distributions
6122*a325d9c4SApple OSS Distributions void
free()6123*a325d9c4SApple OSS Distributions _IOMemoryDescriptorMixedData::free()
6124*a325d9c4SApple OSS Distributions {
6125*a325d9c4SApple OSS Distributions freeMemory();
6126*a325d9c4SApple OSS Distributions OSObject::free();
6127*a325d9c4SApple OSS Distributions }
6128*a325d9c4SApple OSS Distributions
6129*a325d9c4SApple OSS Distributions void
freeMemory()6130*a325d9c4SApple OSS Distributions _IOMemoryDescriptorMixedData::freeMemory()
6131*a325d9c4SApple OSS Distributions {
6132*a325d9c4SApple OSS Distributions IOFree(_data, _capacity);
6133*a325d9c4SApple OSS Distributions _data = nullptr;
6134*a325d9c4SApple OSS Distributions _capacity = _length = 0;
6135*a325d9c4SApple OSS Distributions }
6136*a325d9c4SApple OSS Distributions
6137*a325d9c4SApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6138*a325d9c4SApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6139*a325d9c4SApple OSS Distributions {
6140*a325d9c4SApple OSS Distributions const auto oldLength = getLength();
6141*a325d9c4SApple OSS Distributions size_t newLength;
6142*a325d9c4SApple OSS Distributions if (os_add_overflow(oldLength, length, &newLength)) {
6143*a325d9c4SApple OSS Distributions return false;
6144*a325d9c4SApple OSS Distributions }
6145*a325d9c4SApple OSS Distributions
6146*a325d9c4SApple OSS Distributions if (newLength > _capacity) {
6147*a325d9c4SApple OSS Distributions void * const newData = IOMalloc(newLength);
6148*a325d9c4SApple OSS Distributions if (_data) {
6149*a325d9c4SApple OSS Distributions bcopy(_data, newData, oldLength);
6150*a325d9c4SApple OSS Distributions IOFree(_data, _capacity);
6151*a325d9c4SApple OSS Distributions }
6152*a325d9c4SApple OSS Distributions _data = newData;
6153*a325d9c4SApple OSS Distributions _capacity = newLength;
6154*a325d9c4SApple OSS Distributions }
6155*a325d9c4SApple OSS Distributions
6156*a325d9c4SApple OSS Distributions unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6157*a325d9c4SApple OSS Distributions if (bytes) {
6158*a325d9c4SApple OSS Distributions bcopy(bytes, dest, length);
6159*a325d9c4SApple OSS Distributions } else {
6160*a325d9c4SApple OSS Distributions bzero(dest, length);
6161*a325d9c4SApple OSS Distributions }
6162*a325d9c4SApple OSS Distributions
6163*a325d9c4SApple OSS Distributions _length = newLength;
6164*a325d9c4SApple OSS Distributions
6165*a325d9c4SApple OSS Distributions return true;
6166*a325d9c4SApple OSS Distributions }
6167*a325d9c4SApple OSS Distributions
6168*a325d9c4SApple OSS Distributions void
setLength(size_t length)6169*a325d9c4SApple OSS Distributions _IOMemoryDescriptorMixedData::setLength(size_t length)
6170*a325d9c4SApple OSS Distributions {
6171*a325d9c4SApple OSS Distributions if (!_data || (length > _capacity)) {
6172*a325d9c4SApple OSS Distributions void * const newData = IOMallocZero(length);
6173*a325d9c4SApple OSS Distributions if (_data) {
6174*a325d9c4SApple OSS Distributions bcopy(_data, newData, _length);
6175*a325d9c4SApple OSS Distributions IOFree(_data, _capacity);
6176*a325d9c4SApple OSS Distributions }
6177*a325d9c4SApple OSS Distributions _data = newData;
6178*a325d9c4SApple OSS Distributions _capacity = length;
6179*a325d9c4SApple OSS Distributions }
6180*a325d9c4SApple OSS Distributions _length = length;
6181*a325d9c4SApple OSS Distributions }
6182*a325d9c4SApple OSS Distributions
6183*a325d9c4SApple OSS Distributions const void *
getBytes() const6184*a325d9c4SApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6185*a325d9c4SApple OSS Distributions {
6186*a325d9c4SApple OSS Distributions return _length ? _data : nullptr;
6187*a325d9c4SApple OSS Distributions }
6188*a325d9c4SApple OSS Distributions
6189*a325d9c4SApple OSS Distributions size_t
getLength() const6190*a325d9c4SApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6191*a325d9c4SApple OSS Distributions {
6192*a325d9c4SApple OSS Distributions return _data ? _length : 0;
6193*a325d9c4SApple OSS Distributions }
6194