1*4f1223e8SApple OSS Distributions /*
2*4f1223e8SApple OSS Distributions * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*4f1223e8SApple OSS Distributions *
4*4f1223e8SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*4f1223e8SApple OSS Distributions *
6*4f1223e8SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*4f1223e8SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*4f1223e8SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*4f1223e8SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*4f1223e8SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*4f1223e8SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*4f1223e8SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*4f1223e8SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*4f1223e8SApple OSS Distributions *
15*4f1223e8SApple OSS Distributions * Please obtain a copy of the License at
16*4f1223e8SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*4f1223e8SApple OSS Distributions *
18*4f1223e8SApple OSS Distributions * The Original Code and all software distributed under the License are
19*4f1223e8SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*4f1223e8SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*4f1223e8SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*4f1223e8SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*4f1223e8SApple OSS Distributions * Please see the License for the specific language governing rights and
24*4f1223e8SApple OSS Distributions * limitations under the License.
25*4f1223e8SApple OSS Distributions *
26*4f1223e8SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*4f1223e8SApple OSS Distributions */
28*4f1223e8SApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*4f1223e8SApple OSS Distributions
30*4f1223e8SApple OSS Distributions #include <sys/cdefs.h>
31*4f1223e8SApple OSS Distributions
32*4f1223e8SApple OSS Distributions #include <IOKit/assert.h>
33*4f1223e8SApple OSS Distributions #include <IOKit/system.h>
34*4f1223e8SApple OSS Distributions #include <IOKit/IOLib.h>
35*4f1223e8SApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*4f1223e8SApple OSS Distributions #include <IOKit/IOMapper.h>
37*4f1223e8SApple OSS Distributions #include <IOKit/IODMACommand.h>
38*4f1223e8SApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*4f1223e8SApple OSS Distributions
40*4f1223e8SApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*4f1223e8SApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*4f1223e8SApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*4f1223e8SApple OSS Distributions
44*4f1223e8SApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*4f1223e8SApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*4f1223e8SApple OSS Distributions #include <libkern/OSDebug.h>
47*4f1223e8SApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*4f1223e8SApple OSS Distributions
49*4f1223e8SApple OSS Distributions #include "IOKitKernelInternal.h"
50*4f1223e8SApple OSS Distributions
51*4f1223e8SApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*4f1223e8SApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*4f1223e8SApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*4f1223e8SApple OSS Distributions #include <libkern/c++/OSArray.h>
55*4f1223e8SApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*4f1223e8SApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*4f1223e8SApple OSS Distributions #include <os/overflow.h>
58*4f1223e8SApple OSS Distributions #include <os/cpp_util.h>
59*4f1223e8SApple OSS Distributions #include <os/base_private.h>
60*4f1223e8SApple OSS Distributions
61*4f1223e8SApple OSS Distributions #include <sys/uio.h>
62*4f1223e8SApple OSS Distributions
63*4f1223e8SApple OSS Distributions __BEGIN_DECLS
64*4f1223e8SApple OSS Distributions #include <vm/pmap.h>
65*4f1223e8SApple OSS Distributions #include <vm/vm_pageout_xnu.h>
66*4f1223e8SApple OSS Distributions #include <mach/memory_object_types.h>
67*4f1223e8SApple OSS Distributions #include <device/device_port.h>
68*4f1223e8SApple OSS Distributions
69*4f1223e8SApple OSS Distributions #include <mach/vm_prot.h>
70*4f1223e8SApple OSS Distributions #include <mach/mach_vm.h>
71*4f1223e8SApple OSS Distributions #include <mach/memory_entry.h>
72*4f1223e8SApple OSS Distributions #include <mach/mach_host.h>
73*4f1223e8SApple OSS Distributions #include <vm/vm_fault_xnu.h>
74*4f1223e8SApple OSS Distributions #include <vm/vm_protos.h>
75*4f1223e8SApple OSS Distributions #include <vm/vm_memory_entry.h>
76*4f1223e8SApple OSS Distributions #include <vm/vm_kern_xnu.h>
77*4f1223e8SApple OSS Distributions #include <vm/vm_iokit.h>
78*4f1223e8SApple OSS Distributions #include <vm/vm_map_xnu.h>
79*4f1223e8SApple OSS Distributions
80*4f1223e8SApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
81*4f1223e8SApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
82*4f1223e8SApple OSS Distributions
83*4f1223e8SApple OSS Distributions __END_DECLS
84*4f1223e8SApple OSS Distributions
// Sentinel mapper value: request the system mapper (resolved lazily;
// NOTE(review): presumably callers block until it is published — confirm).
#define kIOMapperWaitSystem ((IOMapper *) 1)

// Cached system-wide IOMapper, NULL until resolved elsewhere in this file.
static IOMapper * gIOSystemMapper = NULL;

// Highest physical page number — assigned outside this chunk.
ppnum_t gIOLastPage;

enum {
	// Guard-page region size used for large guarded mappings.
	kIOMapGuardSizeLarge = 65536
};
94*4f1223e8SApple OSS Distributions
95*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
96*4f1223e8SApple OSS Distributions
// IOMemoryDescriptor is abstract: no direct instances, subclasses provide
// the storage-specific behavior.
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

// 'super' for the IOGeneralMemoryDescriptor member definitions below.
#define super IOMemoryDescriptor

// IOGeneralMemoryDescriptor instances come from a dedicated zone that
// zeroes memory on free (ZC_ZFREE_CLEARMEM).
OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
103*4f1223e8SApple OSS Distributions
104*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
105*4f1223e8SApple OSS Distributions
// Single recursive lock guarding shared memory-descriptor state
// (initialized elsewhere; used by e.g. device_data_action below).
static IORecursiveLock * gIOMemoryLock;

// Convenience wrappers. SLEEP/WAKEUP use 'this' as the wait event, so they
// are only meaningful inside member functions.
#define LOCK IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug logging, compiled out by default; flip the #if to enable kprintf.
#if 0
#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif
119*4f1223e8SApple OSS Distributions
120*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
121*4f1223e8SApple OSS Distributions
122*4f1223e8SApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
123*4f1223e8SApple OSS Distributions // Function
124*4f1223e8SApple OSS Distributions
// Flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice = 0x00000001,
	kIOPLExternUPL = 0x00000002,
};

// Payload used when initializing a descriptor from a persistent memory
// reference: the source descriptor and its memory reference.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference * fMemRef;
};

// One wired region (UPL) belonging to a descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset; // The offset of this iopl in descriptor
	ppnum_t fMappedPage; // Page number of first page in this iopl
	unsigned int fPageOffset; // Offset within first page of iopl
	unsigned int fFlags; // Flags (ioPLBlockFlags)
};

enum { kMaxWireTags = 6 };

// Wiring/DMA-mapping state for an IOGeneralMemoryDescriptor. The fixed
// header is followed in the same allocation by fPageCnt page-info entries
// and then a tail of ioPLBlock records; see the accessor macros below.
struct ioGMDData {
	IOMapper * fMapper;
	uint64_t fDMAMapAlignment;
	uint64_t fMappedBase; // only meaningful while fMappedBaseValid is set
	uint64_t fMappedLength;
	uint64_t fPreparationID;
#if IOTRACKING
	IOTracking fWireTracking;
#endif /* IOTRACKING */
	unsigned int fPageCnt; // number of entries in fPageList
	uint8_t fDMAMapNumAddressBits;
	unsigned char fCompletionError:1;
	unsigned char fMappedBaseValid:1;
	unsigned char _resv:4;
	unsigned char fDMAAccess:2;

	/* variable length arrays */
	// [1] rather than a flexible array member: the accessor macros compute
	// sizes with offsetof(ioGMDData, fPageList) — do not change the layout.
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
171*4f1223e8SApple OSS Distributions
172*4f1223e8SApple OSS Distributions #pragma GCC visibility push(hidden)
173*4f1223e8SApple OSS Distributions
// Growable byte buffer used to hold an ioGMDData header plus its trailing
// variable-length arrays. Method bodies are defined elsewhere in this file.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Factory: buffer with at least 'capacity' bytes reserved.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append raw bytes, growing as needed; returns false on failure.
	bool appendBytes(const void * bytes, size_t length);
	// Set the logical length (within capacity semantics defined by the impl).
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();

	void * _data = nullptr;   // backing storage
	size_t _length = 0;       // bytes in use
	size_t _capacity = 0;     // bytes allocated
};
196*4f1223e8SApple OSS Distributions
197*4f1223e8SApple OSS Distributions #pragma GCC visibility pop
198*4f1223e8SApple OSS Distributions
199*4f1223e8SApple OSS Distributions #define getDataP(osd) ((ioGMDData *) (osd)->getBytes())
200*4f1223e8SApple OSS Distributions #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
201*4f1223e8SApple OSS Distributions #define getNumIOPL(osd, d) \
202*4f1223e8SApple OSS Distributions ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
203*4f1223e8SApple OSS Distributions #define getPageList(d) (&(d->fPageList[0]))
204*4f1223e8SApple OSS Distributions #define computeDataSize(p, u) \
205*4f1223e8SApple OSS Distributions (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
206*4f1223e8SApple OSS Distributions
207*4f1223e8SApple OSS Distributions enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
208*4f1223e8SApple OSS Distributions
209*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
210*4f1223e8SApple OSS Distributions
211*4f1223e8SApple OSS Distributions extern "C" {
212*4f1223e8SApple OSS Distributions kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)213*4f1223e8SApple OSS Distributions device_data_action(
214*4f1223e8SApple OSS Distributions uintptr_t device_handle,
215*4f1223e8SApple OSS Distributions ipc_port_t device_pager,
216*4f1223e8SApple OSS Distributions vm_prot_t protection,
217*4f1223e8SApple OSS Distributions vm_object_offset_t offset,
218*4f1223e8SApple OSS Distributions vm_size_t size)
219*4f1223e8SApple OSS Distributions {
220*4f1223e8SApple OSS Distributions kern_return_t kr;
221*4f1223e8SApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
222*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> memDesc;
223*4f1223e8SApple OSS Distributions
224*4f1223e8SApple OSS Distributions LOCK;
225*4f1223e8SApple OSS Distributions if (ref->dp.memory) {
226*4f1223e8SApple OSS Distributions memDesc.reset(ref->dp.memory, OSRetain);
227*4f1223e8SApple OSS Distributions kr = memDesc->handleFault(device_pager, offset, size);
228*4f1223e8SApple OSS Distributions memDesc.reset();
229*4f1223e8SApple OSS Distributions } else {
230*4f1223e8SApple OSS Distributions kr = KERN_ABORTED;
231*4f1223e8SApple OSS Distributions }
232*4f1223e8SApple OSS Distributions UNLOCK;
233*4f1223e8SApple OSS Distributions
234*4f1223e8SApple OSS Distributions return kr;
235*4f1223e8SApple OSS Distributions }
236*4f1223e8SApple OSS Distributions
237*4f1223e8SApple OSS Distributions kern_return_t
device_close(uintptr_t device_handle)238*4f1223e8SApple OSS Distributions device_close(
239*4f1223e8SApple OSS Distributions uintptr_t device_handle)
240*4f1223e8SApple OSS Distributions {
241*4f1223e8SApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
242*4f1223e8SApple OSS Distributions
243*4f1223e8SApple OSS Distributions IOFreeType( ref, IOMemoryDescriptorReserved );
244*4f1223e8SApple OSS Distributions
245*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
246*4f1223e8SApple OSS Distributions }
247*4f1223e8SApple OSS Distributions }; // end extern "C"
248*4f1223e8SApple OSS Distributions
249*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
250*4f1223e8SApple OSS Distributions
251*4f1223e8SApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
252*4f1223e8SApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
253*4f1223e8SApple OSS Distributions // checked for as a NULL reference is illegal.
// Decode range 'ind' of a Ranges union into (addr, len), according to the
// descriptor's memory type.
static inline void
getAddrLenForInd(
	mach_vm_address_t &addr,
	mach_vm_size_t &len, // Output variables
	UInt32 type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32 ind,
	task_t task __unused)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// User I/O vector: fetch iovec 'ind' from the uio.
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	// 32-bit kernels need the wider IOAddressRange layout for 64-bit types.
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len = cur.length;
	}
#endif /* !__LP64__ */
	else {
		// NOTE(review): on LP64 the 64-bit types also take this path —
		// presumably IOVirtualRange and IOAddressRange share a layout there.
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len = cur.length;
	}
#if CONFIG_PROB_GZALLOC
	if (task == kernel_task) {
		// Strip probabilistic-gzalloc encoding from kernel addresses.
		addr = pgz_decode(addr, len);
	}
#endif /* CONFIG_PROB_GZALLOC */
}
289*4f1223e8SApple OSS Distributions
290*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
291*4f1223e8SApple OSS Distributions
292*4f1223e8SApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)293*4f1223e8SApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
294*4f1223e8SApple OSS Distributions {
295*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnSuccess;
296*4f1223e8SApple OSS Distributions
297*4f1223e8SApple OSS Distributions *control = VM_PURGABLE_SET_STATE;
298*4f1223e8SApple OSS Distributions
299*4f1223e8SApple OSS Distributions enum { kIOMemoryPurgeableControlMask = 15 };
300*4f1223e8SApple OSS Distributions
301*4f1223e8SApple OSS Distributions switch (kIOMemoryPurgeableControlMask & newState) {
302*4f1223e8SApple OSS Distributions case kIOMemoryPurgeableKeepCurrent:
303*4f1223e8SApple OSS Distributions *control = VM_PURGABLE_GET_STATE;
304*4f1223e8SApple OSS Distributions break;
305*4f1223e8SApple OSS Distributions
306*4f1223e8SApple OSS Distributions case kIOMemoryPurgeableNonVolatile:
307*4f1223e8SApple OSS Distributions *state = VM_PURGABLE_NONVOLATILE;
308*4f1223e8SApple OSS Distributions break;
309*4f1223e8SApple OSS Distributions case kIOMemoryPurgeableVolatile:
310*4f1223e8SApple OSS Distributions *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
311*4f1223e8SApple OSS Distributions break;
312*4f1223e8SApple OSS Distributions case kIOMemoryPurgeableEmpty:
313*4f1223e8SApple OSS Distributions *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
314*4f1223e8SApple OSS Distributions break;
315*4f1223e8SApple OSS Distributions default:
316*4f1223e8SApple OSS Distributions err = kIOReturnBadArgument;
317*4f1223e8SApple OSS Distributions break;
318*4f1223e8SApple OSS Distributions }
319*4f1223e8SApple OSS Distributions
320*4f1223e8SApple OSS Distributions if (*control == VM_PURGABLE_SET_STATE) {
321*4f1223e8SApple OSS Distributions // let VM know this call is from the kernel and is allowed to alter
322*4f1223e8SApple OSS Distributions // the volatility of the memory entry even if it was created with
323*4f1223e8SApple OSS Distributions // MAP_MEM_PURGABLE_KERNEL_ONLY
324*4f1223e8SApple OSS Distributions *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
325*4f1223e8SApple OSS Distributions }
326*4f1223e8SApple OSS Distributions
327*4f1223e8SApple OSS Distributions return err;
328*4f1223e8SApple OSS Distributions }
329*4f1223e8SApple OSS Distributions
330*4f1223e8SApple OSS Distributions static IOReturn
purgeableStateBits(int * state)331*4f1223e8SApple OSS Distributions purgeableStateBits(int * state)
332*4f1223e8SApple OSS Distributions {
333*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnSuccess;
334*4f1223e8SApple OSS Distributions
335*4f1223e8SApple OSS Distributions switch (VM_PURGABLE_STATE_MASK & *state) {
336*4f1223e8SApple OSS Distributions case VM_PURGABLE_NONVOLATILE:
337*4f1223e8SApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
338*4f1223e8SApple OSS Distributions break;
339*4f1223e8SApple OSS Distributions case VM_PURGABLE_VOLATILE:
340*4f1223e8SApple OSS Distributions *state = kIOMemoryPurgeableVolatile;
341*4f1223e8SApple OSS Distributions break;
342*4f1223e8SApple OSS Distributions case VM_PURGABLE_EMPTY:
343*4f1223e8SApple OSS Distributions *state = kIOMemoryPurgeableEmpty;
344*4f1223e8SApple OSS Distributions break;
345*4f1223e8SApple OSS Distributions default:
346*4f1223e8SApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
347*4f1223e8SApple OSS Distributions err = kIOReturnNotReady;
348*4f1223e8SApple OSS Distributions break;
349*4f1223e8SApple OSS Distributions }
350*4f1223e8SApple OSS Distributions return err;
351*4f1223e8SApple OSS Distributions }
352*4f1223e8SApple OSS Distributions
// One row per IOKit cache mode: the pager WIMG bits and the matching
// MAP_MEM_* object type for named-entry creation.
typedef struct {
	unsigned int wimg;        // VM_WIMG_* pager flags
	unsigned int object_type; // MAP_MEM_* memory-entry type
} iokit_memtype_entry;

// Indexed directly by kIO*Cache mode; keep in sync with those constants.
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
370*4f1223e8SApple OSS Distributions
371*4f1223e8SApple OSS Distributions static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)372*4f1223e8SApple OSS Distributions vmProtForCacheMode(IOOptionBits cacheMode)
373*4f1223e8SApple OSS Distributions {
374*4f1223e8SApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
375*4f1223e8SApple OSS Distributions if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
376*4f1223e8SApple OSS Distributions cacheMode = kIODefaultCache;
377*4f1223e8SApple OSS Distributions }
378*4f1223e8SApple OSS Distributions vm_prot_t prot = 0;
379*4f1223e8SApple OSS Distributions SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
380*4f1223e8SApple OSS Distributions return prot;
381*4f1223e8SApple OSS Distributions }
382*4f1223e8SApple OSS Distributions
383*4f1223e8SApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)384*4f1223e8SApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
385*4f1223e8SApple OSS Distributions {
386*4f1223e8SApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
387*4f1223e8SApple OSS Distributions if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
388*4f1223e8SApple OSS Distributions cacheMode = kIODefaultCache;
389*4f1223e8SApple OSS Distributions }
390*4f1223e8SApple OSS Distributions if (cacheMode == kIODefaultCache) {
391*4f1223e8SApple OSS Distributions return -1U;
392*4f1223e8SApple OSS Distributions }
393*4f1223e8SApple OSS Distributions return iomd_mem_types[cacheMode].wimg;
394*4f1223e8SApple OSS Distributions }
395*4f1223e8SApple OSS Distributions
396*4f1223e8SApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)397*4f1223e8SApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
398*4f1223e8SApple OSS Distributions {
399*4f1223e8SApple OSS Distributions pagerFlags &= VM_WIMG_MASK;
400*4f1223e8SApple OSS Distributions IOOptionBits cacheMode = kIODefaultCache;
401*4f1223e8SApple OSS Distributions for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
402*4f1223e8SApple OSS Distributions if (iomd_mem_types[i].wimg == pagerFlags) {
403*4f1223e8SApple OSS Distributions cacheMode = i;
404*4f1223e8SApple OSS Distributions break;
405*4f1223e8SApple OSS Distributions }
406*4f1223e8SApple OSS Distributions }
407*4f1223e8SApple OSS Distributions return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
408*4f1223e8SApple OSS Distributions }
409*4f1223e8SApple OSS Distributions
410*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
411*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
412*4f1223e8SApple OSS Distributions
// One VM named entry backing part of a memory reference.
struct IOMemoryEntry {
	ipc_port_t entry; // send right to the named entry (released on free)
	int64_t offset;
	uint64_t size;
	uint64_t start;
};

// Refcounted bundle of named entries describing a descriptor's memory.
// Allocated with a trailing array of 'capacity' IOMemoryEntry slots,
// 'count' of which are in use.
struct IOMemoryReference {
	volatile SInt32 refCount;
	vm_prot_t prot;
	uint32_t capacity; // allocated entries[] slots
	uint32_t count;    // entries[] slots in use
	struct IOMemoryReference * mapRef; // optional chained reference, freed recursively
	IOMemoryEntry entries[0]; // trailing array sized by krealloc_type
};

// Option bits for memory-reference creation.
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW = 0x00000004,
};

// Global count of live IOMemoryReference objects (diagnostic counter).
SInt32 gIOMemoryReferenceCount;
436*4f1223e8SApple OSS Distributions
437*4f1223e8SApple OSS Distributions IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)438*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
439*4f1223e8SApple OSS Distributions {
440*4f1223e8SApple OSS Distributions IOMemoryReference * ref;
441*4f1223e8SApple OSS Distributions size_t oldCapacity;
442*4f1223e8SApple OSS Distributions
443*4f1223e8SApple OSS Distributions if (realloc) {
444*4f1223e8SApple OSS Distributions oldCapacity = realloc->capacity;
445*4f1223e8SApple OSS Distributions } else {
446*4f1223e8SApple OSS Distributions oldCapacity = 0;
447*4f1223e8SApple OSS Distributions }
448*4f1223e8SApple OSS Distributions
449*4f1223e8SApple OSS Distributions // Use the kalloc API instead of manually handling the reallocation
450*4f1223e8SApple OSS Distributions ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
451*4f1223e8SApple OSS Distributions oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
452*4f1223e8SApple OSS Distributions if (ref) {
453*4f1223e8SApple OSS Distributions if (oldCapacity == 0) {
454*4f1223e8SApple OSS Distributions ref->refCount = 1;
455*4f1223e8SApple OSS Distributions OSIncrementAtomic(&gIOMemoryReferenceCount);
456*4f1223e8SApple OSS Distributions }
457*4f1223e8SApple OSS Distributions ref->capacity = capacity;
458*4f1223e8SApple OSS Distributions }
459*4f1223e8SApple OSS Distributions return ref;
460*4f1223e8SApple OSS Distributions }
461*4f1223e8SApple OSS Distributions
462*4f1223e8SApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)463*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
464*4f1223e8SApple OSS Distributions {
465*4f1223e8SApple OSS Distributions IOMemoryEntry * entries;
466*4f1223e8SApple OSS Distributions
467*4f1223e8SApple OSS Distributions if (ref->mapRef) {
468*4f1223e8SApple OSS Distributions memoryReferenceFree(ref->mapRef);
469*4f1223e8SApple OSS Distributions ref->mapRef = NULL;
470*4f1223e8SApple OSS Distributions }
471*4f1223e8SApple OSS Distributions
472*4f1223e8SApple OSS Distributions entries = ref->entries + ref->count;
473*4f1223e8SApple OSS Distributions while (entries > &ref->entries[0]) {
474*4f1223e8SApple OSS Distributions entries--;
475*4f1223e8SApple OSS Distributions ipc_port_release_send(entries->entry);
476*4f1223e8SApple OSS Distributions }
477*4f1223e8SApple OSS Distributions kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
478*4f1223e8SApple OSS Distributions
479*4f1223e8SApple OSS Distributions OSDecrementAtomic(&gIOMemoryReferenceCount);
480*4f1223e8SApple OSS Distributions }
481*4f1223e8SApple OSS Distributions
482*4f1223e8SApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)483*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
484*4f1223e8SApple OSS Distributions {
485*4f1223e8SApple OSS Distributions if (1 == OSDecrementAtomic(&ref->refCount)) {
486*4f1223e8SApple OSS Distributions memoryReferenceFree(ref);
487*4f1223e8SApple OSS Distributions }
488*4f1223e8SApple OSS Distributions }
489*4f1223e8SApple OSS Distributions
490*4f1223e8SApple OSS Distributions
/*
 * Build an IOMemoryReference for this descriptor: a growable array of
 * IOMemoryEntry records, each holding a Mach named-entry send right
 * covering a coalesced, page-aligned run of the descriptor's ranges.
 * On success *reference is set (possibly to a reused _memRef when
 * kIOMemoryReferenceReuse matched); on failure *reference is NULL and
 * an error is returned.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits options,
	IOMemoryReference ** reference)
{
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t err;
	IOMemoryReference * ref;
	IOMemoryEntry * entries;
	IOMemoryEntry * cloneEntries = NULL;
	vm_map_t map;
	ipc_port_t entry, cloneEntry;
	vm_prot_t prot;
	memory_object_size_t actualSize;
	uint32_t rangeIdx;
	uint32_t count;
	mach_vm_address_t entryAddr, endAddr, entrySize;
	mach_vm_size_t srcAddr, srcLen;
	mach_vm_size_t nextAddr, nextLen;
	mach_vm_size_t offset, remain;
	vm_map_offset_t overmap_start = 0, overmap_end = 0;
	int misaligned_start = 0, misaligned_end = 0;
	IOByteCount physLen;
	IOOptionBits type = (_flags & kIOMemoryTypeMask);
	IOOptionBits cacheMode;
	unsigned int pagerFlags;
	vm_tag_t tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	// Start with a small entry array; grown by kCapacityInc as needed below.
	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	remain = _length;
	if (_task) {
		// Virtual memory: seed (nextAddr, nextLen) from the first range.
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain = nextLen;
		}
	} else {
		// Physical / UPL: seed from the first physical segment.
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	// prot carries both VM protections and MAP_MEM_* request bits that
	// mach_make_memory_entry consumes alongside the protection.
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	// With kIOMemoryReferenceReuse, try to match the entries of an existing
	// _memRef one-for-one; on full match the old reference is reused below.
	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//   + "network" ledger tag
					//   + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//   + no ledger tag
					//   + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			// NULL map: the named entry creates fresh backing rather than
			// referencing an existing mapping.
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		// Outer loop: one iteration per coalesced run of contiguous ranges.
		while (remain) {
			srcAddr = nextAddr;
			srcLen = nextLen;
			nextAddr = 0;
			nextLen = 0;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			// With MAP_MEM_USE_DATA_ADDR the VM accepts unaligned bounds;
			// otherwise expand to whole pages.
			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// Inner loop: one named entry per iteration; the VM may return
			// an entry smaller than requested (actualSize), so keep going
			// until the run is covered.
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				// Candidate entry from the existing reference to match
				// against (reuse path); cleared once matching fails.
				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					// Data-address entries may legitimately round up past
					// the request; clamp back to what was asked for.
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				// Detect sub-page (4K-on-16K) overmapping at either end of
				// the new entry.
				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					// Interior entries must tile exactly; a misaligned
					// interior boundary cannot be represented, so give up.
					if (misaligned_start || misaligned_end) {
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				// Grow the entry array when full.
				// NOTE(review): the memoryReferenceAlloc result is not
				// checked for NULL here, unlike the initial allocation
				// above — presumably the regrow cannot fail; confirm.
				if (count >= ref->capacity) {
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry = entry;
				entries->size = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				// Reuse matching: the new entry must equal the clone's
				// entry, size and offset, or reuse is abandoned.
				if (MAP_MEM_NAMED_REUSE & prot) {
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		// Single entry backed by a device pager over the whole descriptor.
		memory_object_t pager;
		vm_size_t size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry = entry;
				entries->size = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot = prot;

	// Copy-on-write descriptors get a second, COW-flagged reference chained
	// via ref->mapRef (freed recursively by memoryReferenceFree).
	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		// If reuse matching survived to the end, discard the new reference
		// and hand back the existing one with an extra refcount.
		if (MAP_MEM_NAMED_REUSE & prot) {
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
811*4f1223e8SApple OSS Distributions
812*4f1223e8SApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)813*4f1223e8SApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
814*4f1223e8SApple OSS Distributions {
815*4f1223e8SApple OSS Distributions switch (kIOMapGuardedMask & options) {
816*4f1223e8SApple OSS Distributions default:
817*4f1223e8SApple OSS Distributions case kIOMapGuardedSmall:
818*4f1223e8SApple OSS Distributions return vm_map_page_size(map);
819*4f1223e8SApple OSS Distributions case kIOMapGuardedLarge:
820*4f1223e8SApple OSS Distributions assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
821*4f1223e8SApple OSS Distributions return kIOMapGuardSizeLarge;
822*4f1223e8SApple OSS Distributions }
823*4f1223e8SApple OSS Distributions ;
824*4f1223e8SApple OSS Distributions }
825*4f1223e8SApple OSS Distributions
826*4f1223e8SApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)827*4f1223e8SApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
828*4f1223e8SApple OSS Distributions vm_map_offset_t addr, mach_vm_size_t size)
829*4f1223e8SApple OSS Distributions {
830*4f1223e8SApple OSS Distributions kern_return_t kr;
831*4f1223e8SApple OSS Distributions vm_map_offset_t actualAddr;
832*4f1223e8SApple OSS Distributions mach_vm_size_t actualSize;
833*4f1223e8SApple OSS Distributions
834*4f1223e8SApple OSS Distributions actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
835*4f1223e8SApple OSS Distributions actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
836*4f1223e8SApple OSS Distributions
837*4f1223e8SApple OSS Distributions if (kIOMapGuardedMask & options) {
838*4f1223e8SApple OSS Distributions mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
839*4f1223e8SApple OSS Distributions actualAddr -= guardSize;
840*4f1223e8SApple OSS Distributions actualSize += 2 * guardSize;
841*4f1223e8SApple OSS Distributions }
842*4f1223e8SApple OSS Distributions kr = mach_vm_deallocate(map, actualAddr, actualSize);
843*4f1223e8SApple OSS Distributions
844*4f1223e8SApple OSS Distributions return kr;
845*4f1223e8SApple OSS Distributions }
846*4f1223e8SApple OSS Distributions
/*
 * Allocate (or claim, for fixed-address requests) VM space for a mapping
 * described by an IOMemoryDescriptorMapAllocRef. For guarded mappings
 * (kIOMapAnywhere required) the allocation is widened by one guard region
 * on each side; the guards are made inaccessible and ref->mapped is
 * advanced past the leading guard. On success ref->mapped / ref->map are
 * updated for the caller.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn err;
	vm_map_offset_t addr;
	mach_vm_size_t size;
	mach_vm_size_t guardSize;
	vm_map_kernel_flags_t vmk_flags;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		// Guard pages only make sense when the VM chooses the address.
		if (!(kIOMapAnywhere & ref->options)) {
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		size += 2 * guardSize;
	}
	if (kIOMapAnywhere & ref->options) {
		vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
	} else {
		vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
	}
	// Attribute the allocation to the caller's VM tag for accounting.
	vmk_flags.vm_tag = ref->tag;

	/*
	 * Mapping memory into the kernel_map using IOMDs use the data range.
	 * Memory being mapped should not contain kernel pointers.
	 */
	if (map == kernel_map) {
		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
	}

	err = mach_vm_map_kernel(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    // Align to the larger of the kernel page mask and the target
	    // map's page mask when the VM chooses the address.
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    vmk_flags,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false, /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// First and last guardSize bytes of the allocation become
			// inaccessible; the usable region sits between them.
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			// Report the address past the leading guard to the caller.
			ref->mapped += guardSize;
		}
	}

	return err;
}
913*4f1223e8SApple OSS Distributions
914*4f1223e8SApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)915*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
916*4f1223e8SApple OSS Distributions IOMemoryReference * ref,
917*4f1223e8SApple OSS Distributions vm_map_t map,
918*4f1223e8SApple OSS Distributions mach_vm_size_t inoffset,
919*4f1223e8SApple OSS Distributions mach_vm_size_t size,
920*4f1223e8SApple OSS Distributions IOOptionBits options,
921*4f1223e8SApple OSS Distributions mach_vm_address_t * inaddr)
922*4f1223e8SApple OSS Distributions {
923*4f1223e8SApple OSS Distributions IOReturn err;
924*4f1223e8SApple OSS Distributions int64_t offset = inoffset;
925*4f1223e8SApple OSS Distributions uint32_t rangeIdx, entryIdx;
926*4f1223e8SApple OSS Distributions vm_map_offset_t addr, mapAddr;
927*4f1223e8SApple OSS Distributions vm_map_offset_t pageOffset, entryOffset, remain, chunk;
928*4f1223e8SApple OSS Distributions
929*4f1223e8SApple OSS Distributions mach_vm_address_t nextAddr;
930*4f1223e8SApple OSS Distributions mach_vm_size_t nextLen;
931*4f1223e8SApple OSS Distributions IOByteCount physLen;
932*4f1223e8SApple OSS Distributions IOMemoryEntry * entry;
933*4f1223e8SApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
934*4f1223e8SApple OSS Distributions IOOptionBits type;
935*4f1223e8SApple OSS Distributions IOOptionBits cacheMode;
936*4f1223e8SApple OSS Distributions vm_tag_t tag;
937*4f1223e8SApple OSS Distributions // for the kIOMapPrefault option.
938*4f1223e8SApple OSS Distributions upl_page_info_t * pageList = NULL;
939*4f1223e8SApple OSS Distributions UInt currentPageIndex = 0;
940*4f1223e8SApple OSS Distributions bool didAlloc;
941*4f1223e8SApple OSS Distributions
942*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
943*4f1223e8SApple OSS Distributions
944*4f1223e8SApple OSS Distributions if (ref->mapRef) {
945*4f1223e8SApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
946*4f1223e8SApple OSS Distributions return err;
947*4f1223e8SApple OSS Distributions }
948*4f1223e8SApple OSS Distributions
949*4f1223e8SApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
950*4f1223e8SApple OSS Distributions err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
951*4f1223e8SApple OSS Distributions return err;
952*4f1223e8SApple OSS Distributions }
953*4f1223e8SApple OSS Distributions
954*4f1223e8SApple OSS Distributions type = _flags & kIOMemoryTypeMask;
955*4f1223e8SApple OSS Distributions
956*4f1223e8SApple OSS Distributions prot = VM_PROT_READ;
957*4f1223e8SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
958*4f1223e8SApple OSS Distributions prot |= VM_PROT_WRITE;
959*4f1223e8SApple OSS Distributions }
960*4f1223e8SApple OSS Distributions prot &= ref->prot;
961*4f1223e8SApple OSS Distributions
962*4f1223e8SApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
963*4f1223e8SApple OSS Distributions if (kIODefaultCache != cacheMode) {
964*4f1223e8SApple OSS Distributions // VM system requires write access to update named entry cache mode
965*4f1223e8SApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
966*4f1223e8SApple OSS Distributions }
967*4f1223e8SApple OSS Distributions
968*4f1223e8SApple OSS Distributions tag = (typeof(tag))getVMTag(map);
969*4f1223e8SApple OSS Distributions
970*4f1223e8SApple OSS Distributions if (_task) {
971*4f1223e8SApple OSS Distributions // Find first range for offset
972*4f1223e8SApple OSS Distributions if (!_rangesCount) {
973*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
974*4f1223e8SApple OSS Distributions }
975*4f1223e8SApple OSS Distributions for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
976*4f1223e8SApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
977*4f1223e8SApple OSS Distributions if (remain < nextLen) {
978*4f1223e8SApple OSS Distributions break;
979*4f1223e8SApple OSS Distributions }
980*4f1223e8SApple OSS Distributions remain -= nextLen;
981*4f1223e8SApple OSS Distributions }
982*4f1223e8SApple OSS Distributions } else {
983*4f1223e8SApple OSS Distributions rangeIdx = 0;
984*4f1223e8SApple OSS Distributions remain = 0;
985*4f1223e8SApple OSS Distributions nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
986*4f1223e8SApple OSS Distributions nextLen = size;
987*4f1223e8SApple OSS Distributions }
988*4f1223e8SApple OSS Distributions
989*4f1223e8SApple OSS Distributions assert(remain < nextLen);
990*4f1223e8SApple OSS Distributions if (remain >= nextLen) {
991*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
992*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
993*4f1223e8SApple OSS Distributions }
994*4f1223e8SApple OSS Distributions
995*4f1223e8SApple OSS Distributions nextAddr += remain;
996*4f1223e8SApple OSS Distributions nextLen -= remain;
997*4f1223e8SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
998*4f1223e8SApple OSS Distributions pageOffset = (vm_map_page_mask(map) & nextAddr);
999*4f1223e8SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
1000*4f1223e8SApple OSS Distributions pageOffset = (page_mask & nextAddr);
1001*4f1223e8SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
1002*4f1223e8SApple OSS Distributions addr = 0;
1003*4f1223e8SApple OSS Distributions didAlloc = false;
1004*4f1223e8SApple OSS Distributions
1005*4f1223e8SApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1006*4f1223e8SApple OSS Distributions addr = *inaddr;
1007*4f1223e8SApple OSS Distributions if (pageOffset != (vm_map_page_mask(map) & addr)) {
1008*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1009*4f1223e8SApple OSS Distributions }
1010*4f1223e8SApple OSS Distributions addr -= pageOffset;
1011*4f1223e8SApple OSS Distributions }
1012*4f1223e8SApple OSS Distributions
1013*4f1223e8SApple OSS Distributions // find first entry for offset
1014*4f1223e8SApple OSS Distributions for (entryIdx = 0;
1015*4f1223e8SApple OSS Distributions (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1016*4f1223e8SApple OSS Distributions entryIdx++) {
1017*4f1223e8SApple OSS Distributions }
1018*4f1223e8SApple OSS Distributions entryIdx--;
1019*4f1223e8SApple OSS Distributions entry = &ref->entries[entryIdx];
1020*4f1223e8SApple OSS Distributions
1021*4f1223e8SApple OSS Distributions // allocate VM
1022*4f1223e8SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1023*4f1223e8SApple OSS Distributions size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1024*4f1223e8SApple OSS Distributions #else
1025*4f1223e8SApple OSS Distributions size = round_page_64(size + pageOffset);
1026*4f1223e8SApple OSS Distributions #endif
1027*4f1223e8SApple OSS Distributions if (kIOMapOverwrite & options) {
1028*4f1223e8SApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1029*4f1223e8SApple OSS Distributions map = IOPageableMapForAddress(addr);
1030*4f1223e8SApple OSS Distributions }
1031*4f1223e8SApple OSS Distributions err = KERN_SUCCESS;
1032*4f1223e8SApple OSS Distributions } else {
1033*4f1223e8SApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1034*4f1223e8SApple OSS Distributions ref.map = map;
1035*4f1223e8SApple OSS Distributions ref.tag = tag;
1036*4f1223e8SApple OSS Distributions ref.options = options;
1037*4f1223e8SApple OSS Distributions ref.size = size;
1038*4f1223e8SApple OSS Distributions ref.prot = prot;
1039*4f1223e8SApple OSS Distributions if (options & kIOMapAnywhere) {
1040*4f1223e8SApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1041*4f1223e8SApple OSS Distributions ref.mapped = 0;
1042*4f1223e8SApple OSS Distributions } else {
1043*4f1223e8SApple OSS Distributions ref.mapped = addr;
1044*4f1223e8SApple OSS Distributions }
1045*4f1223e8SApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1046*4f1223e8SApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1047*4f1223e8SApple OSS Distributions } else {
1048*4f1223e8SApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1049*4f1223e8SApple OSS Distributions }
1050*4f1223e8SApple OSS Distributions if (KERN_SUCCESS == err) {
1051*4f1223e8SApple OSS Distributions addr = ref.mapped;
1052*4f1223e8SApple OSS Distributions map = ref.map;
1053*4f1223e8SApple OSS Distributions didAlloc = true;
1054*4f1223e8SApple OSS Distributions }
1055*4f1223e8SApple OSS Distributions }
1056*4f1223e8SApple OSS Distributions
1057*4f1223e8SApple OSS Distributions /*
1058*4f1223e8SApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1059*4f1223e8SApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1060*4f1223e8SApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1061*4f1223e8SApple OSS Distributions * operations.
1062*4f1223e8SApple OSS Distributions */
1063*4f1223e8SApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1064*4f1223e8SApple OSS Distributions options &= ~kIOMapPrefault;
1065*4f1223e8SApple OSS Distributions }
1066*4f1223e8SApple OSS Distributions
1067*4f1223e8SApple OSS Distributions /*
1068*4f1223e8SApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1069*4f1223e8SApple OSS Distributions * memory type, and the underlying data.
1070*4f1223e8SApple OSS Distributions */
1071*4f1223e8SApple OSS Distributions if (options & kIOMapPrefault) {
1072*4f1223e8SApple OSS Distributions /*
1073*4f1223e8SApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1074*4f1223e8SApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1075*4f1223e8SApple OSS Distributions */
1076*4f1223e8SApple OSS Distributions assert(_wireCount != 0);
1077*4f1223e8SApple OSS Distributions assert(_memoryEntries != NULL);
1078*4f1223e8SApple OSS Distributions if ((_wireCount == 0) ||
1079*4f1223e8SApple OSS Distributions (_memoryEntries == NULL)) {
1080*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1081*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
1082*4f1223e8SApple OSS Distributions }
1083*4f1223e8SApple OSS Distributions
1084*4f1223e8SApple OSS Distributions // Get the page list.
1085*4f1223e8SApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1086*4f1223e8SApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1087*4f1223e8SApple OSS Distributions pageList = getPageList(dataP);
1088*4f1223e8SApple OSS Distributions
1089*4f1223e8SApple OSS Distributions // Get the number of IOPLs.
1090*4f1223e8SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1091*4f1223e8SApple OSS Distributions
1092*4f1223e8SApple OSS Distributions /*
1093*4f1223e8SApple OSS Distributions * Scan through the IOPL Info Blocks, looking for the first block containing
1094*4f1223e8SApple OSS Distributions  * the offset. The search will go past it, so we'll need to go back to the
1095*4f1223e8SApple OSS Distributions * right range at the end.
1096*4f1223e8SApple OSS Distributions */
1097*4f1223e8SApple OSS Distributions UInt ioplIndex = 0;
1098*4f1223e8SApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1099*4f1223e8SApple OSS Distributions ioplIndex++;
1100*4f1223e8SApple OSS Distributions }
1101*4f1223e8SApple OSS Distributions ioplIndex--;
1102*4f1223e8SApple OSS Distributions
1103*4f1223e8SApple OSS Distributions // Retrieve the IOPL info block.
1104*4f1223e8SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1105*4f1223e8SApple OSS Distributions
1106*4f1223e8SApple OSS Distributions /*
1107*4f1223e8SApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1108*4f1223e8SApple OSS Distributions * array.
1109*4f1223e8SApple OSS Distributions */
1110*4f1223e8SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1111*4f1223e8SApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1112*4f1223e8SApple OSS Distributions } else {
1113*4f1223e8SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1114*4f1223e8SApple OSS Distributions }
1115*4f1223e8SApple OSS Distributions
1116*4f1223e8SApple OSS Distributions // Rebase [offset] into the IOPL in order to look for the first page index.
1117*4f1223e8SApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1118*4f1223e8SApple OSS Distributions
1119*4f1223e8SApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1120*4f1223e8SApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1121*4f1223e8SApple OSS Distributions }
1122*4f1223e8SApple OSS Distributions
1123*4f1223e8SApple OSS Distributions // enter mappings
1124*4f1223e8SApple OSS Distributions remain = size;
1125*4f1223e8SApple OSS Distributions mapAddr = addr;
1126*4f1223e8SApple OSS Distributions addr += pageOffset;
1127*4f1223e8SApple OSS Distributions
1128*4f1223e8SApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1129*4f1223e8SApple OSS Distributions entryOffset = offset - entry->offset;
1130*4f1223e8SApple OSS Distributions if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1131*4f1223e8SApple OSS Distributions err = kIOReturnNotAligned;
1132*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1133*4f1223e8SApple OSS Distributions break;
1134*4f1223e8SApple OSS Distributions }
1135*4f1223e8SApple OSS Distributions
1136*4f1223e8SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1137*4f1223e8SApple OSS Distributions vm_size_t unused = 0;
1138*4f1223e8SApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1139*4f1223e8SApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1140*4f1223e8SApple OSS Distributions assert(KERN_SUCCESS == err);
1141*4f1223e8SApple OSS Distributions }
1142*4f1223e8SApple OSS Distributions
1143*4f1223e8SApple OSS Distributions entryOffset -= pageOffset;
1144*4f1223e8SApple OSS Distributions if (entryOffset >= entry->size) {
1145*4f1223e8SApple OSS Distributions panic("entryOffset");
1146*4f1223e8SApple OSS Distributions }
1147*4f1223e8SApple OSS Distributions chunk = entry->size - entryOffset;
1148*4f1223e8SApple OSS Distributions if (chunk) {
1149*4f1223e8SApple OSS Distributions vm_map_kernel_flags_t vmk_flags = {
1150*4f1223e8SApple OSS Distributions .vmf_fixed = true,
1151*4f1223e8SApple OSS Distributions .vmf_overwrite = true,
1152*4f1223e8SApple OSS Distributions .vm_tag = tag,
1153*4f1223e8SApple OSS Distributions .vmkf_iokit_acct = true,
1154*4f1223e8SApple OSS Distributions };
1155*4f1223e8SApple OSS Distributions
1156*4f1223e8SApple OSS Distributions if (chunk > remain) {
1157*4f1223e8SApple OSS Distributions chunk = remain;
1158*4f1223e8SApple OSS Distributions }
1159*4f1223e8SApple OSS Distributions if (options & kIOMapPrefault) {
1160*4f1223e8SApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1161*4f1223e8SApple OSS Distributions
1162*4f1223e8SApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1163*4f1223e8SApple OSS Distributions &mapAddr,
1164*4f1223e8SApple OSS Distributions chunk, 0 /* mask */,
1165*4f1223e8SApple OSS Distributions vmk_flags,
1166*4f1223e8SApple OSS Distributions entry->entry,
1167*4f1223e8SApple OSS Distributions entryOffset,
1168*4f1223e8SApple OSS Distributions prot, // cur
1169*4f1223e8SApple OSS Distributions prot, // max
1170*4f1223e8SApple OSS Distributions &pageList[currentPageIndex],
1171*4f1223e8SApple OSS Distributions nb_pages);
1172*4f1223e8SApple OSS Distributions
1173*4f1223e8SApple OSS Distributions if (err || vm_map_page_mask(map) < PAGE_MASK) {
1174*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1175*4f1223e8SApple OSS Distributions }
1176*4f1223e8SApple OSS Distributions // Compute the next index in the page list.
1177*4f1223e8SApple OSS Distributions currentPageIndex += nb_pages;
1178*4f1223e8SApple OSS Distributions assert(currentPageIndex <= _pages);
1179*4f1223e8SApple OSS Distributions } else {
1180*4f1223e8SApple OSS Distributions err = mach_vm_map_kernel(map,
1181*4f1223e8SApple OSS Distributions &mapAddr,
1182*4f1223e8SApple OSS Distributions chunk, 0 /* mask */,
1183*4f1223e8SApple OSS Distributions vmk_flags,
1184*4f1223e8SApple OSS Distributions entry->entry,
1185*4f1223e8SApple OSS Distributions entryOffset,
1186*4f1223e8SApple OSS Distributions false, // copy
1187*4f1223e8SApple OSS Distributions prot, // cur
1188*4f1223e8SApple OSS Distributions prot, // max
1189*4f1223e8SApple OSS Distributions VM_INHERIT_NONE);
1190*4f1223e8SApple OSS Distributions }
1191*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != err) {
1192*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1193*4f1223e8SApple OSS Distributions break;
1194*4f1223e8SApple OSS Distributions }
1195*4f1223e8SApple OSS Distributions remain -= chunk;
1196*4f1223e8SApple OSS Distributions if (!remain) {
1197*4f1223e8SApple OSS Distributions break;
1198*4f1223e8SApple OSS Distributions }
1199*4f1223e8SApple OSS Distributions mapAddr += chunk;
1200*4f1223e8SApple OSS Distributions offset += chunk - pageOffset;
1201*4f1223e8SApple OSS Distributions }
1202*4f1223e8SApple OSS Distributions pageOffset = 0;
1203*4f1223e8SApple OSS Distributions entry++;
1204*4f1223e8SApple OSS Distributions entryIdx++;
1205*4f1223e8SApple OSS Distributions if (entryIdx >= ref->count) {
1206*4f1223e8SApple OSS Distributions err = kIOReturnOverrun;
1207*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1208*4f1223e8SApple OSS Distributions break;
1209*4f1223e8SApple OSS Distributions }
1210*4f1223e8SApple OSS Distributions }
1211*4f1223e8SApple OSS Distributions
1212*4f1223e8SApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1213*4f1223e8SApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1214*4f1223e8SApple OSS Distributions addr = 0;
1215*4f1223e8SApple OSS Distributions }
1216*4f1223e8SApple OSS Distributions *inaddr = addr;
1217*4f1223e8SApple OSS Distributions
1218*4f1223e8SApple OSS Distributions if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1219*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1220*4f1223e8SApple OSS Distributions }
1221*4f1223e8SApple OSS Distributions return err;
1222*4f1223e8SApple OSS Distributions }
1223*4f1223e8SApple OSS Distributions
1224*4f1223e8SApple OSS Distributions #define LOGUNALIGN 0
1225*4f1223e8SApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1226*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1227*4f1223e8SApple OSS Distributions IOMemoryReference * ref,
1228*4f1223e8SApple OSS Distributions vm_map_t map,
1229*4f1223e8SApple OSS Distributions mach_vm_size_t inoffset,
1230*4f1223e8SApple OSS Distributions mach_vm_size_t size,
1231*4f1223e8SApple OSS Distributions IOOptionBits options,
1232*4f1223e8SApple OSS Distributions mach_vm_address_t * inaddr)
1233*4f1223e8SApple OSS Distributions {
1234*4f1223e8SApple OSS Distributions IOReturn err;
1235*4f1223e8SApple OSS Distributions int64_t offset = inoffset;
1236*4f1223e8SApple OSS Distributions uint32_t entryIdx, firstEntryIdx;
1237*4f1223e8SApple OSS Distributions vm_map_offset_t addr, mapAddr, mapAddrOut;
1238*4f1223e8SApple OSS Distributions vm_map_offset_t entryOffset, remain, chunk;
1239*4f1223e8SApple OSS Distributions
1240*4f1223e8SApple OSS Distributions IOMemoryEntry * entry;
1241*4f1223e8SApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
1242*4f1223e8SApple OSS Distributions IOOptionBits type;
1243*4f1223e8SApple OSS Distributions IOOptionBits cacheMode;
1244*4f1223e8SApple OSS Distributions vm_tag_t tag;
1245*4f1223e8SApple OSS Distributions // for the kIOMapPrefault option.
1246*4f1223e8SApple OSS Distributions upl_page_info_t * pageList = NULL;
1247*4f1223e8SApple OSS Distributions UInt currentPageIndex = 0;
1248*4f1223e8SApple OSS Distributions bool didAlloc;
1249*4f1223e8SApple OSS Distributions
1250*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1251*4f1223e8SApple OSS Distributions
1252*4f1223e8SApple OSS Distributions if (ref->mapRef) {
1253*4f1223e8SApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1254*4f1223e8SApple OSS Distributions return err;
1255*4f1223e8SApple OSS Distributions }
1256*4f1223e8SApple OSS Distributions
1257*4f1223e8SApple OSS Distributions #if LOGUNALIGN
1258*4f1223e8SApple OSS Distributions printf("MAP offset %qx, %qx\n", inoffset, size);
1259*4f1223e8SApple OSS Distributions #endif
1260*4f1223e8SApple OSS Distributions
1261*4f1223e8SApple OSS Distributions type = _flags & kIOMemoryTypeMask;
1262*4f1223e8SApple OSS Distributions
1263*4f1223e8SApple OSS Distributions prot = VM_PROT_READ;
1264*4f1223e8SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
1265*4f1223e8SApple OSS Distributions prot |= VM_PROT_WRITE;
1266*4f1223e8SApple OSS Distributions }
1267*4f1223e8SApple OSS Distributions prot &= ref->prot;
1268*4f1223e8SApple OSS Distributions
1269*4f1223e8SApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1270*4f1223e8SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1271*4f1223e8SApple OSS Distributions // VM system requires write access to update named entry cache mode
1272*4f1223e8SApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1273*4f1223e8SApple OSS Distributions }
1274*4f1223e8SApple OSS Distributions
1275*4f1223e8SApple OSS Distributions tag = (vm_tag_t) getVMTag(map);
1276*4f1223e8SApple OSS Distributions
1277*4f1223e8SApple OSS Distributions addr = 0;
1278*4f1223e8SApple OSS Distributions didAlloc = false;
1279*4f1223e8SApple OSS Distributions
1280*4f1223e8SApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1281*4f1223e8SApple OSS Distributions addr = *inaddr;
1282*4f1223e8SApple OSS Distributions }
1283*4f1223e8SApple OSS Distributions
1284*4f1223e8SApple OSS Distributions // find first entry for offset
1285*4f1223e8SApple OSS Distributions for (firstEntryIdx = 0;
1286*4f1223e8SApple OSS Distributions (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1287*4f1223e8SApple OSS Distributions firstEntryIdx++) {
1288*4f1223e8SApple OSS Distributions }
1289*4f1223e8SApple OSS Distributions firstEntryIdx--;
1290*4f1223e8SApple OSS Distributions
1291*4f1223e8SApple OSS Distributions // calculate required VM space
1292*4f1223e8SApple OSS Distributions
1293*4f1223e8SApple OSS Distributions entryIdx = firstEntryIdx;
1294*4f1223e8SApple OSS Distributions entry = &ref->entries[entryIdx];
1295*4f1223e8SApple OSS Distributions
1296*4f1223e8SApple OSS Distributions remain = size;
1297*4f1223e8SApple OSS Distributions int64_t iteroffset = offset;
1298*4f1223e8SApple OSS Distributions uint64_t mapSize = 0;
1299*4f1223e8SApple OSS Distributions while (remain) {
1300*4f1223e8SApple OSS Distributions entryOffset = iteroffset - entry->offset;
1301*4f1223e8SApple OSS Distributions if (entryOffset >= entry->size) {
1302*4f1223e8SApple OSS Distributions panic("entryOffset");
1303*4f1223e8SApple OSS Distributions }
1304*4f1223e8SApple OSS Distributions
1305*4f1223e8SApple OSS Distributions #if LOGUNALIGN
1306*4f1223e8SApple OSS Distributions printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1307*4f1223e8SApple OSS Distributions entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1308*4f1223e8SApple OSS Distributions #endif
1309*4f1223e8SApple OSS Distributions
1310*4f1223e8SApple OSS Distributions chunk = entry->size - entryOffset;
1311*4f1223e8SApple OSS Distributions if (chunk) {
1312*4f1223e8SApple OSS Distributions if (chunk > remain) {
1313*4f1223e8SApple OSS Distributions chunk = remain;
1314*4f1223e8SApple OSS Distributions }
1315*4f1223e8SApple OSS Distributions mach_vm_size_t entrySize;
1316*4f1223e8SApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1317*4f1223e8SApple OSS Distributions assert(KERN_SUCCESS == err);
1318*4f1223e8SApple OSS Distributions mapSize += entrySize;
1319*4f1223e8SApple OSS Distributions
1320*4f1223e8SApple OSS Distributions remain -= chunk;
1321*4f1223e8SApple OSS Distributions if (!remain) {
1322*4f1223e8SApple OSS Distributions break;
1323*4f1223e8SApple OSS Distributions }
1324*4f1223e8SApple OSS Distributions iteroffset += chunk; // - pageOffset;
1325*4f1223e8SApple OSS Distributions }
1326*4f1223e8SApple OSS Distributions entry++;
1327*4f1223e8SApple OSS Distributions entryIdx++;
1328*4f1223e8SApple OSS Distributions if (entryIdx >= ref->count) {
1329*4f1223e8SApple OSS Distributions panic("overrun");
1330*4f1223e8SApple OSS Distributions err = kIOReturnOverrun;
1331*4f1223e8SApple OSS Distributions break;
1332*4f1223e8SApple OSS Distributions }
1333*4f1223e8SApple OSS Distributions }
1334*4f1223e8SApple OSS Distributions
1335*4f1223e8SApple OSS Distributions if (kIOMapOverwrite & options) {
1336*4f1223e8SApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1337*4f1223e8SApple OSS Distributions map = IOPageableMapForAddress(addr);
1338*4f1223e8SApple OSS Distributions }
1339*4f1223e8SApple OSS Distributions err = KERN_SUCCESS;
1340*4f1223e8SApple OSS Distributions } else {
1341*4f1223e8SApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1342*4f1223e8SApple OSS Distributions ref.map = map;
1343*4f1223e8SApple OSS Distributions ref.tag = tag;
1344*4f1223e8SApple OSS Distributions ref.options = options;
1345*4f1223e8SApple OSS Distributions ref.size = mapSize;
1346*4f1223e8SApple OSS Distributions ref.prot = prot;
1347*4f1223e8SApple OSS Distributions if (options & kIOMapAnywhere) {
1348*4f1223e8SApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1349*4f1223e8SApple OSS Distributions ref.mapped = 0;
1350*4f1223e8SApple OSS Distributions } else {
1351*4f1223e8SApple OSS Distributions ref.mapped = addr;
1352*4f1223e8SApple OSS Distributions }
1353*4f1223e8SApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1354*4f1223e8SApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1355*4f1223e8SApple OSS Distributions } else {
1356*4f1223e8SApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1357*4f1223e8SApple OSS Distributions }
1358*4f1223e8SApple OSS Distributions
1359*4f1223e8SApple OSS Distributions if (KERN_SUCCESS == err) {
1360*4f1223e8SApple OSS Distributions addr = ref.mapped;
1361*4f1223e8SApple OSS Distributions map = ref.map;
1362*4f1223e8SApple OSS Distributions didAlloc = true;
1363*4f1223e8SApple OSS Distributions }
1364*4f1223e8SApple OSS Distributions #if LOGUNALIGN
1365*4f1223e8SApple OSS Distributions IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1366*4f1223e8SApple OSS Distributions #endif
1367*4f1223e8SApple OSS Distributions }
1368*4f1223e8SApple OSS Distributions
1369*4f1223e8SApple OSS Distributions /*
1370*4f1223e8SApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1371*4f1223e8SApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1372*4f1223e8SApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1373*4f1223e8SApple OSS Distributions * operations.
1374*4f1223e8SApple OSS Distributions */
1375*4f1223e8SApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1376*4f1223e8SApple OSS Distributions options &= ~kIOMapPrefault;
1377*4f1223e8SApple OSS Distributions }
1378*4f1223e8SApple OSS Distributions
1379*4f1223e8SApple OSS Distributions /*
1380*4f1223e8SApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1381*4f1223e8SApple OSS Distributions * memory type, and the underlying data.
1382*4f1223e8SApple OSS Distributions */
1383*4f1223e8SApple OSS Distributions if (options & kIOMapPrefault) {
1384*4f1223e8SApple OSS Distributions /*
1385*4f1223e8SApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1386*4f1223e8SApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1387*4f1223e8SApple OSS Distributions */
1388*4f1223e8SApple OSS Distributions assert(_wireCount != 0);
1389*4f1223e8SApple OSS Distributions assert(_memoryEntries != NULL);
1390*4f1223e8SApple OSS Distributions if ((_wireCount == 0) ||
1391*4f1223e8SApple OSS Distributions (_memoryEntries == NULL)) {
1392*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
1393*4f1223e8SApple OSS Distributions }
1394*4f1223e8SApple OSS Distributions
1395*4f1223e8SApple OSS Distributions // Get the page list.
1396*4f1223e8SApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1397*4f1223e8SApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1398*4f1223e8SApple OSS Distributions pageList = getPageList(dataP);
1399*4f1223e8SApple OSS Distributions
1400*4f1223e8SApple OSS Distributions // Get the number of IOPLs.
1401*4f1223e8SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1402*4f1223e8SApple OSS Distributions
1403*4f1223e8SApple OSS Distributions /*
1404*4f1223e8SApple OSS Distributions * Scan through the IOPL Info Blocks, looking for the first block containing
1405*4f1223e8SApple OSS Distributions  * the offset. The search will go past it, so we'll need to go back to the
1406*4f1223e8SApple OSS Distributions * right range at the end.
1407*4f1223e8SApple OSS Distributions */
1408*4f1223e8SApple OSS Distributions UInt ioplIndex = 0;
1409*4f1223e8SApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1410*4f1223e8SApple OSS Distributions ioplIndex++;
1411*4f1223e8SApple OSS Distributions }
1412*4f1223e8SApple OSS Distributions ioplIndex--;
1413*4f1223e8SApple OSS Distributions
1414*4f1223e8SApple OSS Distributions // Retrieve the IOPL info block.
1415*4f1223e8SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1416*4f1223e8SApple OSS Distributions
1417*4f1223e8SApple OSS Distributions /*
1418*4f1223e8SApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1419*4f1223e8SApple OSS Distributions * array.
1420*4f1223e8SApple OSS Distributions */
1421*4f1223e8SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1422*4f1223e8SApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1423*4f1223e8SApple OSS Distributions } else {
1424*4f1223e8SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1425*4f1223e8SApple OSS Distributions }
1426*4f1223e8SApple OSS Distributions
1427*4f1223e8SApple OSS Distributions // Rebase [offset] into the IOPL in order to look for the first page index.
1428*4f1223e8SApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1429*4f1223e8SApple OSS Distributions
1430*4f1223e8SApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1431*4f1223e8SApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1432*4f1223e8SApple OSS Distributions }
1433*4f1223e8SApple OSS Distributions
1434*4f1223e8SApple OSS Distributions // enter mappings
1435*4f1223e8SApple OSS Distributions remain = size;
1436*4f1223e8SApple OSS Distributions mapAddr = addr;
1437*4f1223e8SApple OSS Distributions entryIdx = firstEntryIdx;
1438*4f1223e8SApple OSS Distributions entry = &ref->entries[entryIdx];
1439*4f1223e8SApple OSS Distributions
1440*4f1223e8SApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1441*4f1223e8SApple OSS Distributions #if LOGUNALIGN
1442*4f1223e8SApple OSS Distributions printf("offset %qx, %qx\n", offset, entry->offset);
1443*4f1223e8SApple OSS Distributions #endif
1444*4f1223e8SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1445*4f1223e8SApple OSS Distributions vm_size_t unused = 0;
1446*4f1223e8SApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1447*4f1223e8SApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1448*4f1223e8SApple OSS Distributions assert(KERN_SUCCESS == err);
1449*4f1223e8SApple OSS Distributions }
1450*4f1223e8SApple OSS Distributions entryOffset = offset - entry->offset;
1451*4f1223e8SApple OSS Distributions if (entryOffset >= entry->size) {
1452*4f1223e8SApple OSS Distributions panic("entryOffset");
1453*4f1223e8SApple OSS Distributions }
1454*4f1223e8SApple OSS Distributions chunk = entry->size - entryOffset;
1455*4f1223e8SApple OSS Distributions #if LOGUNALIGN
1456*4f1223e8SApple OSS Distributions printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1457*4f1223e8SApple OSS Distributions #endif
1458*4f1223e8SApple OSS Distributions if (chunk) {
1459*4f1223e8SApple OSS Distributions vm_map_kernel_flags_t vmk_flags = {
1460*4f1223e8SApple OSS Distributions .vmf_fixed = true,
1461*4f1223e8SApple OSS Distributions .vmf_overwrite = true,
1462*4f1223e8SApple OSS Distributions .vmf_return_data_addr = true,
1463*4f1223e8SApple OSS Distributions .vm_tag = tag,
1464*4f1223e8SApple OSS Distributions .vmkf_iokit_acct = true,
1465*4f1223e8SApple OSS Distributions };
1466*4f1223e8SApple OSS Distributions
1467*4f1223e8SApple OSS Distributions if (chunk > remain) {
1468*4f1223e8SApple OSS Distributions chunk = remain;
1469*4f1223e8SApple OSS Distributions }
1470*4f1223e8SApple OSS Distributions mapAddrOut = mapAddr;
1471*4f1223e8SApple OSS Distributions if (options & kIOMapPrefault) {
1472*4f1223e8SApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1473*4f1223e8SApple OSS Distributions
1474*4f1223e8SApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1475*4f1223e8SApple OSS Distributions &mapAddrOut,
1476*4f1223e8SApple OSS Distributions chunk, 0 /* mask */,
1477*4f1223e8SApple OSS Distributions vmk_flags,
1478*4f1223e8SApple OSS Distributions entry->entry,
1479*4f1223e8SApple OSS Distributions entryOffset,
1480*4f1223e8SApple OSS Distributions prot, // cur
1481*4f1223e8SApple OSS Distributions prot, // max
1482*4f1223e8SApple OSS Distributions &pageList[currentPageIndex],
1483*4f1223e8SApple OSS Distributions nb_pages);
1484*4f1223e8SApple OSS Distributions
1485*4f1223e8SApple OSS Distributions // Compute the next index in the page list.
1486*4f1223e8SApple OSS Distributions currentPageIndex += nb_pages;
1487*4f1223e8SApple OSS Distributions assert(currentPageIndex <= _pages);
1488*4f1223e8SApple OSS Distributions } else {
1489*4f1223e8SApple OSS Distributions #if LOGUNALIGN
1490*4f1223e8SApple OSS Distributions printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1491*4f1223e8SApple OSS Distributions #endif
1492*4f1223e8SApple OSS Distributions err = mach_vm_map_kernel(map,
1493*4f1223e8SApple OSS Distributions &mapAddrOut,
1494*4f1223e8SApple OSS Distributions chunk, 0 /* mask */,
1495*4f1223e8SApple OSS Distributions vmk_flags,
1496*4f1223e8SApple OSS Distributions entry->entry,
1497*4f1223e8SApple OSS Distributions entryOffset,
1498*4f1223e8SApple OSS Distributions false, // copy
1499*4f1223e8SApple OSS Distributions prot, // cur
1500*4f1223e8SApple OSS Distributions prot, // max
1501*4f1223e8SApple OSS Distributions VM_INHERIT_NONE);
1502*4f1223e8SApple OSS Distributions }
1503*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != err) {
1504*4f1223e8SApple OSS Distributions panic("map enter err %x", err);
1505*4f1223e8SApple OSS Distributions break;
1506*4f1223e8SApple OSS Distributions }
1507*4f1223e8SApple OSS Distributions #if LOGUNALIGN
1508*4f1223e8SApple OSS Distributions printf("mapAddr o %qx\n", mapAddrOut);
1509*4f1223e8SApple OSS Distributions #endif
1510*4f1223e8SApple OSS Distributions if (entryIdx == firstEntryIdx) {
1511*4f1223e8SApple OSS Distributions addr = mapAddrOut;
1512*4f1223e8SApple OSS Distributions }
1513*4f1223e8SApple OSS Distributions remain -= chunk;
1514*4f1223e8SApple OSS Distributions if (!remain) {
1515*4f1223e8SApple OSS Distributions break;
1516*4f1223e8SApple OSS Distributions }
1517*4f1223e8SApple OSS Distributions mach_vm_size_t entrySize;
1518*4f1223e8SApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1519*4f1223e8SApple OSS Distributions assert(KERN_SUCCESS == err);
1520*4f1223e8SApple OSS Distributions mapAddr += entrySize;
1521*4f1223e8SApple OSS Distributions offset += chunk;
1522*4f1223e8SApple OSS Distributions }
1523*4f1223e8SApple OSS Distributions
1524*4f1223e8SApple OSS Distributions entry++;
1525*4f1223e8SApple OSS Distributions entryIdx++;
1526*4f1223e8SApple OSS Distributions if (entryIdx >= ref->count) {
1527*4f1223e8SApple OSS Distributions err = kIOReturnOverrun;
1528*4f1223e8SApple OSS Distributions break;
1529*4f1223e8SApple OSS Distributions }
1530*4f1223e8SApple OSS Distributions }
1531*4f1223e8SApple OSS Distributions
1532*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != err) {
1533*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1534*4f1223e8SApple OSS Distributions }
1535*4f1223e8SApple OSS Distributions
1536*4f1223e8SApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1537*4f1223e8SApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1538*4f1223e8SApple OSS Distributions addr = 0;
1539*4f1223e8SApple OSS Distributions }
1540*4f1223e8SApple OSS Distributions *inaddr = addr;
1541*4f1223e8SApple OSS Distributions
1542*4f1223e8SApple OSS Distributions return err;
1543*4f1223e8SApple OSS Distributions }
1544*4f1223e8SApple OSS Distributions
1545*4f1223e8SApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1546*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1547*4f1223e8SApple OSS Distributions IOMemoryReference * ref,
1548*4f1223e8SApple OSS Distributions uint64_t * offset)
1549*4f1223e8SApple OSS Distributions {
1550*4f1223e8SApple OSS Distributions kern_return_t kr;
1551*4f1223e8SApple OSS Distributions vm_object_offset_t data_offset = 0;
1552*4f1223e8SApple OSS Distributions uint64_t total;
1553*4f1223e8SApple OSS Distributions uint32_t idx;
1554*4f1223e8SApple OSS Distributions
1555*4f1223e8SApple OSS Distributions assert(ref->count);
1556*4f1223e8SApple OSS Distributions if (offset) {
1557*4f1223e8SApple OSS Distributions *offset = (uint64_t) data_offset;
1558*4f1223e8SApple OSS Distributions }
1559*4f1223e8SApple OSS Distributions total = 0;
1560*4f1223e8SApple OSS Distributions for (idx = 0; idx < ref->count; idx++) {
1561*4f1223e8SApple OSS Distributions kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1562*4f1223e8SApple OSS Distributions &data_offset);
1563*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != kr) {
1564*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1565*4f1223e8SApple OSS Distributions } else if (0 != data_offset) {
1566*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1567*4f1223e8SApple OSS Distributions }
1568*4f1223e8SApple OSS Distributions if (offset && !idx) {
1569*4f1223e8SApple OSS Distributions *offset = (uint64_t) data_offset;
1570*4f1223e8SApple OSS Distributions }
1571*4f1223e8SApple OSS Distributions total += round_page(data_offset + ref->entries[idx].size);
1572*4f1223e8SApple OSS Distributions }
1573*4f1223e8SApple OSS Distributions
1574*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1575*4f1223e8SApple OSS Distributions (offset ? *offset : (vm_object_offset_t)-1), total);
1576*4f1223e8SApple OSS Distributions
1577*4f1223e8SApple OSS Distributions return total;
1578*4f1223e8SApple OSS Distributions }
1579*4f1223e8SApple OSS Distributions
1580*4f1223e8SApple OSS Distributions
1581*4f1223e8SApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1582*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1583*4f1223e8SApple OSS Distributions IOMemoryReference * ref,
1584*4f1223e8SApple OSS Distributions IOByteCount * residentPageCount,
1585*4f1223e8SApple OSS Distributions IOByteCount * dirtyPageCount)
1586*4f1223e8SApple OSS Distributions {
1587*4f1223e8SApple OSS Distributions IOReturn err;
1588*4f1223e8SApple OSS Distributions IOMemoryEntry * entries;
1589*4f1223e8SApple OSS Distributions unsigned int resident, dirty;
1590*4f1223e8SApple OSS Distributions unsigned int totalResident, totalDirty;
1591*4f1223e8SApple OSS Distributions
1592*4f1223e8SApple OSS Distributions totalResident = totalDirty = 0;
1593*4f1223e8SApple OSS Distributions err = kIOReturnSuccess;
1594*4f1223e8SApple OSS Distributions entries = ref->entries + ref->count;
1595*4f1223e8SApple OSS Distributions while (entries > &ref->entries[0]) {
1596*4f1223e8SApple OSS Distributions entries--;
1597*4f1223e8SApple OSS Distributions err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1598*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != err) {
1599*4f1223e8SApple OSS Distributions break;
1600*4f1223e8SApple OSS Distributions }
1601*4f1223e8SApple OSS Distributions totalResident += resident;
1602*4f1223e8SApple OSS Distributions totalDirty += dirty;
1603*4f1223e8SApple OSS Distributions }
1604*4f1223e8SApple OSS Distributions
1605*4f1223e8SApple OSS Distributions if (residentPageCount) {
1606*4f1223e8SApple OSS Distributions *residentPageCount = totalResident;
1607*4f1223e8SApple OSS Distributions }
1608*4f1223e8SApple OSS Distributions if (dirtyPageCount) {
1609*4f1223e8SApple OSS Distributions *dirtyPageCount = totalDirty;
1610*4f1223e8SApple OSS Distributions }
1611*4f1223e8SApple OSS Distributions return err;
1612*4f1223e8SApple OSS Distributions }
1613*4f1223e8SApple OSS Distributions
1614*4f1223e8SApple OSS Distributions IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1615*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1616*4f1223e8SApple OSS Distributions IOMemoryReference * ref,
1617*4f1223e8SApple OSS Distributions IOOptionBits newState,
1618*4f1223e8SApple OSS Distributions IOOptionBits * oldState)
1619*4f1223e8SApple OSS Distributions {
1620*4f1223e8SApple OSS Distributions IOReturn err;
1621*4f1223e8SApple OSS Distributions IOMemoryEntry * entries;
1622*4f1223e8SApple OSS Distributions vm_purgable_t control;
1623*4f1223e8SApple OSS Distributions int totalState, state;
1624*4f1223e8SApple OSS Distributions
1625*4f1223e8SApple OSS Distributions totalState = kIOMemoryPurgeableNonVolatile;
1626*4f1223e8SApple OSS Distributions err = kIOReturnSuccess;
1627*4f1223e8SApple OSS Distributions entries = ref->entries + ref->count;
1628*4f1223e8SApple OSS Distributions while (entries > &ref->entries[0]) {
1629*4f1223e8SApple OSS Distributions entries--;
1630*4f1223e8SApple OSS Distributions
1631*4f1223e8SApple OSS Distributions err = purgeableControlBits(newState, &control, &state);
1632*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != err) {
1633*4f1223e8SApple OSS Distributions break;
1634*4f1223e8SApple OSS Distributions }
1635*4f1223e8SApple OSS Distributions err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1636*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != err) {
1637*4f1223e8SApple OSS Distributions break;
1638*4f1223e8SApple OSS Distributions }
1639*4f1223e8SApple OSS Distributions err = purgeableStateBits(&state);
1640*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != err) {
1641*4f1223e8SApple OSS Distributions break;
1642*4f1223e8SApple OSS Distributions }
1643*4f1223e8SApple OSS Distributions
1644*4f1223e8SApple OSS Distributions if (kIOMemoryPurgeableEmpty == state) {
1645*4f1223e8SApple OSS Distributions totalState = kIOMemoryPurgeableEmpty;
1646*4f1223e8SApple OSS Distributions } else if (kIOMemoryPurgeableEmpty == totalState) {
1647*4f1223e8SApple OSS Distributions continue;
1648*4f1223e8SApple OSS Distributions } else if (kIOMemoryPurgeableVolatile == totalState) {
1649*4f1223e8SApple OSS Distributions continue;
1650*4f1223e8SApple OSS Distributions } else if (kIOMemoryPurgeableVolatile == state) {
1651*4f1223e8SApple OSS Distributions totalState = kIOMemoryPurgeableVolatile;
1652*4f1223e8SApple OSS Distributions } else {
1653*4f1223e8SApple OSS Distributions totalState = kIOMemoryPurgeableNonVolatile;
1654*4f1223e8SApple OSS Distributions }
1655*4f1223e8SApple OSS Distributions }
1656*4f1223e8SApple OSS Distributions
1657*4f1223e8SApple OSS Distributions if (oldState) {
1658*4f1223e8SApple OSS Distributions *oldState = totalState;
1659*4f1223e8SApple OSS Distributions }
1660*4f1223e8SApple OSS Distributions return err;
1661*4f1223e8SApple OSS Distributions }
1662*4f1223e8SApple OSS Distributions
1663*4f1223e8SApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1664*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1665*4f1223e8SApple OSS Distributions IOMemoryReference * ref,
1666*4f1223e8SApple OSS Distributions task_t newOwner,
1667*4f1223e8SApple OSS Distributions int newLedgerTag,
1668*4f1223e8SApple OSS Distributions IOOptionBits newLedgerOptions)
1669*4f1223e8SApple OSS Distributions {
1670*4f1223e8SApple OSS Distributions IOReturn err, totalErr;
1671*4f1223e8SApple OSS Distributions IOMemoryEntry * entries;
1672*4f1223e8SApple OSS Distributions
1673*4f1223e8SApple OSS Distributions totalErr = kIOReturnSuccess;
1674*4f1223e8SApple OSS Distributions entries = ref->entries + ref->count;
1675*4f1223e8SApple OSS Distributions while (entries > &ref->entries[0]) {
1676*4f1223e8SApple OSS Distributions entries--;
1677*4f1223e8SApple OSS Distributions
1678*4f1223e8SApple OSS Distributions err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1679*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != err) {
1680*4f1223e8SApple OSS Distributions totalErr = err;
1681*4f1223e8SApple OSS Distributions }
1682*4f1223e8SApple OSS Distributions }
1683*4f1223e8SApple OSS Distributions
1684*4f1223e8SApple OSS Distributions return totalErr;
1685*4f1223e8SApple OSS Distributions }
1686*4f1223e8SApple OSS Distributions
1687*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1688*4f1223e8SApple OSS Distributions
1689*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1690*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withAddress(void * address,
1691*4f1223e8SApple OSS Distributions IOByteCount length,
1692*4f1223e8SApple OSS Distributions IODirection direction)
1693*4f1223e8SApple OSS Distributions {
1694*4f1223e8SApple OSS Distributions return IOMemoryDescriptor::
1695*4f1223e8SApple OSS Distributions withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1696*4f1223e8SApple OSS Distributions }
1697*4f1223e8SApple OSS Distributions
1698*4f1223e8SApple OSS Distributions #ifndef __LP64__
1699*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1700*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1701*4f1223e8SApple OSS Distributions IOByteCount length,
1702*4f1223e8SApple OSS Distributions IODirection direction,
1703*4f1223e8SApple OSS Distributions task_t task)
1704*4f1223e8SApple OSS Distributions {
1705*4f1223e8SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1706*4f1223e8SApple OSS Distributions if (that) {
1707*4f1223e8SApple OSS Distributions if (that->initWithAddress(address, length, direction, task)) {
1708*4f1223e8SApple OSS Distributions return os::move(that);
1709*4f1223e8SApple OSS Distributions }
1710*4f1223e8SApple OSS Distributions }
1711*4f1223e8SApple OSS Distributions return nullptr;
1712*4f1223e8SApple OSS Distributions }
1713*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
1714*4f1223e8SApple OSS Distributions
1715*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1716*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1717*4f1223e8SApple OSS Distributions IOPhysicalAddress address,
1718*4f1223e8SApple OSS Distributions IOByteCount length,
1719*4f1223e8SApple OSS Distributions IODirection direction )
1720*4f1223e8SApple OSS Distributions {
1721*4f1223e8SApple OSS Distributions return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1722*4f1223e8SApple OSS Distributions }
1723*4f1223e8SApple OSS Distributions
1724*4f1223e8SApple OSS Distributions #ifndef __LP64__
1725*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1726*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1727*4f1223e8SApple OSS Distributions UInt32 withCount,
1728*4f1223e8SApple OSS Distributions IODirection direction,
1729*4f1223e8SApple OSS Distributions task_t task,
1730*4f1223e8SApple OSS Distributions bool asReference)
1731*4f1223e8SApple OSS Distributions {
1732*4f1223e8SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1733*4f1223e8SApple OSS Distributions if (that) {
1734*4f1223e8SApple OSS Distributions if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1735*4f1223e8SApple OSS Distributions return os::move(that);
1736*4f1223e8SApple OSS Distributions }
1737*4f1223e8SApple OSS Distributions }
1738*4f1223e8SApple OSS Distributions return nullptr;
1739*4f1223e8SApple OSS Distributions }
1740*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
1741*4f1223e8SApple OSS Distributions
1742*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1743*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1744*4f1223e8SApple OSS Distributions mach_vm_size_t length,
1745*4f1223e8SApple OSS Distributions IOOptionBits options,
1746*4f1223e8SApple OSS Distributions task_t task)
1747*4f1223e8SApple OSS Distributions {
1748*4f1223e8SApple OSS Distributions IOAddressRange range = { address, length };
1749*4f1223e8SApple OSS Distributions return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1750*4f1223e8SApple OSS Distributions }
1751*4f1223e8SApple OSS Distributions
1752*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1753*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1754*4f1223e8SApple OSS Distributions UInt32 rangeCount,
1755*4f1223e8SApple OSS Distributions IOOptionBits options,
1756*4f1223e8SApple OSS Distributions task_t task)
1757*4f1223e8SApple OSS Distributions {
1758*4f1223e8SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1759*4f1223e8SApple OSS Distributions if (that) {
1760*4f1223e8SApple OSS Distributions if (task) {
1761*4f1223e8SApple OSS Distributions options |= kIOMemoryTypeVirtual64;
1762*4f1223e8SApple OSS Distributions } else {
1763*4f1223e8SApple OSS Distributions options |= kIOMemoryTypePhysical64;
1764*4f1223e8SApple OSS Distributions }
1765*4f1223e8SApple OSS Distributions
1766*4f1223e8SApple OSS Distributions if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1767*4f1223e8SApple OSS Distributions return os::move(that);
1768*4f1223e8SApple OSS Distributions }
1769*4f1223e8SApple OSS Distributions }
1770*4f1223e8SApple OSS Distributions
1771*4f1223e8SApple OSS Distributions return nullptr;
1772*4f1223e8SApple OSS Distributions }
1773*4f1223e8SApple OSS Distributions
1774*4f1223e8SApple OSS Distributions
1775*4f1223e8SApple OSS Distributions /*
1776*4f1223e8SApple OSS Distributions * withOptions:
1777*4f1223e8SApple OSS Distributions *
1778*4f1223e8SApple OSS Distributions * Create a new IOMemoryDescriptor. The buffer is made up of several
1779*4f1223e8SApple OSS Distributions * virtual address ranges, from a given task.
1780*4f1223e8SApple OSS Distributions *
1781*4f1223e8SApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1782*4f1223e8SApple OSS Distributions */
1783*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1784*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withOptions(void * buffers,
1785*4f1223e8SApple OSS Distributions UInt32 count,
1786*4f1223e8SApple OSS Distributions UInt32 offset,
1787*4f1223e8SApple OSS Distributions task_t task,
1788*4f1223e8SApple OSS Distributions IOOptionBits opts,
1789*4f1223e8SApple OSS Distributions IOMapper * mapper)
1790*4f1223e8SApple OSS Distributions {
1791*4f1223e8SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1792*4f1223e8SApple OSS Distributions
1793*4f1223e8SApple OSS Distributions if (self
1794*4f1223e8SApple OSS Distributions && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1795*4f1223e8SApple OSS Distributions return nullptr;
1796*4f1223e8SApple OSS Distributions }
1797*4f1223e8SApple OSS Distributions
1798*4f1223e8SApple OSS Distributions return os::move(self);
1799*4f1223e8SApple OSS Distributions }
1800*4f1223e8SApple OSS Distributions
1801*4f1223e8SApple OSS Distributions bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1802*4f1223e8SApple OSS Distributions IOMemoryDescriptor::initWithOptions(void * buffers,
1803*4f1223e8SApple OSS Distributions UInt32 count,
1804*4f1223e8SApple OSS Distributions UInt32 offset,
1805*4f1223e8SApple OSS Distributions task_t task,
1806*4f1223e8SApple OSS Distributions IOOptionBits options,
1807*4f1223e8SApple OSS Distributions IOMapper * mapper)
1808*4f1223e8SApple OSS Distributions {
1809*4f1223e8SApple OSS Distributions return false;
1810*4f1223e8SApple OSS Distributions }
1811*4f1223e8SApple OSS Distributions
1812*4f1223e8SApple OSS Distributions #ifndef __LP64__
1813*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1814*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1815*4f1223e8SApple OSS Distributions UInt32 withCount,
1816*4f1223e8SApple OSS Distributions IODirection direction,
1817*4f1223e8SApple OSS Distributions bool asReference)
1818*4f1223e8SApple OSS Distributions {
1819*4f1223e8SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1820*4f1223e8SApple OSS Distributions if (that) {
1821*4f1223e8SApple OSS Distributions if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1822*4f1223e8SApple OSS Distributions return os::move(that);
1823*4f1223e8SApple OSS Distributions }
1824*4f1223e8SApple OSS Distributions }
1825*4f1223e8SApple OSS Distributions return nullptr;
1826*4f1223e8SApple OSS Distributions }
1827*4f1223e8SApple OSS Distributions
1828*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1829*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1830*4f1223e8SApple OSS Distributions IOByteCount offset,
1831*4f1223e8SApple OSS Distributions IOByteCount length,
1832*4f1223e8SApple OSS Distributions IODirection direction)
1833*4f1223e8SApple OSS Distributions {
1834*4f1223e8SApple OSS Distributions return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1835*4f1223e8SApple OSS Distributions }
1836*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
1837*4f1223e8SApple OSS Distributions
1838*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1839*4f1223e8SApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1840*4f1223e8SApple OSS Distributions {
1841*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor *origGenMD =
1842*4f1223e8SApple OSS Distributions OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1843*4f1223e8SApple OSS Distributions
1844*4f1223e8SApple OSS Distributions if (origGenMD) {
1845*4f1223e8SApple OSS Distributions return IOGeneralMemoryDescriptor::
1846*4f1223e8SApple OSS Distributions withPersistentMemoryDescriptor(origGenMD);
1847*4f1223e8SApple OSS Distributions } else {
1848*4f1223e8SApple OSS Distributions return nullptr;
1849*4f1223e8SApple OSS Distributions }
1850*4f1223e8SApple OSS Distributions }
1851*4f1223e8SApple OSS Distributions
1852*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor * originalMD)1853*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1854*4f1223e8SApple OSS Distributions {
1855*4f1223e8SApple OSS Distributions IOMemoryReference * memRef;
1856*4f1223e8SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> self;
1857*4f1223e8SApple OSS Distributions
1858*4f1223e8SApple OSS Distributions if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1859*4f1223e8SApple OSS Distributions return nullptr;
1860*4f1223e8SApple OSS Distributions }
1861*4f1223e8SApple OSS Distributions
1862*4f1223e8SApple OSS Distributions if (memRef == originalMD->_memRef) {
1863*4f1223e8SApple OSS Distributions self.reset(originalMD, OSRetain);
1864*4f1223e8SApple OSS Distributions originalMD->memoryReferenceRelease(memRef);
1865*4f1223e8SApple OSS Distributions return os::move(self);
1866*4f1223e8SApple OSS Distributions }
1867*4f1223e8SApple OSS Distributions
1868*4f1223e8SApple OSS Distributions self = OSMakeShared<IOGeneralMemoryDescriptor>();
1869*4f1223e8SApple OSS Distributions IOMDPersistentInitData initData = { originalMD, memRef };
1870*4f1223e8SApple OSS Distributions
1871*4f1223e8SApple OSS Distributions if (self
1872*4f1223e8SApple OSS Distributions && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1873*4f1223e8SApple OSS Distributions return nullptr;
1874*4f1223e8SApple OSS Distributions }
1875*4f1223e8SApple OSS Distributions return os::move(self);
1876*4f1223e8SApple OSS Distributions }
1877*4f1223e8SApple OSS Distributions
1878*4f1223e8SApple OSS Distributions #ifndef __LP64__
1879*4f1223e8SApple OSS Distributions bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1880*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(void * address,
1881*4f1223e8SApple OSS Distributions IOByteCount withLength,
1882*4f1223e8SApple OSS Distributions IODirection withDirection)
1883*4f1223e8SApple OSS Distributions {
1884*4f1223e8SApple OSS Distributions _singleRange.v.address = (vm_offset_t) address;
1885*4f1223e8SApple OSS Distributions _singleRange.v.length = withLength;
1886*4f1223e8SApple OSS Distributions
1887*4f1223e8SApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1888*4f1223e8SApple OSS Distributions }
1889*4f1223e8SApple OSS Distributions
1890*4f1223e8SApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1891*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1892*4f1223e8SApple OSS Distributions IOByteCount withLength,
1893*4f1223e8SApple OSS Distributions IODirection withDirection,
1894*4f1223e8SApple OSS Distributions task_t withTask)
1895*4f1223e8SApple OSS Distributions {
1896*4f1223e8SApple OSS Distributions _singleRange.v.address = address;
1897*4f1223e8SApple OSS Distributions _singleRange.v.length = withLength;
1898*4f1223e8SApple OSS Distributions
1899*4f1223e8SApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1900*4f1223e8SApple OSS Distributions }
1901*4f1223e8SApple OSS Distributions
1902*4f1223e8SApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1903*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1904*4f1223e8SApple OSS Distributions IOPhysicalAddress address,
1905*4f1223e8SApple OSS Distributions IOByteCount withLength,
1906*4f1223e8SApple OSS Distributions IODirection withDirection )
1907*4f1223e8SApple OSS Distributions {
1908*4f1223e8SApple OSS Distributions _singleRange.p.address = address;
1909*4f1223e8SApple OSS Distributions _singleRange.p.length = withLength;
1910*4f1223e8SApple OSS Distributions
1911*4f1223e8SApple OSS Distributions return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1912*4f1223e8SApple OSS Distributions }
1913*4f1223e8SApple OSS Distributions
1914*4f1223e8SApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1915*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1916*4f1223e8SApple OSS Distributions IOPhysicalRange * ranges,
1917*4f1223e8SApple OSS Distributions UInt32 count,
1918*4f1223e8SApple OSS Distributions IODirection direction,
1919*4f1223e8SApple OSS Distributions bool reference)
1920*4f1223e8SApple OSS Distributions {
1921*4f1223e8SApple OSS Distributions IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1922*4f1223e8SApple OSS Distributions
1923*4f1223e8SApple OSS Distributions if (reference) {
1924*4f1223e8SApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1925*4f1223e8SApple OSS Distributions }
1926*4f1223e8SApple OSS Distributions
1927*4f1223e8SApple OSS Distributions return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1928*4f1223e8SApple OSS Distributions }
1929*4f1223e8SApple OSS Distributions
1930*4f1223e8SApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1931*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1932*4f1223e8SApple OSS Distributions IOVirtualRange * ranges,
1933*4f1223e8SApple OSS Distributions UInt32 count,
1934*4f1223e8SApple OSS Distributions IODirection direction,
1935*4f1223e8SApple OSS Distributions task_t task,
1936*4f1223e8SApple OSS Distributions bool reference)
1937*4f1223e8SApple OSS Distributions {
1938*4f1223e8SApple OSS Distributions IOOptionBits mdOpts = direction;
1939*4f1223e8SApple OSS Distributions
1940*4f1223e8SApple OSS Distributions if (reference) {
1941*4f1223e8SApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1942*4f1223e8SApple OSS Distributions }
1943*4f1223e8SApple OSS Distributions
1944*4f1223e8SApple OSS Distributions if (task) {
1945*4f1223e8SApple OSS Distributions mdOpts |= kIOMemoryTypeVirtual;
1946*4f1223e8SApple OSS Distributions
1947*4f1223e8SApple OSS Distributions // Auto-prepare if this is a kernel memory descriptor as very few
1948*4f1223e8SApple OSS Distributions // clients bother to prepare() kernel memory.
1949*4f1223e8SApple OSS Distributions // But it was not enforced so what are you going to do?
1950*4f1223e8SApple OSS Distributions if (task == kernel_task) {
1951*4f1223e8SApple OSS Distributions mdOpts |= kIOMemoryAutoPrepare;
1952*4f1223e8SApple OSS Distributions }
1953*4f1223e8SApple OSS Distributions } else {
1954*4f1223e8SApple OSS Distributions mdOpts |= kIOMemoryTypePhysical;
1955*4f1223e8SApple OSS Distributions }
1956*4f1223e8SApple OSS Distributions
1957*4f1223e8SApple OSS Distributions return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1958*4f1223e8SApple OSS Distributions }
1959*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
1960*4f1223e8SApple OSS Distributions
1961*4f1223e8SApple OSS Distributions /*
1962*4f1223e8SApple OSS Distributions * initWithOptions:
1963*4f1223e8SApple OSS Distributions *
1964*4f1223e8SApple OSS Distributions * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1965*4f1223e8SApple OSS Distributions * from a given task, several physical ranges, an UPL from the ubc
1966*4f1223e8SApple OSS Distributions * system or a uio (may be 64bit) from the BSD subsystem.
1967*4f1223e8SApple OSS Distributions *
1968*4f1223e8SApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1969*4f1223e8SApple OSS Distributions *
1970*4f1223e8SApple OSS Distributions * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1971*4f1223e8SApple OSS Distributions * existing instance -- note this behavior is not commonly supported in other
1972*4f1223e8SApple OSS Distributions * I/O Kit classes, although it is supported here.
1973*4f1223e8SApple OSS Distributions */
1974*4f1223e8SApple OSS Distributions
1975*4f1223e8SApple OSS Distributions bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1976*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1977*4f1223e8SApple OSS Distributions UInt32 count,
1978*4f1223e8SApple OSS Distributions UInt32 offset,
1979*4f1223e8SApple OSS Distributions task_t task,
1980*4f1223e8SApple OSS Distributions IOOptionBits options,
1981*4f1223e8SApple OSS Distributions IOMapper * mapper)
1982*4f1223e8SApple OSS Distributions {
1983*4f1223e8SApple OSS Distributions IOOptionBits type = options & kIOMemoryTypeMask;
1984*4f1223e8SApple OSS Distributions
1985*4f1223e8SApple OSS Distributions #ifndef __LP64__
1986*4f1223e8SApple OSS Distributions if (task
1987*4f1223e8SApple OSS Distributions && (kIOMemoryTypeVirtual == type)
1988*4f1223e8SApple OSS Distributions && vm_map_is_64bit(get_task_map(task))
1989*4f1223e8SApple OSS Distributions && ((IOVirtualRange *) buffers)->address) {
1990*4f1223e8SApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1991*4f1223e8SApple OSS Distributions return false;
1992*4f1223e8SApple OSS Distributions }
1993*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
1994*4f1223e8SApple OSS Distributions
1995*4f1223e8SApple OSS Distributions // Grab the original MD's configuation data to initialse the
1996*4f1223e8SApple OSS Distributions // arguments to this function.
1997*4f1223e8SApple OSS Distributions if (kIOMemoryTypePersistentMD == type) {
1998*4f1223e8SApple OSS Distributions IOMDPersistentInitData *initData = (typeof(initData))buffers;
1999*4f1223e8SApple OSS Distributions const IOGeneralMemoryDescriptor *orig = initData->fMD;
2000*4f1223e8SApple OSS Distributions ioGMDData *dataP = getDataP(orig->_memoryEntries);
2001*4f1223e8SApple OSS Distributions
2002*4f1223e8SApple OSS Distributions // Only accept persistent memory descriptors with valid dataP data.
2003*4f1223e8SApple OSS Distributions assert(orig->_rangesCount == 1);
2004*4f1223e8SApple OSS Distributions if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
2005*4f1223e8SApple OSS Distributions return false;
2006*4f1223e8SApple OSS Distributions }
2007*4f1223e8SApple OSS Distributions
2008*4f1223e8SApple OSS Distributions _memRef = initData->fMemRef; // Grab the new named entry
2009*4f1223e8SApple OSS Distributions options = orig->_flags & ~kIOMemoryAsReference;
2010*4f1223e8SApple OSS Distributions type = options & kIOMemoryTypeMask;
2011*4f1223e8SApple OSS Distributions buffers = orig->_ranges.v;
2012*4f1223e8SApple OSS Distributions count = orig->_rangesCount;
2013*4f1223e8SApple OSS Distributions
2014*4f1223e8SApple OSS Distributions // Now grab the original task and whatever mapper was previously used
2015*4f1223e8SApple OSS Distributions task = orig->_task;
2016*4f1223e8SApple OSS Distributions mapper = dataP->fMapper;
2017*4f1223e8SApple OSS Distributions
2018*4f1223e8SApple OSS Distributions // We are ready to go through the original initialisation now
2019*4f1223e8SApple OSS Distributions }
2020*4f1223e8SApple OSS Distributions
2021*4f1223e8SApple OSS Distributions switch (type) {
2022*4f1223e8SApple OSS Distributions case kIOMemoryTypeUIO:
2023*4f1223e8SApple OSS Distributions case kIOMemoryTypeVirtual:
2024*4f1223e8SApple OSS Distributions #ifndef __LP64__
2025*4f1223e8SApple OSS Distributions case kIOMemoryTypeVirtual64:
2026*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2027*4f1223e8SApple OSS Distributions assert(task);
2028*4f1223e8SApple OSS Distributions if (!task) {
2029*4f1223e8SApple OSS Distributions return false;
2030*4f1223e8SApple OSS Distributions }
2031*4f1223e8SApple OSS Distributions break;
2032*4f1223e8SApple OSS Distributions
2033*4f1223e8SApple OSS Distributions case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
2034*4f1223e8SApple OSS Distributions #ifndef __LP64__
2035*4f1223e8SApple OSS Distributions case kIOMemoryTypePhysical64:
2036*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2037*4f1223e8SApple OSS Distributions case kIOMemoryTypeUPL:
2038*4f1223e8SApple OSS Distributions assert(!task);
2039*4f1223e8SApple OSS Distributions break;
2040*4f1223e8SApple OSS Distributions default:
2041*4f1223e8SApple OSS Distributions return false; /* bad argument */
2042*4f1223e8SApple OSS Distributions }
2043*4f1223e8SApple OSS Distributions
2044*4f1223e8SApple OSS Distributions assert(buffers);
2045*4f1223e8SApple OSS Distributions assert(count);
2046*4f1223e8SApple OSS Distributions
2047*4f1223e8SApple OSS Distributions /*
2048*4f1223e8SApple OSS Distributions * We can check the _initialized instance variable before having ever set
2049*4f1223e8SApple OSS Distributions * it to an initial value because I/O Kit guarantees that all our instance
2050*4f1223e8SApple OSS Distributions * variables are zeroed on an object's allocation.
2051*4f1223e8SApple OSS Distributions */
2052*4f1223e8SApple OSS Distributions
2053*4f1223e8SApple OSS Distributions if (_initialized) {
2054*4f1223e8SApple OSS Distributions /*
2055*4f1223e8SApple OSS Distributions * An existing memory descriptor is being retargeted to point to
2056*4f1223e8SApple OSS Distributions * somewhere else. Clean up our present state.
2057*4f1223e8SApple OSS Distributions */
2058*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
2059*4f1223e8SApple OSS Distributions if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2060*4f1223e8SApple OSS Distributions while (_wireCount) {
2061*4f1223e8SApple OSS Distributions complete();
2062*4f1223e8SApple OSS Distributions }
2063*4f1223e8SApple OSS Distributions }
2064*4f1223e8SApple OSS Distributions if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2065*4f1223e8SApple OSS Distributions if (kIOMemoryTypeUIO == type) {
2066*4f1223e8SApple OSS Distributions uio_free((uio_t) _ranges.v);
2067*4f1223e8SApple OSS Distributions }
2068*4f1223e8SApple OSS Distributions #ifndef __LP64__
2069*4f1223e8SApple OSS Distributions else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2070*4f1223e8SApple OSS Distributions IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2071*4f1223e8SApple OSS Distributions }
2072*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2073*4f1223e8SApple OSS Distributions else {
2074*4f1223e8SApple OSS Distributions IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2075*4f1223e8SApple OSS Distributions }
2076*4f1223e8SApple OSS Distributions }
2077*4f1223e8SApple OSS Distributions
2078*4f1223e8SApple OSS Distributions options |= (kIOMemoryRedirected & _flags);
2079*4f1223e8SApple OSS Distributions if (!(kIOMemoryRedirected & options)) {
2080*4f1223e8SApple OSS Distributions if (_memRef) {
2081*4f1223e8SApple OSS Distributions memoryReferenceRelease(_memRef);
2082*4f1223e8SApple OSS Distributions _memRef = NULL;
2083*4f1223e8SApple OSS Distributions }
2084*4f1223e8SApple OSS Distributions if (_mappings) {
2085*4f1223e8SApple OSS Distributions _mappings->flushCollection();
2086*4f1223e8SApple OSS Distributions }
2087*4f1223e8SApple OSS Distributions }
2088*4f1223e8SApple OSS Distributions } else {
2089*4f1223e8SApple OSS Distributions if (!super::init()) {
2090*4f1223e8SApple OSS Distributions return false;
2091*4f1223e8SApple OSS Distributions }
2092*4f1223e8SApple OSS Distributions _initialized = true;
2093*4f1223e8SApple OSS Distributions }
2094*4f1223e8SApple OSS Distributions
2095*4f1223e8SApple OSS Distributions // Grab the appropriate mapper
2096*4f1223e8SApple OSS Distributions if (kIOMemoryHostOrRemote & options) {
2097*4f1223e8SApple OSS Distributions options |= kIOMemoryMapperNone;
2098*4f1223e8SApple OSS Distributions }
2099*4f1223e8SApple OSS Distributions if (kIOMemoryMapperNone & options) {
2100*4f1223e8SApple OSS Distributions mapper = NULL; // No Mapper
2101*4f1223e8SApple OSS Distributions } else if (mapper == kIOMapperSystem) {
2102*4f1223e8SApple OSS Distributions IOMapper::checkForSystemMapper();
2103*4f1223e8SApple OSS Distributions gIOSystemMapper = mapper = IOMapper::gSystem;
2104*4f1223e8SApple OSS Distributions }
2105*4f1223e8SApple OSS Distributions
2106*4f1223e8SApple OSS Distributions // Remove the dynamic internal use flags from the initial setting
2107*4f1223e8SApple OSS Distributions options &= ~(kIOMemoryPreparedReadOnly);
2108*4f1223e8SApple OSS Distributions _flags = options;
2109*4f1223e8SApple OSS Distributions _task = task;
2110*4f1223e8SApple OSS Distributions
2111*4f1223e8SApple OSS Distributions #ifndef __LP64__
2112*4f1223e8SApple OSS Distributions _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
2113*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2114*4f1223e8SApple OSS Distributions
2115*4f1223e8SApple OSS Distributions _dmaReferences = 0;
2116*4f1223e8SApple OSS Distributions __iomd_reservedA = 0;
2117*4f1223e8SApple OSS Distributions __iomd_reservedB = 0;
2118*4f1223e8SApple OSS Distributions _highestPage = 0;
2119*4f1223e8SApple OSS Distributions
2120*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & options) {
2121*4f1223e8SApple OSS Distributions if (!_prepareLock) {
2122*4f1223e8SApple OSS Distributions _prepareLock = IOLockAlloc();
2123*4f1223e8SApple OSS Distributions }
2124*4f1223e8SApple OSS Distributions } else if (_prepareLock) {
2125*4f1223e8SApple OSS Distributions IOLockFree(_prepareLock);
2126*4f1223e8SApple OSS Distributions _prepareLock = NULL;
2127*4f1223e8SApple OSS Distributions }
2128*4f1223e8SApple OSS Distributions
2129*4f1223e8SApple OSS Distributions if (kIOMemoryTypeUPL == type) {
2130*4f1223e8SApple OSS Distributions ioGMDData *dataP;
2131*4f1223e8SApple OSS Distributions unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2132*4f1223e8SApple OSS Distributions
2133*4f1223e8SApple OSS Distributions if (!initMemoryEntries(dataSize, mapper)) {
2134*4f1223e8SApple OSS Distributions return false;
2135*4f1223e8SApple OSS Distributions }
2136*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
2137*4f1223e8SApple OSS Distributions dataP->fPageCnt = 0;
2138*4f1223e8SApple OSS Distributions switch (kIOMemoryDirectionMask & options) {
2139*4f1223e8SApple OSS Distributions case kIODirectionOut:
2140*4f1223e8SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess;
2141*4f1223e8SApple OSS Distributions break;
2142*4f1223e8SApple OSS Distributions case kIODirectionIn:
2143*4f1223e8SApple OSS Distributions dataP->fDMAAccess = kIODMAMapWriteAccess;
2144*4f1223e8SApple OSS Distributions break;
2145*4f1223e8SApple OSS Distributions case kIODirectionNone:
2146*4f1223e8SApple OSS Distributions case kIODirectionOutIn:
2147*4f1223e8SApple OSS Distributions default:
2148*4f1223e8SApple OSS Distributions panic("bad dir for upl 0x%x", (int) options);
2149*4f1223e8SApple OSS Distributions break;
2150*4f1223e8SApple OSS Distributions }
2151*4f1223e8SApple OSS Distributions // _wireCount++; // UPLs start out life wired
2152*4f1223e8SApple OSS Distributions
2153*4f1223e8SApple OSS Distributions _length = count;
2154*4f1223e8SApple OSS Distributions _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2155*4f1223e8SApple OSS Distributions
2156*4f1223e8SApple OSS Distributions ioPLBlock iopl;
2157*4f1223e8SApple OSS Distributions iopl.fIOPL = (upl_t) buffers;
2158*4f1223e8SApple OSS Distributions upl_set_referenced(iopl.fIOPL, true);
2159*4f1223e8SApple OSS Distributions upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2160*4f1223e8SApple OSS Distributions
2161*4f1223e8SApple OSS Distributions if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2162*4f1223e8SApple OSS Distributions panic("short external upl");
2163*4f1223e8SApple OSS Distributions }
2164*4f1223e8SApple OSS Distributions
2165*4f1223e8SApple OSS Distributions _highestPage = upl_get_highest_page(iopl.fIOPL);
2166*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2167*4f1223e8SApple OSS Distributions
2168*4f1223e8SApple OSS Distributions // Set the flag kIOPLOnDevice convieniently equal to 1
2169*4f1223e8SApple OSS Distributions iopl.fFlags = pageList->device | kIOPLExternUPL;
2170*4f1223e8SApple OSS Distributions if (!pageList->device) {
2171*4f1223e8SApple OSS Distributions // Pre-compute the offset into the UPL's page list
2172*4f1223e8SApple OSS Distributions pageList = &pageList[atop_32(offset)];
2173*4f1223e8SApple OSS Distributions offset &= PAGE_MASK;
2174*4f1223e8SApple OSS Distributions }
2175*4f1223e8SApple OSS Distributions iopl.fIOMDOffset = 0;
2176*4f1223e8SApple OSS Distributions iopl.fMappedPage = 0;
2177*4f1223e8SApple OSS Distributions iopl.fPageInfo = (vm_address_t) pageList;
2178*4f1223e8SApple OSS Distributions iopl.fPageOffset = offset;
2179*4f1223e8SApple OSS Distributions _memoryEntries->appendBytes(&iopl, sizeof(iopl));
2180*4f1223e8SApple OSS Distributions } else {
2181*4f1223e8SApple OSS Distributions // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2182*4f1223e8SApple OSS Distributions // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2183*4f1223e8SApple OSS Distributions
2184*4f1223e8SApple OSS Distributions // Initialize the memory descriptor
2185*4f1223e8SApple OSS Distributions if (options & kIOMemoryAsReference) {
2186*4f1223e8SApple OSS Distributions #ifndef __LP64__
2187*4f1223e8SApple OSS Distributions _rangesIsAllocated = false;
2188*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2189*4f1223e8SApple OSS Distributions
2190*4f1223e8SApple OSS Distributions // Hack assignment to get the buffer arg into _ranges.
2191*4f1223e8SApple OSS Distributions // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2192*4f1223e8SApple OSS Distributions // work, C++ sigh.
2193*4f1223e8SApple OSS Distributions // This also initialises the uio & physical ranges.
2194*4f1223e8SApple OSS Distributions _ranges.v = (IOVirtualRange *) buffers;
2195*4f1223e8SApple OSS Distributions } else {
2196*4f1223e8SApple OSS Distributions #ifndef __LP64__
2197*4f1223e8SApple OSS Distributions _rangesIsAllocated = true;
2198*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2199*4f1223e8SApple OSS Distributions switch (type) {
2200*4f1223e8SApple OSS Distributions case kIOMemoryTypeUIO:
2201*4f1223e8SApple OSS Distributions _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2202*4f1223e8SApple OSS Distributions break;
2203*4f1223e8SApple OSS Distributions
2204*4f1223e8SApple OSS Distributions #ifndef __LP64__
2205*4f1223e8SApple OSS Distributions case kIOMemoryTypeVirtual64:
2206*4f1223e8SApple OSS Distributions case kIOMemoryTypePhysical64:
2207*4f1223e8SApple OSS Distributions if (count == 1
2208*4f1223e8SApple OSS Distributions #ifndef __arm__
2209*4f1223e8SApple OSS Distributions && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2210*4f1223e8SApple OSS Distributions #endif
2211*4f1223e8SApple OSS Distributions ) {
2212*4f1223e8SApple OSS Distributions if (kIOMemoryTypeVirtual64 == type) {
2213*4f1223e8SApple OSS Distributions type = kIOMemoryTypeVirtual;
2214*4f1223e8SApple OSS Distributions } else {
2215*4f1223e8SApple OSS Distributions type = kIOMemoryTypePhysical;
2216*4f1223e8SApple OSS Distributions }
2217*4f1223e8SApple OSS Distributions _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2218*4f1223e8SApple OSS Distributions _rangesIsAllocated = false;
2219*4f1223e8SApple OSS Distributions _ranges.v = &_singleRange.v;
2220*4f1223e8SApple OSS Distributions _singleRange.v.address = ((IOAddressRange *) buffers)->address;
2221*4f1223e8SApple OSS Distributions _singleRange.v.length = ((IOAddressRange *) buffers)->length;
2222*4f1223e8SApple OSS Distributions break;
2223*4f1223e8SApple OSS Distributions }
2224*4f1223e8SApple OSS Distributions _ranges.v64 = IONew(IOAddressRange, count);
2225*4f1223e8SApple OSS Distributions if (!_ranges.v64) {
2226*4f1223e8SApple OSS Distributions return false;
2227*4f1223e8SApple OSS Distributions }
2228*4f1223e8SApple OSS Distributions bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2229*4f1223e8SApple OSS Distributions break;
2230*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2231*4f1223e8SApple OSS Distributions case kIOMemoryTypeVirtual:
2232*4f1223e8SApple OSS Distributions case kIOMemoryTypePhysical:
2233*4f1223e8SApple OSS Distributions if (count == 1) {
2234*4f1223e8SApple OSS Distributions _flags |= kIOMemoryAsReference;
2235*4f1223e8SApple OSS Distributions #ifndef __LP64__
2236*4f1223e8SApple OSS Distributions _rangesIsAllocated = false;
2237*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2238*4f1223e8SApple OSS Distributions _ranges.v = &_singleRange.v;
2239*4f1223e8SApple OSS Distributions } else {
2240*4f1223e8SApple OSS Distributions _ranges.v = IONew(IOVirtualRange, count);
2241*4f1223e8SApple OSS Distributions if (!_ranges.v) {
2242*4f1223e8SApple OSS Distributions return false;
2243*4f1223e8SApple OSS Distributions }
2244*4f1223e8SApple OSS Distributions }
2245*4f1223e8SApple OSS Distributions bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2246*4f1223e8SApple OSS Distributions break;
2247*4f1223e8SApple OSS Distributions }
2248*4f1223e8SApple OSS Distributions }
2249*4f1223e8SApple OSS Distributions _rangesCount = count;
2250*4f1223e8SApple OSS Distributions
2251*4f1223e8SApple OSS Distributions // Find starting address within the vector of ranges
2252*4f1223e8SApple OSS Distributions Ranges vec = _ranges;
2253*4f1223e8SApple OSS Distributions mach_vm_size_t totalLength = 0;
2254*4f1223e8SApple OSS Distributions unsigned int ind, pages = 0;
2255*4f1223e8SApple OSS Distributions for (ind = 0; ind < count; ind++) {
2256*4f1223e8SApple OSS Distributions mach_vm_address_t addr;
2257*4f1223e8SApple OSS Distributions mach_vm_address_t endAddr;
2258*4f1223e8SApple OSS Distributions mach_vm_size_t len;
2259*4f1223e8SApple OSS Distributions
2260*4f1223e8SApple OSS Distributions // addr & len are returned by this function
2261*4f1223e8SApple OSS Distributions getAddrLenForInd(addr, len, type, vec, ind, _task);
2262*4f1223e8SApple OSS Distributions if (_task) {
2263*4f1223e8SApple OSS Distributions mach_vm_size_t phys_size;
2264*4f1223e8SApple OSS Distributions kern_return_t kret;
2265*4f1223e8SApple OSS Distributions kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2266*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != kret) {
2267*4f1223e8SApple OSS Distributions break;
2268*4f1223e8SApple OSS Distributions }
2269*4f1223e8SApple OSS Distributions if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2270*4f1223e8SApple OSS Distributions break;
2271*4f1223e8SApple OSS Distributions }
2272*4f1223e8SApple OSS Distributions } else {
2273*4f1223e8SApple OSS Distributions if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2274*4f1223e8SApple OSS Distributions break;
2275*4f1223e8SApple OSS Distributions }
2276*4f1223e8SApple OSS Distributions if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2277*4f1223e8SApple OSS Distributions break;
2278*4f1223e8SApple OSS Distributions }
2279*4f1223e8SApple OSS Distributions if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2280*4f1223e8SApple OSS Distributions break;
2281*4f1223e8SApple OSS Distributions }
2282*4f1223e8SApple OSS Distributions }
2283*4f1223e8SApple OSS Distributions if (os_add_overflow(totalLength, len, &totalLength)) {
2284*4f1223e8SApple OSS Distributions break;
2285*4f1223e8SApple OSS Distributions }
2286*4f1223e8SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2287*4f1223e8SApple OSS Distributions uint64_t highPage = atop_64(addr + len - 1);
2288*4f1223e8SApple OSS Distributions if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2289*4f1223e8SApple OSS Distributions _highestPage = (ppnum_t) highPage;
2290*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2291*4f1223e8SApple OSS Distributions }
2292*4f1223e8SApple OSS Distributions }
2293*4f1223e8SApple OSS Distributions }
2294*4f1223e8SApple OSS Distributions if ((ind < count)
2295*4f1223e8SApple OSS Distributions || (totalLength != ((IOByteCount) totalLength))) {
2296*4f1223e8SApple OSS Distributions return false; /* overflow */
2297*4f1223e8SApple OSS Distributions }
2298*4f1223e8SApple OSS Distributions _length = totalLength;
2299*4f1223e8SApple OSS Distributions _pages = pages;
2300*4f1223e8SApple OSS Distributions
2301*4f1223e8SApple OSS Distributions // Auto-prepare memory at creation time.
2302*4f1223e8SApple OSS Distributions // Implied completion when descriptor is free-ed
2303*4f1223e8SApple OSS Distributions
2304*4f1223e8SApple OSS Distributions
2305*4f1223e8SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2306*4f1223e8SApple OSS Distributions _wireCount++; // Physical MDs are, by definition, wired
2307*4f1223e8SApple OSS Distributions } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2308*4f1223e8SApple OSS Distributions ioGMDData *dataP;
2309*4f1223e8SApple OSS Distributions unsigned dataSize;
2310*4f1223e8SApple OSS Distributions
2311*4f1223e8SApple OSS Distributions if (_pages > atop_64(max_mem)) {
2312*4f1223e8SApple OSS Distributions return false;
2313*4f1223e8SApple OSS Distributions }
2314*4f1223e8SApple OSS Distributions
2315*4f1223e8SApple OSS Distributions dataSize = computeDataSize(_pages, /* upls */ count * 2);
2316*4f1223e8SApple OSS Distributions if (!initMemoryEntries(dataSize, mapper)) {
2317*4f1223e8SApple OSS Distributions return false;
2318*4f1223e8SApple OSS Distributions }
2319*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
2320*4f1223e8SApple OSS Distributions dataP->fPageCnt = _pages;
2321*4f1223e8SApple OSS Distributions
2322*4f1223e8SApple OSS Distributions if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2323*4f1223e8SApple OSS Distributions && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2324*4f1223e8SApple OSS Distributions _kernelTag = IOMemoryTag(kernel_map);
2325*4f1223e8SApple OSS Distributions if (_kernelTag == gIOSurfaceTag) {
2326*4f1223e8SApple OSS Distributions _userTag = VM_MEMORY_IOSURFACE;
2327*4f1223e8SApple OSS Distributions }
2328*4f1223e8SApple OSS Distributions }
2329*4f1223e8SApple OSS Distributions
2330*4f1223e8SApple OSS Distributions if ((kIOMemoryPersistent & _flags) && !_memRef) {
2331*4f1223e8SApple OSS Distributions IOReturn
2332*4f1223e8SApple OSS Distributions err = memoryReferenceCreate(0, &_memRef);
2333*4f1223e8SApple OSS Distributions if (kIOReturnSuccess != err) {
2334*4f1223e8SApple OSS Distributions return false;
2335*4f1223e8SApple OSS Distributions }
2336*4f1223e8SApple OSS Distributions }
2337*4f1223e8SApple OSS Distributions
2338*4f1223e8SApple OSS Distributions if ((_flags & kIOMemoryAutoPrepare)
2339*4f1223e8SApple OSS Distributions && prepare() != kIOReturnSuccess) {
2340*4f1223e8SApple OSS Distributions return false;
2341*4f1223e8SApple OSS Distributions }
2342*4f1223e8SApple OSS Distributions }
2343*4f1223e8SApple OSS Distributions }
2344*4f1223e8SApple OSS Distributions
2345*4f1223e8SApple OSS Distributions return true;
2346*4f1223e8SApple OSS Distributions }
2347*4f1223e8SApple OSS Distributions
2348*4f1223e8SApple OSS Distributions /*
2349*4f1223e8SApple OSS Distributions * free
2350*4f1223e8SApple OSS Distributions *
2351*4f1223e8SApple OSS Distributions * Free resources.
2352*4f1223e8SApple OSS Distributions */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach the device pager's back-pointer to this descriptor first,
	// under the lock, so pager paths cannot reach a dying object.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		ioGMDData * dataP;
		// Physical descriptors are never unwired; just tear down any
		// DMA mapping that is still live.
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance any outstanding prepare() calls (implied completion).
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free the ranges storage unless it is caller-owned
	// (kIOMemoryAsReference); the union member freed must match the type.
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			// No pager: we own the reserved block and free it directly.
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2416*4f1223e8SApple OSS Distributions
#ifndef __LP64__
// Deprecated pre-LP64 entry points, kept only so legacy 32-bit clients
// still link; any actual call is a fatal programming error.
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}

void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
2430*4f1223e8SApple OSS Distributions
2431*4f1223e8SApple OSS Distributions /*
2432*4f1223e8SApple OSS Distributions * getDirection:
2433*4f1223e8SApple OSS Distributions *
2434*4f1223e8SApple OSS Distributions * Get the direction of the transfer.
2435*4f1223e8SApple OSS Distributions */
2436*4f1223e8SApple OSS Distributions IODirection
getDirection() const2437*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getDirection() const
2438*4f1223e8SApple OSS Distributions {
2439*4f1223e8SApple OSS Distributions #ifndef __LP64__
2440*4f1223e8SApple OSS Distributions if (_direction) {
2441*4f1223e8SApple OSS Distributions return _direction;
2442*4f1223e8SApple OSS Distributions }
2443*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2444*4f1223e8SApple OSS Distributions return (IODirection) (_flags & kIOMemoryDirectionMask);
2445*4f1223e8SApple OSS Distributions }
2446*4f1223e8SApple OSS Distributions
2447*4f1223e8SApple OSS Distributions /*
2448*4f1223e8SApple OSS Distributions * getLength:
2449*4f1223e8SApple OSS Distributions *
2450*4f1223e8SApple OSS Distributions * Get the length of the transfer (over all ranges).
2451*4f1223e8SApple OSS Distributions */
2452*4f1223e8SApple OSS Distributions IOByteCount
getLength() const2453*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getLength() const
2454*4f1223e8SApple OSS Distributions {
2455*4f1223e8SApple OSS Distributions return _length;
2456*4f1223e8SApple OSS Distributions }
2457*4f1223e8SApple OSS Distributions
2458*4f1223e8SApple OSS Distributions void
setTag(IOOptionBits tag)2459*4f1223e8SApple OSS Distributions IOMemoryDescriptor::setTag( IOOptionBits tag )
2460*4f1223e8SApple OSS Distributions {
2461*4f1223e8SApple OSS Distributions _tag = tag;
2462*4f1223e8SApple OSS Distributions }
2463*4f1223e8SApple OSS Distributions
2464*4f1223e8SApple OSS Distributions IOOptionBits
getTag(void)2465*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getTag( void )
2466*4f1223e8SApple OSS Distributions {
2467*4f1223e8SApple OSS Distributions return _tag;
2468*4f1223e8SApple OSS Distributions }
2469*4f1223e8SApple OSS Distributions
2470*4f1223e8SApple OSS Distributions uint64_t
getFlags(void)2471*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getFlags(void)
2472*4f1223e8SApple OSS Distributions {
2473*4f1223e8SApple OSS Distributions return _flags;
2474*4f1223e8SApple OSS Distributions }
2475*4f1223e8SApple OSS Distributions
2476*4f1223e8SApple OSS Distributions OSObject *
copyContext(void) const2477*4f1223e8SApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2478*4f1223e8SApple OSS Distributions {
2479*4f1223e8SApple OSS Distributions if (reserved) {
2480*4f1223e8SApple OSS Distributions OSObject * context = reserved->contextObject;
2481*4f1223e8SApple OSS Distributions if (context) {
2482*4f1223e8SApple OSS Distributions context->retain();
2483*4f1223e8SApple OSS Distributions }
2484*4f1223e8SApple OSS Distributions return context;
2485*4f1223e8SApple OSS Distributions } else {
2486*4f1223e8SApple OSS Distributions return NULL;
2487*4f1223e8SApple OSS Distributions }
2488*4f1223e8SApple OSS Distributions }
2489*4f1223e8SApple OSS Distributions
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Lazily allocate the expansion data that stores the context.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		OSObject * oldObject = reserved->contextObject;
		// Atomically detach the previous context before releasing it, so
		// a racing setContext() cannot release the same reference twice.
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			// Retain on behalf of the descriptor; dropped by the next
			// setContext() -- presumably also by cleanKernelReserved()
			// at free time (TODO confirm against its implementation).
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2510*4f1223e8SApple OSS Distributions
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
	// Wire the memory for the duration of the lookup; report 0 when the
	// descriptor cannot be prepared.
	if (prepare() != kIOReturnSuccess) {
		return 0;
	}

	addr64_t physAddr = getPhysicalSegment64( offset, length );
	complete();

	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}

#pragma clang diagnostic pop

#endif /* !__LP64__ */
2532*4f1223e8SApple OSS Distributions
2533*4f1223e8SApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)2534*4f1223e8SApple OSS Distributions IOMemoryDescriptor::readBytes
2535*4f1223e8SApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
2536*4f1223e8SApple OSS Distributions {
2537*4f1223e8SApple OSS Distributions addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2538*4f1223e8SApple OSS Distributions IOByteCount endoffset;
2539*4f1223e8SApple OSS Distributions IOByteCount remaining;
2540*4f1223e8SApple OSS Distributions
2541*4f1223e8SApple OSS Distributions
2542*4f1223e8SApple OSS Distributions // Check that this entire I/O is within the available range
2543*4f1223e8SApple OSS Distributions if ((offset > _length)
2544*4f1223e8SApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2545*4f1223e8SApple OSS Distributions || (endoffset > _length)) {
2546*4f1223e8SApple OSS Distributions assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2547*4f1223e8SApple OSS Distributions return 0;
2548*4f1223e8SApple OSS Distributions }
2549*4f1223e8SApple OSS Distributions if (offset >= _length) {
2550*4f1223e8SApple OSS Distributions return 0;
2551*4f1223e8SApple OSS Distributions }
2552*4f1223e8SApple OSS Distributions
2553*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2554*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
2555*4f1223e8SApple OSS Distributions return 0;
2556*4f1223e8SApple OSS Distributions }
2557*4f1223e8SApple OSS Distributions
2558*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2559*4f1223e8SApple OSS Distributions LOCK;
2560*4f1223e8SApple OSS Distributions }
2561*4f1223e8SApple OSS Distributions
2562*4f1223e8SApple OSS Distributions remaining = length = min(length, _length - offset);
2563*4f1223e8SApple OSS Distributions while (remaining) { // (process another target segment?)
2564*4f1223e8SApple OSS Distributions addr64_t srcAddr64;
2565*4f1223e8SApple OSS Distributions IOByteCount srcLen;
2566*4f1223e8SApple OSS Distributions
2567*4f1223e8SApple OSS Distributions srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2568*4f1223e8SApple OSS Distributions if (!srcAddr64) {
2569*4f1223e8SApple OSS Distributions break;
2570*4f1223e8SApple OSS Distributions }
2571*4f1223e8SApple OSS Distributions
2572*4f1223e8SApple OSS Distributions // Clip segment length to remaining
2573*4f1223e8SApple OSS Distributions if (srcLen > remaining) {
2574*4f1223e8SApple OSS Distributions srcLen = remaining;
2575*4f1223e8SApple OSS Distributions }
2576*4f1223e8SApple OSS Distributions
2577*4f1223e8SApple OSS Distributions if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2578*4f1223e8SApple OSS Distributions srcLen = (UINT_MAX - PAGE_SIZE + 1);
2579*4f1223e8SApple OSS Distributions }
2580*4f1223e8SApple OSS Distributions copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2581*4f1223e8SApple OSS Distributions cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2582*4f1223e8SApple OSS Distributions
2583*4f1223e8SApple OSS Distributions dstAddr += srcLen;
2584*4f1223e8SApple OSS Distributions offset += srcLen;
2585*4f1223e8SApple OSS Distributions remaining -= srcLen;
2586*4f1223e8SApple OSS Distributions }
2587*4f1223e8SApple OSS Distributions
2588*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2589*4f1223e8SApple OSS Distributions UNLOCK;
2590*4f1223e8SApple OSS Distributions }
2591*4f1223e8SApple OSS Distributions
2592*4f1223e8SApple OSS Distributions assert(!remaining);
2593*4f1223e8SApple OSS Distributions
2594*4f1223e8SApple OSS Distributions return length - remaining;
2595*4f1223e8SApple OSS Distributions }
2596*4f1223e8SApple OSS Distributions
2597*4f1223e8SApple OSS Distributions IOByteCount
writeBytes(IOByteCount inoffset,const void * bytes,IOByteCount length)2598*4f1223e8SApple OSS Distributions IOMemoryDescriptor::writeBytes
2599*4f1223e8SApple OSS Distributions (IOByteCount inoffset, const void *bytes, IOByteCount length)
2600*4f1223e8SApple OSS Distributions {
2601*4f1223e8SApple OSS Distributions addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2602*4f1223e8SApple OSS Distributions IOByteCount remaining;
2603*4f1223e8SApple OSS Distributions IOByteCount endoffset;
2604*4f1223e8SApple OSS Distributions IOByteCount offset = inoffset;
2605*4f1223e8SApple OSS Distributions
2606*4f1223e8SApple OSS Distributions assert( !(kIOMemoryPreparedReadOnly & _flags));
2607*4f1223e8SApple OSS Distributions
2608*4f1223e8SApple OSS Distributions // Check that this entire I/O is within the available range
2609*4f1223e8SApple OSS Distributions if ((offset > _length)
2610*4f1223e8SApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2611*4f1223e8SApple OSS Distributions || (endoffset > _length)) {
2612*4f1223e8SApple OSS Distributions assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2613*4f1223e8SApple OSS Distributions return 0;
2614*4f1223e8SApple OSS Distributions }
2615*4f1223e8SApple OSS Distributions if (kIOMemoryPreparedReadOnly & _flags) {
2616*4f1223e8SApple OSS Distributions return 0;
2617*4f1223e8SApple OSS Distributions }
2618*4f1223e8SApple OSS Distributions if (offset >= _length) {
2619*4f1223e8SApple OSS Distributions return 0;
2620*4f1223e8SApple OSS Distributions }
2621*4f1223e8SApple OSS Distributions
2622*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2623*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
2624*4f1223e8SApple OSS Distributions return 0;
2625*4f1223e8SApple OSS Distributions }
2626*4f1223e8SApple OSS Distributions
2627*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2628*4f1223e8SApple OSS Distributions LOCK;
2629*4f1223e8SApple OSS Distributions }
2630*4f1223e8SApple OSS Distributions
2631*4f1223e8SApple OSS Distributions remaining = length = min(length, _length - offset);
2632*4f1223e8SApple OSS Distributions while (remaining) { // (process another target segment?)
2633*4f1223e8SApple OSS Distributions addr64_t dstAddr64;
2634*4f1223e8SApple OSS Distributions IOByteCount dstLen;
2635*4f1223e8SApple OSS Distributions
2636*4f1223e8SApple OSS Distributions dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2637*4f1223e8SApple OSS Distributions if (!dstAddr64) {
2638*4f1223e8SApple OSS Distributions break;
2639*4f1223e8SApple OSS Distributions }
2640*4f1223e8SApple OSS Distributions
2641*4f1223e8SApple OSS Distributions // Clip segment length to remaining
2642*4f1223e8SApple OSS Distributions if (dstLen > remaining) {
2643*4f1223e8SApple OSS Distributions dstLen = remaining;
2644*4f1223e8SApple OSS Distributions }
2645*4f1223e8SApple OSS Distributions
2646*4f1223e8SApple OSS Distributions if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2647*4f1223e8SApple OSS Distributions dstLen = (UINT_MAX - PAGE_SIZE + 1);
2648*4f1223e8SApple OSS Distributions }
2649*4f1223e8SApple OSS Distributions if (!srcAddr) {
2650*4f1223e8SApple OSS Distributions bzero_phys(dstAddr64, (unsigned int) dstLen);
2651*4f1223e8SApple OSS Distributions } else {
2652*4f1223e8SApple OSS Distributions copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2653*4f1223e8SApple OSS Distributions cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2654*4f1223e8SApple OSS Distributions srcAddr += dstLen;
2655*4f1223e8SApple OSS Distributions }
2656*4f1223e8SApple OSS Distributions offset += dstLen;
2657*4f1223e8SApple OSS Distributions remaining -= dstLen;
2658*4f1223e8SApple OSS Distributions }
2659*4f1223e8SApple OSS Distributions
2660*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2661*4f1223e8SApple OSS Distributions UNLOCK;
2662*4f1223e8SApple OSS Distributions }
2663*4f1223e8SApple OSS Distributions
2664*4f1223e8SApple OSS Distributions assert(!remaining);
2665*4f1223e8SApple OSS Distributions
2666*4f1223e8SApple OSS Distributions #if defined(__x86_64__)
2667*4f1223e8SApple OSS Distributions // copypv does not cppvFsnk on intel
2668*4f1223e8SApple OSS Distributions #else
2669*4f1223e8SApple OSS Distributions if (!srcAddr) {
2670*4f1223e8SApple OSS Distributions performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2671*4f1223e8SApple OSS Distributions }
2672*4f1223e8SApple OSS Distributions #endif
2673*4f1223e8SApple OSS Distributions
2674*4f1223e8SApple OSS Distributions return length - remaining;
2675*4f1223e8SApple OSS Distributions }
2676*4f1223e8SApple OSS Distributions
2677*4f1223e8SApple OSS Distributions #ifndef __LP64__
2678*4f1223e8SApple OSS Distributions void
setPosition(IOByteCount position)2679*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2680*4f1223e8SApple OSS Distributions {
2681*4f1223e8SApple OSS Distributions panic("IOGMD::setPosition deprecated");
2682*4f1223e8SApple OSS Distributions }
2683*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
2684*4f1223e8SApple OSS Distributions
// Global ID generators, advanced with OSIncrementAtomic64.
// Preparation IDs start at 1<<32 — NOTE(review): presumably to keep them
// disjoint from the small kIOPreparationID* sentinel values; confirm.
// Descriptor IDs start just past kIODescriptorIDInvalid.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2687*4f1223e8SApple OSS Distributions
2688*4f1223e8SApple OSS Distributions uint64_t
getPreparationID(void)2689*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::getPreparationID( void )
2690*4f1223e8SApple OSS Distributions {
2691*4f1223e8SApple OSS Distributions ioGMDData *dataP;
2692*4f1223e8SApple OSS Distributions
2693*4f1223e8SApple OSS Distributions if (!_wireCount) {
2694*4f1223e8SApple OSS Distributions return kIOPreparationIDUnprepared;
2695*4f1223e8SApple OSS Distributions }
2696*4f1223e8SApple OSS Distributions
2697*4f1223e8SApple OSS Distributions if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2698*4f1223e8SApple OSS Distributions || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2699*4f1223e8SApple OSS Distributions IOMemoryDescriptor::setPreparationID();
2700*4f1223e8SApple OSS Distributions return IOMemoryDescriptor::getPreparationID();
2701*4f1223e8SApple OSS Distributions }
2702*4f1223e8SApple OSS Distributions
2703*4f1223e8SApple OSS Distributions if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2704*4f1223e8SApple OSS Distributions return kIOPreparationIDUnprepared;
2705*4f1223e8SApple OSS Distributions }
2706*4f1223e8SApple OSS Distributions
2707*4f1223e8SApple OSS Distributions if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2708*4f1223e8SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2709*4f1223e8SApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2710*4f1223e8SApple OSS Distributions }
2711*4f1223e8SApple OSS Distributions return dataP->fPreparationID;
2712*4f1223e8SApple OSS Distributions }
2713*4f1223e8SApple OSS Distributions
2714*4f1223e8SApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2715*4f1223e8SApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2716*4f1223e8SApple OSS Distributions {
2717*4f1223e8SApple OSS Distributions if (reserved->creator) {
2718*4f1223e8SApple OSS Distributions task_deallocate(reserved->creator);
2719*4f1223e8SApple OSS Distributions reserved->creator = NULL;
2720*4f1223e8SApple OSS Distributions }
2721*4f1223e8SApple OSS Distributions
2722*4f1223e8SApple OSS Distributions if (reserved->contextObject) {
2723*4f1223e8SApple OSS Distributions reserved->contextObject->release();
2724*4f1223e8SApple OSS Distributions reserved->contextObject = NULL;
2725*4f1223e8SApple OSS Distributions }
2726*4f1223e8SApple OSS Distributions }
2727*4f1223e8SApple OSS Distributions
2728*4f1223e8SApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2729*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2730*4f1223e8SApple OSS Distributions {
2731*4f1223e8SApple OSS Distributions if (!reserved) {
2732*4f1223e8SApple OSS Distributions reserved = IOMallocType(IOMemoryDescriptorReserved);
2733*4f1223e8SApple OSS Distributions }
2734*4f1223e8SApple OSS Distributions return reserved;
2735*4f1223e8SApple OSS Distributions }
2736*4f1223e8SApple OSS Distributions
2737*4f1223e8SApple OSS Distributions void
setPreparationID(void)2738*4f1223e8SApple OSS Distributions IOMemoryDescriptor::setPreparationID( void )
2739*4f1223e8SApple OSS Distributions {
2740*4f1223e8SApple OSS Distributions if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2741*4f1223e8SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2742*4f1223e8SApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2743*4f1223e8SApple OSS Distributions }
2744*4f1223e8SApple OSS Distributions }
2745*4f1223e8SApple OSS Distributions
2746*4f1223e8SApple OSS Distributions uint64_t
getPreparationID(void)2747*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2748*4f1223e8SApple OSS Distributions {
2749*4f1223e8SApple OSS Distributions if (reserved) {
2750*4f1223e8SApple OSS Distributions return reserved->preparationID;
2751*4f1223e8SApple OSS Distributions } else {
2752*4f1223e8SApple OSS Distributions return kIOPreparationIDUnsupported;
2753*4f1223e8SApple OSS Distributions }
2754*4f1223e8SApple OSS Distributions }
2755*4f1223e8SApple OSS Distributions
2756*4f1223e8SApple OSS Distributions void
setDescriptorID(void)2757*4f1223e8SApple OSS Distributions IOMemoryDescriptor::setDescriptorID( void )
2758*4f1223e8SApple OSS Distributions {
2759*4f1223e8SApple OSS Distributions if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2760*4f1223e8SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2761*4f1223e8SApple OSS Distributions OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2762*4f1223e8SApple OSS Distributions }
2763*4f1223e8SApple OSS Distributions }
2764*4f1223e8SApple OSS Distributions
2765*4f1223e8SApple OSS Distributions uint64_t
getDescriptorID(void)2766*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2767*4f1223e8SApple OSS Distributions {
2768*4f1223e8SApple OSS Distributions setDescriptorID();
2769*4f1223e8SApple OSS Distributions
2770*4f1223e8SApple OSS Distributions if (reserved) {
2771*4f1223e8SApple OSS Distributions return reserved->descriptorID;
2772*4f1223e8SApple OSS Distributions } else {
2773*4f1223e8SApple OSS Distributions return kIODescriptorIDInvalid;
2774*4f1223e8SApple OSS Distributions }
2775*4f1223e8SApple OSS Distributions }
2776*4f1223e8SApple OSS Distributions
2777*4f1223e8SApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2778*4f1223e8SApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2779*4f1223e8SApple OSS Distributions {
2780*4f1223e8SApple OSS Distributions if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2781*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
2782*4f1223e8SApple OSS Distributions }
2783*4f1223e8SApple OSS Distributions
2784*4f1223e8SApple OSS Distributions assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2785*4f1223e8SApple OSS Distributions if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2786*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
2787*4f1223e8SApple OSS Distributions }
2788*4f1223e8SApple OSS Distributions
2789*4f1223e8SApple OSS Distributions uint64_t descriptorID = getDescriptorID();
2790*4f1223e8SApple OSS Distributions assert(descriptorID != kIODescriptorIDInvalid);
2791*4f1223e8SApple OSS Distributions if (getDescriptorID() == kIODescriptorIDInvalid) {
2792*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
2793*4f1223e8SApple OSS Distributions }
2794*4f1223e8SApple OSS Distributions
2795*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2796*4f1223e8SApple OSS Distributions
2797*4f1223e8SApple OSS Distributions #if __LP64__
2798*4f1223e8SApple OSS Distributions static const uint8_t num_segments_page = 8;
2799*4f1223e8SApple OSS Distributions #else
2800*4f1223e8SApple OSS Distributions static const uint8_t num_segments_page = 4;
2801*4f1223e8SApple OSS Distributions #endif
2802*4f1223e8SApple OSS Distributions static const uint8_t num_segments_long = 2;
2803*4f1223e8SApple OSS Distributions
2804*4f1223e8SApple OSS Distributions IOPhysicalAddress segments_page[num_segments_page];
2805*4f1223e8SApple OSS Distributions IOPhysicalRange segments_long[num_segments_long];
2806*4f1223e8SApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2807*4f1223e8SApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2808*4f1223e8SApple OSS Distributions
2809*4f1223e8SApple OSS Distributions uint8_t segment_page_idx = 0;
2810*4f1223e8SApple OSS Distributions uint8_t segment_long_idx = 0;
2811*4f1223e8SApple OSS Distributions
2812*4f1223e8SApple OSS Distributions IOPhysicalRange physical_segment;
2813*4f1223e8SApple OSS Distributions for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2814*4f1223e8SApple OSS Distributions physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2815*4f1223e8SApple OSS Distributions
2816*4f1223e8SApple OSS Distributions if (physical_segment.length == 0) {
2817*4f1223e8SApple OSS Distributions break;
2818*4f1223e8SApple OSS Distributions }
2819*4f1223e8SApple OSS Distributions
2820*4f1223e8SApple OSS Distributions /**
2821*4f1223e8SApple OSS Distributions * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2822*4f1223e8SApple OSS Distributions * buffer memory, pack segment events according to the following.
2823*4f1223e8SApple OSS Distributions *
2824*4f1223e8SApple OSS Distributions * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
2825*4f1223e8SApple OSS Distributions * IOMDPA_MAPPED event emitted on by the current thread_id.
2826*4f1223e8SApple OSS Distributions *
2827*4f1223e8SApple OSS Distributions * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2828*4f1223e8SApple OSS Distributions * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2829*4f1223e8SApple OSS Distributions * - unmapped pages will have a ppn of MAX_INT_32
2830*4f1223e8SApple OSS Distributions * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
2831*4f1223e8SApple OSS Distributions * - address_0, length_0, address_0, length_1
2832*4f1223e8SApple OSS Distributions * - unmapped pages will have an address of 0
2833*4f1223e8SApple OSS Distributions *
2834*4f1223e8SApple OSS Distributions * During each iteration do the following depending on the length of the mapping:
2835*4f1223e8SApple OSS Distributions * 1. add the current segment to the appropriate queue of pending segments
2836*4f1223e8SApple OSS Distributions * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2837*4f1223e8SApple OSS Distributions * 1a. if FALSE emit and reset all events in the previous queue
2838*4f1223e8SApple OSS Distributions * 2. check if we have filled up the current queue of pending events
2839*4f1223e8SApple OSS Distributions * 2a. if TRUE emit and reset all events in the pending queue
2840*4f1223e8SApple OSS Distributions * 3. after completing all iterations emit events in the current queue
2841*4f1223e8SApple OSS Distributions */
2842*4f1223e8SApple OSS Distributions
2843*4f1223e8SApple OSS Distributions bool emit_page = false;
2844*4f1223e8SApple OSS Distributions bool emit_long = false;
2845*4f1223e8SApple OSS Distributions if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2846*4f1223e8SApple OSS Distributions segments_page[segment_page_idx] = physical_segment.address;
2847*4f1223e8SApple OSS Distributions segment_page_idx++;
2848*4f1223e8SApple OSS Distributions
2849*4f1223e8SApple OSS Distributions emit_long = segment_long_idx != 0;
2850*4f1223e8SApple OSS Distributions emit_page = segment_page_idx == num_segments_page;
2851*4f1223e8SApple OSS Distributions
2852*4f1223e8SApple OSS Distributions if (os_unlikely(emit_long)) {
2853*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2854*4f1223e8SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2855*4f1223e8SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2856*4f1223e8SApple OSS Distributions }
2857*4f1223e8SApple OSS Distributions
2858*4f1223e8SApple OSS Distributions if (os_unlikely(emit_page)) {
2859*4f1223e8SApple OSS Distributions #if __LP64__
2860*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2861*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2862*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2863*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2864*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2865*4f1223e8SApple OSS Distributions #else
2866*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2867*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2868*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2869*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2870*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2871*4f1223e8SApple OSS Distributions #endif
2872*4f1223e8SApple OSS Distributions }
2873*4f1223e8SApple OSS Distributions } else {
2874*4f1223e8SApple OSS Distributions segments_long[segment_long_idx] = physical_segment;
2875*4f1223e8SApple OSS Distributions segment_long_idx++;
2876*4f1223e8SApple OSS Distributions
2877*4f1223e8SApple OSS Distributions emit_page = segment_page_idx != 0;
2878*4f1223e8SApple OSS Distributions emit_long = segment_long_idx == num_segments_long;
2879*4f1223e8SApple OSS Distributions
2880*4f1223e8SApple OSS Distributions if (os_unlikely(emit_page)) {
2881*4f1223e8SApple OSS Distributions #if __LP64__
2882*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2883*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2884*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2885*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2886*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2887*4f1223e8SApple OSS Distributions #else
2888*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2889*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2890*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2891*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2892*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2893*4f1223e8SApple OSS Distributions #endif
2894*4f1223e8SApple OSS Distributions }
2895*4f1223e8SApple OSS Distributions
2896*4f1223e8SApple OSS Distributions if (emit_long) {
2897*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2898*4f1223e8SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2899*4f1223e8SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2900*4f1223e8SApple OSS Distributions }
2901*4f1223e8SApple OSS Distributions }
2902*4f1223e8SApple OSS Distributions
2903*4f1223e8SApple OSS Distributions if (os_unlikely(emit_page)) {
2904*4f1223e8SApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2905*4f1223e8SApple OSS Distributions segment_page_idx = 0;
2906*4f1223e8SApple OSS Distributions }
2907*4f1223e8SApple OSS Distributions
2908*4f1223e8SApple OSS Distributions if (os_unlikely(emit_long)) {
2909*4f1223e8SApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2910*4f1223e8SApple OSS Distributions segment_long_idx = 0;
2911*4f1223e8SApple OSS Distributions }
2912*4f1223e8SApple OSS Distributions }
2913*4f1223e8SApple OSS Distributions
2914*4f1223e8SApple OSS Distributions if (segment_page_idx != 0) {
2915*4f1223e8SApple OSS Distributions assert(segment_long_idx == 0);
2916*4f1223e8SApple OSS Distributions #if __LP64__
2917*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2918*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2919*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2920*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2921*4f1223e8SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2922*4f1223e8SApple OSS Distributions #else
2923*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2924*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2925*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2926*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2927*4f1223e8SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2928*4f1223e8SApple OSS Distributions #endif
2929*4f1223e8SApple OSS Distributions } else if (segment_long_idx != 0) {
2930*4f1223e8SApple OSS Distributions assert(segment_page_idx == 0);
2931*4f1223e8SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2932*4f1223e8SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2933*4f1223e8SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2934*4f1223e8SApple OSS Distributions }
2935*4f1223e8SApple OSS Distributions
2936*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
2937*4f1223e8SApple OSS Distributions }
2938*4f1223e8SApple OSS Distributions
2939*4f1223e8SApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2940*4f1223e8SApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2941*4f1223e8SApple OSS Distributions {
2942*4f1223e8SApple OSS Distributions _kernelTag = (vm_tag_t) kernelTag;
2943*4f1223e8SApple OSS Distributions _userTag = (vm_tag_t) userTag;
2944*4f1223e8SApple OSS Distributions }
2945*4f1223e8SApple OSS Distributions
2946*4f1223e8SApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2947*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2948*4f1223e8SApple OSS Distributions {
2949*4f1223e8SApple OSS Distributions if (vm_kernel_map_is_kernel(map)) {
2950*4f1223e8SApple OSS Distributions if (VM_KERN_MEMORY_NONE != _kernelTag) {
2951*4f1223e8SApple OSS Distributions return (uint32_t) _kernelTag;
2952*4f1223e8SApple OSS Distributions }
2953*4f1223e8SApple OSS Distributions } else {
2954*4f1223e8SApple OSS Distributions if (VM_KERN_MEMORY_NONE != _userTag) {
2955*4f1223e8SApple OSS Distributions return (uint32_t) _userTag;
2956*4f1223e8SApple OSS Distributions }
2957*4f1223e8SApple OSS Distributions }
2958*4f1223e8SApple OSS Distributions return IOMemoryTag(map);
2959*4f1223e8SApple OSS Distributions }
2960*4f1223e8SApple OSS Distributions
2961*4f1223e8SApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2962*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2963*4f1223e8SApple OSS Distributions {
2964*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnSuccess;
2965*4f1223e8SApple OSS Distributions DMACommandOps params;
2966*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2967*4f1223e8SApple OSS Distributions ioGMDData *dataP;
2968*4f1223e8SApple OSS Distributions
2969*4f1223e8SApple OSS Distributions params = (op & ~kIOMDDMACommandOperationMask & op);
2970*4f1223e8SApple OSS Distributions op &= kIOMDDMACommandOperationMask;
2971*4f1223e8SApple OSS Distributions
2972*4f1223e8SApple OSS Distributions if (kIOMDDMAMap == op) {
2973*4f1223e8SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
2974*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
2975*4f1223e8SApple OSS Distributions }
2976*4f1223e8SApple OSS Distributions
2977*4f1223e8SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2978*4f1223e8SApple OSS Distributions
2979*4f1223e8SApple OSS Distributions if (!_memoryEntries
2980*4f1223e8SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2981*4f1223e8SApple OSS Distributions return kIOReturnNoMemory;
2982*4f1223e8SApple OSS Distributions }
2983*4f1223e8SApple OSS Distributions
2984*4f1223e8SApple OSS Distributions if (_memoryEntries && data->fMapper) {
2985*4f1223e8SApple OSS Distributions bool remap, keepMap;
2986*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
2987*4f1223e8SApple OSS Distributions
2988*4f1223e8SApple OSS Distributions if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2989*4f1223e8SApple OSS Distributions dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2990*4f1223e8SApple OSS Distributions }
2991*4f1223e8SApple OSS Distributions if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2992*4f1223e8SApple OSS Distributions dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2993*4f1223e8SApple OSS Distributions }
2994*4f1223e8SApple OSS Distributions
2995*4f1223e8SApple OSS Distributions keepMap = (data->fMapper == gIOSystemMapper);
2996*4f1223e8SApple OSS Distributions keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2997*4f1223e8SApple OSS Distributions
2998*4f1223e8SApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2999*4f1223e8SApple OSS Distributions IOLockLock(_prepareLock);
3000*4f1223e8SApple OSS Distributions }
3001*4f1223e8SApple OSS Distributions
3002*4f1223e8SApple OSS Distributions remap = (!keepMap);
3003*4f1223e8SApple OSS Distributions remap |= (dataP->fDMAMapNumAddressBits < 64)
3004*4f1223e8SApple OSS Distributions && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3005*4f1223e8SApple OSS Distributions remap |= (dataP->fDMAMapAlignment > page_size);
3006*4f1223e8SApple OSS Distributions
3007*4f1223e8SApple OSS Distributions if (remap || !dataP->fMappedBaseValid) {
3008*4f1223e8SApple OSS Distributions err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3009*4f1223e8SApple OSS Distributions if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3010*4f1223e8SApple OSS Distributions dataP->fMappedBase = data->fAlloc;
3011*4f1223e8SApple OSS Distributions dataP->fMappedBaseValid = true;
3012*4f1223e8SApple OSS Distributions dataP->fMappedLength = data->fAllocLength;
3013*4f1223e8SApple OSS Distributions data->fAllocLength = 0; // IOMD owns the alloc now
3014*4f1223e8SApple OSS Distributions }
3015*4f1223e8SApple OSS Distributions } else {
3016*4f1223e8SApple OSS Distributions data->fAlloc = dataP->fMappedBase;
3017*4f1223e8SApple OSS Distributions data->fAllocLength = 0; // give out IOMD map
3018*4f1223e8SApple OSS Distributions md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3019*4f1223e8SApple OSS Distributions }
3020*4f1223e8SApple OSS Distributions
3021*4f1223e8SApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3022*4f1223e8SApple OSS Distributions IOLockUnlock(_prepareLock);
3023*4f1223e8SApple OSS Distributions }
3024*4f1223e8SApple OSS Distributions }
3025*4f1223e8SApple OSS Distributions return err;
3026*4f1223e8SApple OSS Distributions }
3027*4f1223e8SApple OSS Distributions if (kIOMDDMAUnmap == op) {
3028*4f1223e8SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3029*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
3030*4f1223e8SApple OSS Distributions }
3031*4f1223e8SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3032*4f1223e8SApple OSS Distributions
3033*4f1223e8SApple OSS Distributions if (_pages) {
3034*4f1223e8SApple OSS Distributions err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3035*4f1223e8SApple OSS Distributions }
3036*4f1223e8SApple OSS Distributions
3037*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
3038*4f1223e8SApple OSS Distributions }
3039*4f1223e8SApple OSS Distributions
3040*4f1223e8SApple OSS Distributions if (kIOMDAddDMAMapSpec == op) {
3041*4f1223e8SApple OSS Distributions if (dataSize < sizeof(IODMAMapSpecification)) {
3042*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
3043*4f1223e8SApple OSS Distributions }
3044*4f1223e8SApple OSS Distributions
3045*4f1223e8SApple OSS Distributions IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3046*4f1223e8SApple OSS Distributions
3047*4f1223e8SApple OSS Distributions if (!_memoryEntries
3048*4f1223e8SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3049*4f1223e8SApple OSS Distributions return kIOReturnNoMemory;
3050*4f1223e8SApple OSS Distributions }
3051*4f1223e8SApple OSS Distributions
3052*4f1223e8SApple OSS Distributions if (_memoryEntries) {
3053*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
3054*4f1223e8SApple OSS Distributions if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3055*4f1223e8SApple OSS Distributions dataP->fDMAMapNumAddressBits = data->numAddressBits;
3056*4f1223e8SApple OSS Distributions }
3057*4f1223e8SApple OSS Distributions if (data->alignment > dataP->fDMAMapAlignment) {
3058*4f1223e8SApple OSS Distributions dataP->fDMAMapAlignment = data->alignment;
3059*4f1223e8SApple OSS Distributions }
3060*4f1223e8SApple OSS Distributions }
3061*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
3062*4f1223e8SApple OSS Distributions }
3063*4f1223e8SApple OSS Distributions
3064*4f1223e8SApple OSS Distributions if (kIOMDGetCharacteristics == op) {
3065*4f1223e8SApple OSS Distributions if (dataSize < sizeof(IOMDDMACharacteristics)) {
3066*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
3067*4f1223e8SApple OSS Distributions }
3068*4f1223e8SApple OSS Distributions
3069*4f1223e8SApple OSS Distributions IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3070*4f1223e8SApple OSS Distributions data->fLength = _length;
3071*4f1223e8SApple OSS Distributions data->fSGCount = _rangesCount;
3072*4f1223e8SApple OSS Distributions data->fPages = _pages;
3073*4f1223e8SApple OSS Distributions data->fDirection = getDirection();
3074*4f1223e8SApple OSS Distributions if (!_wireCount) {
3075*4f1223e8SApple OSS Distributions data->fIsPrepared = false;
3076*4f1223e8SApple OSS Distributions } else {
3077*4f1223e8SApple OSS Distributions data->fIsPrepared = true;
3078*4f1223e8SApple OSS Distributions data->fHighestPage = _highestPage;
3079*4f1223e8SApple OSS Distributions if (_memoryEntries) {
3080*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
3081*4f1223e8SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
3082*4f1223e8SApple OSS Distributions UInt count = getNumIOPL(_memoryEntries, dataP);
3083*4f1223e8SApple OSS Distributions if (count == 1) {
3084*4f1223e8SApple OSS Distributions data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3085*4f1223e8SApple OSS Distributions }
3086*4f1223e8SApple OSS Distributions }
3087*4f1223e8SApple OSS Distributions }
3088*4f1223e8SApple OSS Distributions
3089*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
3090*4f1223e8SApple OSS Distributions } else if (kIOMDDMAActive == op) {
3091*4f1223e8SApple OSS Distributions if (params) {
3092*4f1223e8SApple OSS Distributions int16_t prior;
3093*4f1223e8SApple OSS Distributions prior = OSAddAtomic16(1, &md->_dmaReferences);
3094*4f1223e8SApple OSS Distributions if (!prior) {
3095*4f1223e8SApple OSS Distributions md->_mapName = NULL;
3096*4f1223e8SApple OSS Distributions }
3097*4f1223e8SApple OSS Distributions } else {
3098*4f1223e8SApple OSS Distributions if (md->_dmaReferences) {
3099*4f1223e8SApple OSS Distributions OSAddAtomic16(-1, &md->_dmaReferences);
3100*4f1223e8SApple OSS Distributions } else {
3101*4f1223e8SApple OSS Distributions panic("_dmaReferences underflow");
3102*4f1223e8SApple OSS Distributions }
3103*4f1223e8SApple OSS Distributions }
3104*4f1223e8SApple OSS Distributions } else if (kIOMDWalkSegments != op) {
3105*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
3106*4f1223e8SApple OSS Distributions }
3107*4f1223e8SApple OSS Distributions
3108*4f1223e8SApple OSS Distributions // Get the next segment
3109*4f1223e8SApple OSS Distributions struct InternalState {
3110*4f1223e8SApple OSS Distributions IOMDDMAWalkSegmentArgs fIO;
3111*4f1223e8SApple OSS Distributions mach_vm_size_t fOffset2Index;
3112*4f1223e8SApple OSS Distributions mach_vm_size_t fNextOffset;
3113*4f1223e8SApple OSS Distributions UInt fIndex;
3114*4f1223e8SApple OSS Distributions } *isP;
3115*4f1223e8SApple OSS Distributions
3116*4f1223e8SApple OSS Distributions // Find the next segment
3117*4f1223e8SApple OSS Distributions if (dataSize < sizeof(*isP)) {
3118*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
3119*4f1223e8SApple OSS Distributions }
3120*4f1223e8SApple OSS Distributions
3121*4f1223e8SApple OSS Distributions isP = (InternalState *) vData;
3122*4f1223e8SApple OSS Distributions uint64_t offset = isP->fIO.fOffset;
3123*4f1223e8SApple OSS Distributions uint8_t mapped = isP->fIO.fMapped;
3124*4f1223e8SApple OSS Distributions uint64_t mappedBase;
3125*4f1223e8SApple OSS Distributions
3126*4f1223e8SApple OSS Distributions if (mapped && (kIOMemoryRemote & _flags)) {
3127*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
3128*4f1223e8SApple OSS Distributions }
3129*4f1223e8SApple OSS Distributions
3130*4f1223e8SApple OSS Distributions if (IOMapper::gSystem && mapped
3131*4f1223e8SApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3132*4f1223e8SApple OSS Distributions && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3133*4f1223e8SApple OSS Distributions // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3134*4f1223e8SApple OSS Distributions if (!_memoryEntries
3135*4f1223e8SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3136*4f1223e8SApple OSS Distributions return kIOReturnNoMemory;
3137*4f1223e8SApple OSS Distributions }
3138*4f1223e8SApple OSS Distributions
3139*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
3140*4f1223e8SApple OSS Distributions if (dataP->fMapper) {
3141*4f1223e8SApple OSS Distributions IODMAMapSpecification mapSpec;
3142*4f1223e8SApple OSS Distributions bzero(&mapSpec, sizeof(mapSpec));
3143*4f1223e8SApple OSS Distributions mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3144*4f1223e8SApple OSS Distributions mapSpec.alignment = dataP->fDMAMapAlignment;
3145*4f1223e8SApple OSS Distributions err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3146*4f1223e8SApple OSS Distributions if (kIOReturnSuccess != err) {
3147*4f1223e8SApple OSS Distributions return err;
3148*4f1223e8SApple OSS Distributions }
3149*4f1223e8SApple OSS Distributions dataP->fMappedBaseValid = true;
3150*4f1223e8SApple OSS Distributions }
3151*4f1223e8SApple OSS Distributions }
3152*4f1223e8SApple OSS Distributions
3153*4f1223e8SApple OSS Distributions if (mapped) {
3154*4f1223e8SApple OSS Distributions if (IOMapper::gSystem
3155*4f1223e8SApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3156*4f1223e8SApple OSS Distributions && _memoryEntries
3157*4f1223e8SApple OSS Distributions && (dataP = getDataP(_memoryEntries))
3158*4f1223e8SApple OSS Distributions && dataP->fMappedBaseValid) {
3159*4f1223e8SApple OSS Distributions mappedBase = dataP->fMappedBase;
3160*4f1223e8SApple OSS Distributions } else {
3161*4f1223e8SApple OSS Distributions mapped = 0;
3162*4f1223e8SApple OSS Distributions }
3163*4f1223e8SApple OSS Distributions }
3164*4f1223e8SApple OSS Distributions
3165*4f1223e8SApple OSS Distributions if (offset >= _length) {
3166*4f1223e8SApple OSS Distributions return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3167*4f1223e8SApple OSS Distributions }
3168*4f1223e8SApple OSS Distributions
3169*4f1223e8SApple OSS Distributions // Validate the previous offset
3170*4f1223e8SApple OSS Distributions UInt ind;
3171*4f1223e8SApple OSS Distributions mach_vm_size_t off2Ind = isP->fOffset2Index;
3172*4f1223e8SApple OSS Distributions if (!params
3173*4f1223e8SApple OSS Distributions && offset
3174*4f1223e8SApple OSS Distributions && (offset == isP->fNextOffset || off2Ind <= offset)) {
3175*4f1223e8SApple OSS Distributions ind = isP->fIndex;
3176*4f1223e8SApple OSS Distributions } else {
3177*4f1223e8SApple OSS Distributions ind = off2Ind = 0; // Start from beginning
3178*4f1223e8SApple OSS Distributions }
3179*4f1223e8SApple OSS Distributions mach_vm_size_t length;
3180*4f1223e8SApple OSS Distributions UInt64 address;
3181*4f1223e8SApple OSS Distributions
3182*4f1223e8SApple OSS Distributions if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3183*4f1223e8SApple OSS Distributions // Physical address based memory descriptor
3184*4f1223e8SApple OSS Distributions const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3185*4f1223e8SApple OSS Distributions
3186*4f1223e8SApple OSS Distributions // Find the range after the one that contains the offset
3187*4f1223e8SApple OSS Distributions mach_vm_size_t len;
3188*4f1223e8SApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3189*4f1223e8SApple OSS Distributions len = physP[ind].length;
3190*4f1223e8SApple OSS Distributions off2Ind += len;
3191*4f1223e8SApple OSS Distributions }
3192*4f1223e8SApple OSS Distributions
3193*4f1223e8SApple OSS Distributions // Calculate length within range and starting address
3194*4f1223e8SApple OSS Distributions length = off2Ind - offset;
3195*4f1223e8SApple OSS Distributions address = physP[ind - 1].address + len - length;
3196*4f1223e8SApple OSS Distributions
3197*4f1223e8SApple OSS Distributions if (true && mapped) {
3198*4f1223e8SApple OSS Distributions address = mappedBase + offset;
3199*4f1223e8SApple OSS Distributions } else {
3200*4f1223e8SApple OSS Distributions // see how far we can coalesce ranges
3201*4f1223e8SApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3202*4f1223e8SApple OSS Distributions len = physP[ind].length;
3203*4f1223e8SApple OSS Distributions length += len;
3204*4f1223e8SApple OSS Distributions off2Ind += len;
3205*4f1223e8SApple OSS Distributions ind++;
3206*4f1223e8SApple OSS Distributions }
3207*4f1223e8SApple OSS Distributions }
3208*4f1223e8SApple OSS Distributions
3209*4f1223e8SApple OSS Distributions // correct contiguous check overshoot
3210*4f1223e8SApple OSS Distributions ind--;
3211*4f1223e8SApple OSS Distributions off2Ind -= len;
3212*4f1223e8SApple OSS Distributions }
3213*4f1223e8SApple OSS Distributions #ifndef __LP64__
3214*4f1223e8SApple OSS Distributions else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3215*4f1223e8SApple OSS Distributions // Physical address based memory descriptor
3216*4f1223e8SApple OSS Distributions const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3217*4f1223e8SApple OSS Distributions
3218*4f1223e8SApple OSS Distributions // Find the range after the one that contains the offset
3219*4f1223e8SApple OSS Distributions mach_vm_size_t len;
3220*4f1223e8SApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3221*4f1223e8SApple OSS Distributions len = physP[ind].length;
3222*4f1223e8SApple OSS Distributions off2Ind += len;
3223*4f1223e8SApple OSS Distributions }
3224*4f1223e8SApple OSS Distributions
3225*4f1223e8SApple OSS Distributions // Calculate length within range and starting address
3226*4f1223e8SApple OSS Distributions length = off2Ind - offset;
3227*4f1223e8SApple OSS Distributions address = physP[ind - 1].address + len - length;
3228*4f1223e8SApple OSS Distributions
3229*4f1223e8SApple OSS Distributions if (true && mapped) {
3230*4f1223e8SApple OSS Distributions address = mappedBase + offset;
3231*4f1223e8SApple OSS Distributions } else {
3232*4f1223e8SApple OSS Distributions // see how far we can coalesce ranges
3233*4f1223e8SApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3234*4f1223e8SApple OSS Distributions len = physP[ind].length;
3235*4f1223e8SApple OSS Distributions length += len;
3236*4f1223e8SApple OSS Distributions off2Ind += len;
3237*4f1223e8SApple OSS Distributions ind++;
3238*4f1223e8SApple OSS Distributions }
3239*4f1223e8SApple OSS Distributions }
3240*4f1223e8SApple OSS Distributions // correct contiguous check overshoot
3241*4f1223e8SApple OSS Distributions ind--;
3242*4f1223e8SApple OSS Distributions off2Ind -= len;
3243*4f1223e8SApple OSS Distributions }
3244*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
3245*4f1223e8SApple OSS Distributions else {
3246*4f1223e8SApple OSS Distributions do {
3247*4f1223e8SApple OSS Distributions if (!_wireCount) {
3248*4f1223e8SApple OSS Distributions panic("IOGMD: not wired for the IODMACommand");
3249*4f1223e8SApple OSS Distributions }
3250*4f1223e8SApple OSS Distributions
3251*4f1223e8SApple OSS Distributions assert(_memoryEntries);
3252*4f1223e8SApple OSS Distributions
3253*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
3254*4f1223e8SApple OSS Distributions const ioPLBlock *ioplList = getIOPLList(dataP);
3255*4f1223e8SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3256*4f1223e8SApple OSS Distributions upl_page_info_t *pageList = getPageList(dataP);
3257*4f1223e8SApple OSS Distributions
3258*4f1223e8SApple OSS Distributions assert(numIOPLs > 0);
3259*4f1223e8SApple OSS Distributions
3260*4f1223e8SApple OSS Distributions // Scan through iopl info blocks looking for block containing offset
3261*4f1223e8SApple OSS Distributions while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3262*4f1223e8SApple OSS Distributions ind++;
3263*4f1223e8SApple OSS Distributions }
3264*4f1223e8SApple OSS Distributions
3265*4f1223e8SApple OSS Distributions // Go back to actual range as search goes past it
3266*4f1223e8SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ind - 1];
3267*4f1223e8SApple OSS Distributions off2Ind = ioplInfo.fIOMDOffset;
3268*4f1223e8SApple OSS Distributions
3269*4f1223e8SApple OSS Distributions if (ind < numIOPLs) {
3270*4f1223e8SApple OSS Distributions length = ioplList[ind].fIOMDOffset;
3271*4f1223e8SApple OSS Distributions } else {
3272*4f1223e8SApple OSS Distributions length = _length;
3273*4f1223e8SApple OSS Distributions }
3274*4f1223e8SApple OSS Distributions length -= offset; // Remainder within iopl
3275*4f1223e8SApple OSS Distributions
3276*4f1223e8SApple OSS Distributions // Subtract offset till this iopl in total list
3277*4f1223e8SApple OSS Distributions offset -= off2Ind;
3278*4f1223e8SApple OSS Distributions
3279*4f1223e8SApple OSS Distributions // If a mapped address is requested and this is a pre-mapped IOPL
3280*4f1223e8SApple OSS Distributions // then just need to compute an offset relative to the mapped base.
3281*4f1223e8SApple OSS Distributions if (mapped) {
3282*4f1223e8SApple OSS Distributions offset += (ioplInfo.fPageOffset & PAGE_MASK);
3283*4f1223e8SApple OSS Distributions address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3284*4f1223e8SApple OSS Distributions continue; // Done leave do/while(false) now
3285*4f1223e8SApple OSS Distributions }
3286*4f1223e8SApple OSS Distributions
3287*4f1223e8SApple OSS Distributions // The offset is rebased into the current iopl.
3288*4f1223e8SApple OSS Distributions // Now add the iopl 1st page offset.
3289*4f1223e8SApple OSS Distributions offset += ioplInfo.fPageOffset;
3290*4f1223e8SApple OSS Distributions
3291*4f1223e8SApple OSS Distributions // For external UPLs the fPageInfo field points directly to
3292*4f1223e8SApple OSS Distributions // the upl's upl_page_info_t array.
3293*4f1223e8SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
3294*4f1223e8SApple OSS Distributions pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3295*4f1223e8SApple OSS Distributions } else {
3296*4f1223e8SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
3297*4f1223e8SApple OSS Distributions }
3298*4f1223e8SApple OSS Distributions
3299*4f1223e8SApple OSS Distributions // Check for direct device non-paged memory
3300*4f1223e8SApple OSS Distributions if (ioplInfo.fFlags & kIOPLOnDevice) {
3301*4f1223e8SApple OSS Distributions address = ptoa_64(pageList->phys_addr) + offset;
3302*4f1223e8SApple OSS Distributions continue; // Done leave do/while(false) now
3303*4f1223e8SApple OSS Distributions }
3304*4f1223e8SApple OSS Distributions
3305*4f1223e8SApple OSS Distributions // Now we need compute the index into the pageList
3306*4f1223e8SApple OSS Distributions UInt pageInd = atop_32(offset);
3307*4f1223e8SApple OSS Distributions offset &= PAGE_MASK;
3308*4f1223e8SApple OSS Distributions
3309*4f1223e8SApple OSS Distributions // Compute the starting address of this segment
3310*4f1223e8SApple OSS Distributions IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3311*4f1223e8SApple OSS Distributions if (!pageAddr) {
3312*4f1223e8SApple OSS Distributions panic("!pageList phys_addr");
3313*4f1223e8SApple OSS Distributions }
3314*4f1223e8SApple OSS Distributions
3315*4f1223e8SApple OSS Distributions address = ptoa_64(pageAddr) + offset;
3316*4f1223e8SApple OSS Distributions
3317*4f1223e8SApple OSS Distributions // length is currently set to the length of the remainider of the iopl.
3318*4f1223e8SApple OSS Distributions // We need to check that the remainder of the iopl is contiguous.
3319*4f1223e8SApple OSS Distributions // This is indicated by pageList[ind].phys_addr being sequential.
3320*4f1223e8SApple OSS Distributions IOByteCount contigLength = PAGE_SIZE - offset;
3321*4f1223e8SApple OSS Distributions while (contigLength < length
3322*4f1223e8SApple OSS Distributions && ++pageAddr == pageList[++pageInd].phys_addr) {
3323*4f1223e8SApple OSS Distributions contigLength += PAGE_SIZE;
3324*4f1223e8SApple OSS Distributions }
3325*4f1223e8SApple OSS Distributions
3326*4f1223e8SApple OSS Distributions if (contigLength < length) {
3327*4f1223e8SApple OSS Distributions length = contigLength;
3328*4f1223e8SApple OSS Distributions }
3329*4f1223e8SApple OSS Distributions
3330*4f1223e8SApple OSS Distributions
3331*4f1223e8SApple OSS Distributions assert(address);
3332*4f1223e8SApple OSS Distributions assert(length);
3333*4f1223e8SApple OSS Distributions } while (false);
3334*4f1223e8SApple OSS Distributions }
3335*4f1223e8SApple OSS Distributions
3336*4f1223e8SApple OSS Distributions // Update return values and state
3337*4f1223e8SApple OSS Distributions isP->fIO.fIOVMAddr = address;
3338*4f1223e8SApple OSS Distributions isP->fIO.fLength = length;
3339*4f1223e8SApple OSS Distributions isP->fIndex = ind;
3340*4f1223e8SApple OSS Distributions isP->fOffset2Index = off2Ind;
3341*4f1223e8SApple OSS Distributions isP->fNextOffset = isP->fIO.fOffset + length;
3342*4f1223e8SApple OSS Distributions
3343*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
3344*4f1223e8SApple OSS Distributions }
3345*4f1223e8SApple OSS Distributions
3346*4f1223e8SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3347*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3348*4f1223e8SApple OSS Distributions {
3349*4f1223e8SApple OSS Distributions IOReturn ret;
3350*4f1223e8SApple OSS Distributions mach_vm_address_t address = 0;
3351*4f1223e8SApple OSS Distributions mach_vm_size_t length = 0;
3352*4f1223e8SApple OSS Distributions IOMapper * mapper = gIOSystemMapper;
3353*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
3354*4f1223e8SApple OSS Distributions
3355*4f1223e8SApple OSS Distributions if (lengthOfSegment) {
3356*4f1223e8SApple OSS Distributions *lengthOfSegment = 0;
3357*4f1223e8SApple OSS Distributions }
3358*4f1223e8SApple OSS Distributions
3359*4f1223e8SApple OSS Distributions if (offset >= _length) {
3360*4f1223e8SApple OSS Distributions return 0;
3361*4f1223e8SApple OSS Distributions }
3362*4f1223e8SApple OSS Distributions
3363*4f1223e8SApple OSS Distributions // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3364*4f1223e8SApple OSS Distributions // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3365*4f1223e8SApple OSS Distributions // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3366*4f1223e8SApple OSS Distributions // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3367*4f1223e8SApple OSS Distributions
3368*4f1223e8SApple OSS Distributions if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3369*4f1223e8SApple OSS Distributions unsigned rangesIndex = 0;
3370*4f1223e8SApple OSS Distributions Ranges vec = _ranges;
3371*4f1223e8SApple OSS Distributions mach_vm_address_t addr;
3372*4f1223e8SApple OSS Distributions
3373*4f1223e8SApple OSS Distributions // Find starting address within the vector of ranges
3374*4f1223e8SApple OSS Distributions for (;;) {
3375*4f1223e8SApple OSS Distributions getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
3376*4f1223e8SApple OSS Distributions if (offset < length) {
3377*4f1223e8SApple OSS Distributions break;
3378*4f1223e8SApple OSS Distributions }
3379*4f1223e8SApple OSS Distributions offset -= length; // (make offset relative)
3380*4f1223e8SApple OSS Distributions rangesIndex++;
3381*4f1223e8SApple OSS Distributions }
3382*4f1223e8SApple OSS Distributions
3383*4f1223e8SApple OSS Distributions // Now that we have the starting range,
3384*4f1223e8SApple OSS Distributions // lets find the last contiguous range
3385*4f1223e8SApple OSS Distributions addr += offset;
3386*4f1223e8SApple OSS Distributions length -= offset;
3387*4f1223e8SApple OSS Distributions
3388*4f1223e8SApple OSS Distributions for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3389*4f1223e8SApple OSS Distributions mach_vm_address_t newAddr;
3390*4f1223e8SApple OSS Distributions mach_vm_size_t newLen;
3391*4f1223e8SApple OSS Distributions
3392*4f1223e8SApple OSS Distributions getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
3393*4f1223e8SApple OSS Distributions if (addr + length != newAddr) {
3394*4f1223e8SApple OSS Distributions break;
3395*4f1223e8SApple OSS Distributions }
3396*4f1223e8SApple OSS Distributions length += newLen;
3397*4f1223e8SApple OSS Distributions }
3398*4f1223e8SApple OSS Distributions if (addr) {
3399*4f1223e8SApple OSS Distributions address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3400*4f1223e8SApple OSS Distributions }
3401*4f1223e8SApple OSS Distributions } else {
3402*4f1223e8SApple OSS Distributions IOMDDMAWalkSegmentState _state;
3403*4f1223e8SApple OSS Distributions IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3404*4f1223e8SApple OSS Distributions
3405*4f1223e8SApple OSS Distributions state->fOffset = offset;
3406*4f1223e8SApple OSS Distributions state->fLength = _length - offset;
3407*4f1223e8SApple OSS Distributions state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3408*4f1223e8SApple OSS Distributions
3409*4f1223e8SApple OSS Distributions ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3410*4f1223e8SApple OSS Distributions
3411*4f1223e8SApple OSS Distributions if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3412*4f1223e8SApple OSS Distributions DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3413*4f1223e8SApple OSS Distributions ret, this, state->fOffset,
3414*4f1223e8SApple OSS Distributions state->fIOVMAddr, state->fLength);
3415*4f1223e8SApple OSS Distributions }
3416*4f1223e8SApple OSS Distributions if (kIOReturnSuccess == ret) {
3417*4f1223e8SApple OSS Distributions address = state->fIOVMAddr;
3418*4f1223e8SApple OSS Distributions length = state->fLength;
3419*4f1223e8SApple OSS Distributions }
3420*4f1223e8SApple OSS Distributions
3421*4f1223e8SApple OSS Distributions // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3422*4f1223e8SApple OSS Distributions // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3423*4f1223e8SApple OSS Distributions
3424*4f1223e8SApple OSS Distributions if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3425*4f1223e8SApple OSS Distributions if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3426*4f1223e8SApple OSS Distributions addr64_t origAddr = address;
3427*4f1223e8SApple OSS Distributions IOByteCount origLen = length;
3428*4f1223e8SApple OSS Distributions
3429*4f1223e8SApple OSS Distributions address = mapper->mapToPhysicalAddress(origAddr);
3430*4f1223e8SApple OSS Distributions length = page_size - (address & (page_size - 1));
3431*4f1223e8SApple OSS Distributions while ((length < origLen)
3432*4f1223e8SApple OSS Distributions && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3433*4f1223e8SApple OSS Distributions length += page_size;
3434*4f1223e8SApple OSS Distributions }
3435*4f1223e8SApple OSS Distributions if (length > origLen) {
3436*4f1223e8SApple OSS Distributions length = origLen;
3437*4f1223e8SApple OSS Distributions }
3438*4f1223e8SApple OSS Distributions }
3439*4f1223e8SApple OSS Distributions }
3440*4f1223e8SApple OSS Distributions }
3441*4f1223e8SApple OSS Distributions
3442*4f1223e8SApple OSS Distributions if (!address) {
3443*4f1223e8SApple OSS Distributions length = 0;
3444*4f1223e8SApple OSS Distributions }
3445*4f1223e8SApple OSS Distributions
3446*4f1223e8SApple OSS Distributions if (lengthOfSegment) {
3447*4f1223e8SApple OSS Distributions *lengthOfSegment = length;
3448*4f1223e8SApple OSS Distributions }
3449*4f1223e8SApple OSS Distributions
3450*4f1223e8SApple OSS Distributions return address;
3451*4f1223e8SApple OSS Distributions }
3452*4f1223e8SApple OSS Distributions
3453*4f1223e8SApple OSS Distributions #ifndef __LP64__
3454*4f1223e8SApple OSS Distributions #pragma clang diagnostic push
3455*4f1223e8SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3456*4f1223e8SApple OSS Distributions
3457*4f1223e8SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3458*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3459*4f1223e8SApple OSS Distributions {
3460*4f1223e8SApple OSS Distributions addr64_t address = 0;
3461*4f1223e8SApple OSS Distributions
3462*4f1223e8SApple OSS Distributions if (options & _kIOMemorySourceSegment) {
3463*4f1223e8SApple OSS Distributions address = getSourceSegment(offset, lengthOfSegment);
3464*4f1223e8SApple OSS Distributions } else if (options & kIOMemoryMapperNone) {
3465*4f1223e8SApple OSS Distributions address = getPhysicalSegment64(offset, lengthOfSegment);
3466*4f1223e8SApple OSS Distributions } else {
3467*4f1223e8SApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment);
3468*4f1223e8SApple OSS Distributions }
3469*4f1223e8SApple OSS Distributions
3470*4f1223e8SApple OSS Distributions return address;
3471*4f1223e8SApple OSS Distributions }
3472*4f1223e8SApple OSS Distributions #pragma clang diagnostic pop
3473*4f1223e8SApple OSS Distributions
3474*4f1223e8SApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3475*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3476*4f1223e8SApple OSS Distributions {
3477*4f1223e8SApple OSS Distributions return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3478*4f1223e8SApple OSS Distributions }
3479*4f1223e8SApple OSS Distributions
3480*4f1223e8SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3481*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3482*4f1223e8SApple OSS Distributions {
3483*4f1223e8SApple OSS Distributions addr64_t address = 0;
3484*4f1223e8SApple OSS Distributions IOByteCount length = 0;
3485*4f1223e8SApple OSS Distributions
3486*4f1223e8SApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment, 0);
3487*4f1223e8SApple OSS Distributions
3488*4f1223e8SApple OSS Distributions if (lengthOfSegment) {
3489*4f1223e8SApple OSS Distributions length = *lengthOfSegment;
3490*4f1223e8SApple OSS Distributions }
3491*4f1223e8SApple OSS Distributions
3492*4f1223e8SApple OSS Distributions if ((address + length) > 0x100000000ULL) {
3493*4f1223e8SApple OSS Distributions panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3494*4f1223e8SApple OSS Distributions address, (long) length, (getMetaClass())->getClassName());
3495*4f1223e8SApple OSS Distributions }
3496*4f1223e8SApple OSS Distributions
3497*4f1223e8SApple OSS Distributions return (IOPhysicalAddress) address;
3498*4f1223e8SApple OSS Distributions }
3499*4f1223e8SApple OSS Distributions
3500*4f1223e8SApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3501*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3502*4f1223e8SApple OSS Distributions {
3503*4f1223e8SApple OSS Distributions IOPhysicalAddress phys32;
3504*4f1223e8SApple OSS Distributions IOByteCount length;
3505*4f1223e8SApple OSS Distributions addr64_t phys64;
3506*4f1223e8SApple OSS Distributions IOMapper * mapper = NULL;
3507*4f1223e8SApple OSS Distributions
3508*4f1223e8SApple OSS Distributions phys32 = getPhysicalSegment(offset, lengthOfSegment);
3509*4f1223e8SApple OSS Distributions if (!phys32) {
3510*4f1223e8SApple OSS Distributions return 0;
3511*4f1223e8SApple OSS Distributions }
3512*4f1223e8SApple OSS Distributions
3513*4f1223e8SApple OSS Distributions if (gIOSystemMapper) {
3514*4f1223e8SApple OSS Distributions mapper = gIOSystemMapper;
3515*4f1223e8SApple OSS Distributions }
3516*4f1223e8SApple OSS Distributions
3517*4f1223e8SApple OSS Distributions if (mapper) {
3518*4f1223e8SApple OSS Distributions IOByteCount origLen;
3519*4f1223e8SApple OSS Distributions
3520*4f1223e8SApple OSS Distributions phys64 = mapper->mapToPhysicalAddress(phys32);
3521*4f1223e8SApple OSS Distributions origLen = *lengthOfSegment;
3522*4f1223e8SApple OSS Distributions length = page_size - (phys64 & (page_size - 1));
3523*4f1223e8SApple OSS Distributions while ((length < origLen)
3524*4f1223e8SApple OSS Distributions && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3525*4f1223e8SApple OSS Distributions length += page_size;
3526*4f1223e8SApple OSS Distributions }
3527*4f1223e8SApple OSS Distributions if (length > origLen) {
3528*4f1223e8SApple OSS Distributions length = origLen;
3529*4f1223e8SApple OSS Distributions }
3530*4f1223e8SApple OSS Distributions
3531*4f1223e8SApple OSS Distributions *lengthOfSegment = length;
3532*4f1223e8SApple OSS Distributions } else {
3533*4f1223e8SApple OSS Distributions phys64 = (addr64_t) phys32;
3534*4f1223e8SApple OSS Distributions }
3535*4f1223e8SApple OSS Distributions
3536*4f1223e8SApple OSS Distributions return phys64;
3537*4f1223e8SApple OSS Distributions }
3538*4f1223e8SApple OSS Distributions
3539*4f1223e8SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3540*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3541*4f1223e8SApple OSS Distributions {
3542*4f1223e8SApple OSS Distributions return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3543*4f1223e8SApple OSS Distributions }
3544*4f1223e8SApple OSS Distributions
3545*4f1223e8SApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3546*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3547*4f1223e8SApple OSS Distributions {
3548*4f1223e8SApple OSS Distributions return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3549*4f1223e8SApple OSS Distributions }
3550*4f1223e8SApple OSS Distributions
3551*4f1223e8SApple OSS Distributions #pragma clang diagnostic push
3552*4f1223e8SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3553*4f1223e8SApple OSS Distributions
3554*4f1223e8SApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3555*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3556*4f1223e8SApple OSS Distributions IOByteCount * lengthOfSegment)
3557*4f1223e8SApple OSS Distributions {
3558*4f1223e8SApple OSS Distributions if (_task == kernel_task) {
3559*4f1223e8SApple OSS Distributions return (void *) getSourceSegment(offset, lengthOfSegment);
3560*4f1223e8SApple OSS Distributions } else {
3561*4f1223e8SApple OSS Distributions panic("IOGMD::getVirtualSegment deprecated");
3562*4f1223e8SApple OSS Distributions }
3563*4f1223e8SApple OSS Distributions
3564*4f1223e8SApple OSS Distributions return NULL;
3565*4f1223e8SApple OSS Distributions }
3566*4f1223e8SApple OSS Distributions #pragma clang diagnostic pop
3567*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
3568*4f1223e8SApple OSS Distributions
3569*4f1223e8SApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const3570*4f1223e8SApple OSS Distributions IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3571*4f1223e8SApple OSS Distributions {
3572*4f1223e8SApple OSS Distributions IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
3573*4f1223e8SApple OSS Distributions DMACommandOps params;
3574*4f1223e8SApple OSS Distributions IOReturn err;
3575*4f1223e8SApple OSS Distributions
3576*4f1223e8SApple OSS Distributions params = (op & ~kIOMDDMACommandOperationMask & op);
3577*4f1223e8SApple OSS Distributions op &= kIOMDDMACommandOperationMask;
3578*4f1223e8SApple OSS Distributions
3579*4f1223e8SApple OSS Distributions if (kIOMDGetCharacteristics == op) {
3580*4f1223e8SApple OSS Distributions if (dataSize < sizeof(IOMDDMACharacteristics)) {
3581*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
3582*4f1223e8SApple OSS Distributions }
3583*4f1223e8SApple OSS Distributions
3584*4f1223e8SApple OSS Distributions IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3585*4f1223e8SApple OSS Distributions data->fLength = getLength();
3586*4f1223e8SApple OSS Distributions data->fSGCount = 0;
3587*4f1223e8SApple OSS Distributions data->fDirection = getDirection();
3588*4f1223e8SApple OSS Distributions data->fIsPrepared = true; // Assume prepared - fails safe
3589*4f1223e8SApple OSS Distributions } else if (kIOMDWalkSegments == op) {
3590*4f1223e8SApple OSS Distributions if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
3591*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
3592*4f1223e8SApple OSS Distributions }
3593*4f1223e8SApple OSS Distributions
3594*4f1223e8SApple OSS Distributions IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
3595*4f1223e8SApple OSS Distributions IOByteCount offset = (IOByteCount) data->fOffset;
3596*4f1223e8SApple OSS Distributions IOPhysicalLength length, nextLength;
3597*4f1223e8SApple OSS Distributions addr64_t addr, nextAddr;
3598*4f1223e8SApple OSS Distributions
3599*4f1223e8SApple OSS Distributions if (data->fMapped) {
3600*4f1223e8SApple OSS Distributions panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
3601*4f1223e8SApple OSS Distributions }
3602*4f1223e8SApple OSS Distributions addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
3603*4f1223e8SApple OSS Distributions offset += length;
3604*4f1223e8SApple OSS Distributions while (offset < getLength()) {
3605*4f1223e8SApple OSS Distributions nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
3606*4f1223e8SApple OSS Distributions if ((addr + length) != nextAddr) {
3607*4f1223e8SApple OSS Distributions break;
3608*4f1223e8SApple OSS Distributions }
3609*4f1223e8SApple OSS Distributions length += nextLength;
3610*4f1223e8SApple OSS Distributions offset += nextLength;
3611*4f1223e8SApple OSS Distributions }
3612*4f1223e8SApple OSS Distributions data->fIOVMAddr = addr;
3613*4f1223e8SApple OSS Distributions data->fLength = length;
3614*4f1223e8SApple OSS Distributions } else if (kIOMDAddDMAMapSpec == op) {
3615*4f1223e8SApple OSS Distributions return kIOReturnUnsupported;
3616*4f1223e8SApple OSS Distributions } else if (kIOMDDMAMap == op) {
3617*4f1223e8SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3618*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
3619*4f1223e8SApple OSS Distributions }
3620*4f1223e8SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3621*4f1223e8SApple OSS Distributions
3622*4f1223e8SApple OSS Distributions err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3623*4f1223e8SApple OSS Distributions
3624*4f1223e8SApple OSS Distributions return err;
3625*4f1223e8SApple OSS Distributions } else if (kIOMDDMAUnmap == op) {
3626*4f1223e8SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3627*4f1223e8SApple OSS Distributions return kIOReturnUnderrun;
3628*4f1223e8SApple OSS Distributions }
3629*4f1223e8SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3630*4f1223e8SApple OSS Distributions
3631*4f1223e8SApple OSS Distributions err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3632*4f1223e8SApple OSS Distributions
3633*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
3634*4f1223e8SApple OSS Distributions } else {
3635*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
3636*4f1223e8SApple OSS Distributions }
3637*4f1223e8SApple OSS Distributions
3638*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
3639*4f1223e8SApple OSS Distributions }
3640*4f1223e8SApple OSS Distributions
3641*4f1223e8SApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3642*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
3643*4f1223e8SApple OSS Distributions IOOptionBits * oldState )
3644*4f1223e8SApple OSS Distributions {
3645*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnSuccess;
3646*4f1223e8SApple OSS Distributions
3647*4f1223e8SApple OSS Distributions vm_purgable_t control;
3648*4f1223e8SApple OSS Distributions int state;
3649*4f1223e8SApple OSS Distributions
3650*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3651*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3652*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
3653*4f1223e8SApple OSS Distributions }
3654*4f1223e8SApple OSS Distributions
3655*4f1223e8SApple OSS Distributions if (_memRef) {
3656*4f1223e8SApple OSS Distributions err = super::setPurgeable(newState, oldState);
3657*4f1223e8SApple OSS Distributions } else {
3658*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3659*4f1223e8SApple OSS Distributions LOCK;
3660*4f1223e8SApple OSS Distributions }
3661*4f1223e8SApple OSS Distributions do{
3662*4f1223e8SApple OSS Distributions // Find the appropriate vm_map for the given task
3663*4f1223e8SApple OSS Distributions vm_map_t curMap;
3664*4f1223e8SApple OSS Distributions if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
3665*4f1223e8SApple OSS Distributions err = kIOReturnNotReady;
3666*4f1223e8SApple OSS Distributions break;
3667*4f1223e8SApple OSS Distributions } else if (!_task) {
3668*4f1223e8SApple OSS Distributions err = kIOReturnUnsupported;
3669*4f1223e8SApple OSS Distributions break;
3670*4f1223e8SApple OSS Distributions } else {
3671*4f1223e8SApple OSS Distributions curMap = get_task_map(_task);
3672*4f1223e8SApple OSS Distributions if (NULL == curMap) {
3673*4f1223e8SApple OSS Distributions err = KERN_INVALID_ARGUMENT;
3674*4f1223e8SApple OSS Distributions break;
3675*4f1223e8SApple OSS Distributions }
3676*4f1223e8SApple OSS Distributions }
3677*4f1223e8SApple OSS Distributions
3678*4f1223e8SApple OSS Distributions // can only do one range
3679*4f1223e8SApple OSS Distributions Ranges vec = _ranges;
3680*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
3681*4f1223e8SApple OSS Distributions mach_vm_address_t addr;
3682*4f1223e8SApple OSS Distributions mach_vm_size_t len;
3683*4f1223e8SApple OSS Distributions getAddrLenForInd(addr, len, type, vec, 0, _task);
3684*4f1223e8SApple OSS Distributions
3685*4f1223e8SApple OSS Distributions err = purgeableControlBits(newState, &control, &state);
3686*4f1223e8SApple OSS Distributions if (kIOReturnSuccess != err) {
3687*4f1223e8SApple OSS Distributions break;
3688*4f1223e8SApple OSS Distributions }
3689*4f1223e8SApple OSS Distributions err = vm_map_purgable_control(curMap, addr, control, &state);
3690*4f1223e8SApple OSS Distributions if (oldState) {
3691*4f1223e8SApple OSS Distributions if (kIOReturnSuccess == err) {
3692*4f1223e8SApple OSS Distributions err = purgeableStateBits(&state);
3693*4f1223e8SApple OSS Distributions *oldState = state;
3694*4f1223e8SApple OSS Distributions }
3695*4f1223e8SApple OSS Distributions }
3696*4f1223e8SApple OSS Distributions }while (false);
3697*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3698*4f1223e8SApple OSS Distributions UNLOCK;
3699*4f1223e8SApple OSS Distributions }
3700*4f1223e8SApple OSS Distributions }
3701*4f1223e8SApple OSS Distributions
3702*4f1223e8SApple OSS Distributions return err;
3703*4f1223e8SApple OSS Distributions }
3704*4f1223e8SApple OSS Distributions
3705*4f1223e8SApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3706*4f1223e8SApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3707*4f1223e8SApple OSS Distributions IOOptionBits * oldState )
3708*4f1223e8SApple OSS Distributions {
3709*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3710*4f1223e8SApple OSS Distributions
3711*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3712*4f1223e8SApple OSS Distributions LOCK;
3713*4f1223e8SApple OSS Distributions }
3714*4f1223e8SApple OSS Distributions if (_memRef) {
3715*4f1223e8SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3716*4f1223e8SApple OSS Distributions }
3717*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3718*4f1223e8SApple OSS Distributions UNLOCK;
3719*4f1223e8SApple OSS Distributions }
3720*4f1223e8SApple OSS Distributions
3721*4f1223e8SApple OSS Distributions return err;
3722*4f1223e8SApple OSS Distributions }
3723*4f1223e8SApple OSS Distributions
3724*4f1223e8SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3725*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3726*4f1223e8SApple OSS Distributions int newLedgerTag,
3727*4f1223e8SApple OSS Distributions IOOptionBits newLedgerOptions )
3728*4f1223e8SApple OSS Distributions {
3729*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnSuccess;
3730*4f1223e8SApple OSS Distributions
3731*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3732*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3733*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
3734*4f1223e8SApple OSS Distributions }
3735*4f1223e8SApple OSS Distributions
3736*4f1223e8SApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3737*4f1223e8SApple OSS Distributions return kIOReturnUnsupported;
3738*4f1223e8SApple OSS Distributions }
3739*4f1223e8SApple OSS Distributions
3740*4f1223e8SApple OSS Distributions if (_memRef) {
3741*4f1223e8SApple OSS Distributions err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3742*4f1223e8SApple OSS Distributions } else {
3743*4f1223e8SApple OSS Distributions err = kIOReturnUnsupported;
3744*4f1223e8SApple OSS Distributions }
3745*4f1223e8SApple OSS Distributions
3746*4f1223e8SApple OSS Distributions return err;
3747*4f1223e8SApple OSS Distributions }
3748*4f1223e8SApple OSS Distributions
3749*4f1223e8SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3750*4f1223e8SApple OSS Distributions IOMemoryDescriptor::setOwnership( task_t newOwner,
3751*4f1223e8SApple OSS Distributions int newLedgerTag,
3752*4f1223e8SApple OSS Distributions IOOptionBits newLedgerOptions )
3753*4f1223e8SApple OSS Distributions {
3754*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3755*4f1223e8SApple OSS Distributions
3756*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3757*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3758*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
3759*4f1223e8SApple OSS Distributions }
3760*4f1223e8SApple OSS Distributions
3761*4f1223e8SApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3762*4f1223e8SApple OSS Distributions return kIOReturnUnsupported;
3763*4f1223e8SApple OSS Distributions }
3764*4f1223e8SApple OSS Distributions
3765*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3766*4f1223e8SApple OSS Distributions LOCK;
3767*4f1223e8SApple OSS Distributions }
3768*4f1223e8SApple OSS Distributions if (_memRef) {
3769*4f1223e8SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3770*4f1223e8SApple OSS Distributions } else {
3771*4f1223e8SApple OSS Distributions IOMultiMemoryDescriptor * mmd;
3772*4f1223e8SApple OSS Distributions IOSubMemoryDescriptor * smd;
3773*4f1223e8SApple OSS Distributions if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3774*4f1223e8SApple OSS Distributions err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3775*4f1223e8SApple OSS Distributions } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3776*4f1223e8SApple OSS Distributions err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3777*4f1223e8SApple OSS Distributions }
3778*4f1223e8SApple OSS Distributions }
3779*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3780*4f1223e8SApple OSS Distributions UNLOCK;
3781*4f1223e8SApple OSS Distributions }
3782*4f1223e8SApple OSS Distributions
3783*4f1223e8SApple OSS Distributions return err;
3784*4f1223e8SApple OSS Distributions }
3785*4f1223e8SApple OSS Distributions
3786*4f1223e8SApple OSS Distributions
3787*4f1223e8SApple OSS Distributions uint64_t
getDMAMapLength(uint64_t * offset)3788*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3789*4f1223e8SApple OSS Distributions {
3790*4f1223e8SApple OSS Distributions uint64_t length;
3791*4f1223e8SApple OSS Distributions
3792*4f1223e8SApple OSS Distributions if (_memRef) {
3793*4f1223e8SApple OSS Distributions length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3794*4f1223e8SApple OSS Distributions } else {
3795*4f1223e8SApple OSS Distributions IOByteCount iterate, segLen;
3796*4f1223e8SApple OSS Distributions IOPhysicalAddress sourceAddr, sourceAlign;
3797*4f1223e8SApple OSS Distributions
3798*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3799*4f1223e8SApple OSS Distributions LOCK;
3800*4f1223e8SApple OSS Distributions }
3801*4f1223e8SApple OSS Distributions length = 0;
3802*4f1223e8SApple OSS Distributions iterate = 0;
3803*4f1223e8SApple OSS Distributions while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3804*4f1223e8SApple OSS Distributions sourceAlign = (sourceAddr & page_mask);
3805*4f1223e8SApple OSS Distributions if (offset && !iterate) {
3806*4f1223e8SApple OSS Distributions *offset = sourceAlign;
3807*4f1223e8SApple OSS Distributions }
3808*4f1223e8SApple OSS Distributions length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3809*4f1223e8SApple OSS Distributions iterate += segLen;
3810*4f1223e8SApple OSS Distributions }
3811*4f1223e8SApple OSS Distributions if (!iterate) {
3812*4f1223e8SApple OSS Distributions length = getLength();
3813*4f1223e8SApple OSS Distributions if (offset) {
3814*4f1223e8SApple OSS Distributions *offset = 0;
3815*4f1223e8SApple OSS Distributions }
3816*4f1223e8SApple OSS Distributions }
3817*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3818*4f1223e8SApple OSS Distributions UNLOCK;
3819*4f1223e8SApple OSS Distributions }
3820*4f1223e8SApple OSS Distributions }
3821*4f1223e8SApple OSS Distributions
3822*4f1223e8SApple OSS Distributions return length;
3823*4f1223e8SApple OSS Distributions }
3824*4f1223e8SApple OSS Distributions
3825*4f1223e8SApple OSS Distributions
3826*4f1223e8SApple OSS Distributions IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3827*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3828*4f1223e8SApple OSS Distributions IOByteCount * dirtyPageCount )
3829*4f1223e8SApple OSS Distributions {
3830*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3831*4f1223e8SApple OSS Distributions
3832*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3833*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3834*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
3835*4f1223e8SApple OSS Distributions }
3836*4f1223e8SApple OSS Distributions
3837*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3838*4f1223e8SApple OSS Distributions LOCK;
3839*4f1223e8SApple OSS Distributions }
3840*4f1223e8SApple OSS Distributions if (_memRef) {
3841*4f1223e8SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3842*4f1223e8SApple OSS Distributions } else {
3843*4f1223e8SApple OSS Distributions IOMultiMemoryDescriptor * mmd;
3844*4f1223e8SApple OSS Distributions IOSubMemoryDescriptor * smd;
3845*4f1223e8SApple OSS Distributions if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3846*4f1223e8SApple OSS Distributions err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3847*4f1223e8SApple OSS Distributions } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3848*4f1223e8SApple OSS Distributions err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3849*4f1223e8SApple OSS Distributions }
3850*4f1223e8SApple OSS Distributions }
3851*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3852*4f1223e8SApple OSS Distributions UNLOCK;
3853*4f1223e8SApple OSS Distributions }
3854*4f1223e8SApple OSS Distributions
3855*4f1223e8SApple OSS Distributions return err;
3856*4f1223e8SApple OSS Distributions }
3857*4f1223e8SApple OSS Distributions
3858*4f1223e8SApple OSS Distributions
3859*4f1223e8SApple OSS Distributions #if defined(__arm64__)
3860*4f1223e8SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3861*4f1223e8SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3862*4f1223e8SApple OSS Distributions #else /* defined(__arm64__) */
3863*4f1223e8SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3864*4f1223e8SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3865*4f1223e8SApple OSS Distributions #endif /* defined(__arm64__) */
3866*4f1223e8SApple OSS Distributions
3867*4f1223e8SApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3868*4f1223e8SApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3869*4f1223e8SApple OSS Distributions {
3870*4f1223e8SApple OSS Distributions ppnum_t page, end;
3871*4f1223e8SApple OSS Distributions
3872*4f1223e8SApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3873*4f1223e8SApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3874*4f1223e8SApple OSS Distributions for (; page < end; page++) {
3875*4f1223e8SApple OSS Distributions pmap_clear_noencrypt(page);
3876*4f1223e8SApple OSS Distributions }
3877*4f1223e8SApple OSS Distributions }
3878*4f1223e8SApple OSS Distributions
3879*4f1223e8SApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3880*4f1223e8SApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3881*4f1223e8SApple OSS Distributions {
3882*4f1223e8SApple OSS Distributions ppnum_t page, end;
3883*4f1223e8SApple OSS Distributions
3884*4f1223e8SApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3885*4f1223e8SApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3886*4f1223e8SApple OSS Distributions for (; page < end; page++) {
3887*4f1223e8SApple OSS Distributions pmap_set_noencrypt(page);
3888*4f1223e8SApple OSS Distributions }
3889*4f1223e8SApple OSS Distributions }
3890*4f1223e8SApple OSS Distributions
3891*4f1223e8SApple OSS Distributions IOReturn
performOperation(IOOptionBits options,IOByteCount offset,IOByteCount length)3892*4f1223e8SApple OSS Distributions IOMemoryDescriptor::performOperation( IOOptionBits options,
3893*4f1223e8SApple OSS Distributions IOByteCount offset, IOByteCount length )
3894*4f1223e8SApple OSS Distributions {
3895*4f1223e8SApple OSS Distributions IOByteCount remaining;
3896*4f1223e8SApple OSS Distributions unsigned int res;
3897*4f1223e8SApple OSS Distributions void (*func)(addr64_t pa, unsigned int count) = NULL;
3898*4f1223e8SApple OSS Distributions #if defined(__arm64__)
3899*4f1223e8SApple OSS Distributions void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3900*4f1223e8SApple OSS Distributions #endif
3901*4f1223e8SApple OSS Distributions
3902*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3903*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3904*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
3905*4f1223e8SApple OSS Distributions }
3906*4f1223e8SApple OSS Distributions
3907*4f1223e8SApple OSS Distributions switch (options) {
3908*4f1223e8SApple OSS Distributions case kIOMemoryIncoherentIOFlush:
3909*4f1223e8SApple OSS Distributions #if defined(__arm64__)
3910*4f1223e8SApple OSS Distributions func_ext = &dcache_incoherent_io_flush64;
3911*4f1223e8SApple OSS Distributions #if __ARM_COHERENT_IO__
3912*4f1223e8SApple OSS Distributions func_ext(0, 0, 0, &res);
3913*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
3914*4f1223e8SApple OSS Distributions #else /* __ARM_COHERENT_IO__ */
3915*4f1223e8SApple OSS Distributions break;
3916*4f1223e8SApple OSS Distributions #endif /* __ARM_COHERENT_IO__ */
3917*4f1223e8SApple OSS Distributions #else /* defined(__arm64__) */
3918*4f1223e8SApple OSS Distributions func = &dcache_incoherent_io_flush64;
3919*4f1223e8SApple OSS Distributions break;
3920*4f1223e8SApple OSS Distributions #endif /* defined(__arm64__) */
3921*4f1223e8SApple OSS Distributions case kIOMemoryIncoherentIOStore:
3922*4f1223e8SApple OSS Distributions #if defined(__arm64__)
3923*4f1223e8SApple OSS Distributions func_ext = &dcache_incoherent_io_store64;
3924*4f1223e8SApple OSS Distributions #if __ARM_COHERENT_IO__
3925*4f1223e8SApple OSS Distributions func_ext(0, 0, 0, &res);
3926*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
3927*4f1223e8SApple OSS Distributions #else /* __ARM_COHERENT_IO__ */
3928*4f1223e8SApple OSS Distributions break;
3929*4f1223e8SApple OSS Distributions #endif /* __ARM_COHERENT_IO__ */
3930*4f1223e8SApple OSS Distributions #else /* defined(__arm64__) */
3931*4f1223e8SApple OSS Distributions func = &dcache_incoherent_io_store64;
3932*4f1223e8SApple OSS Distributions break;
3933*4f1223e8SApple OSS Distributions #endif /* defined(__arm64__) */
3934*4f1223e8SApple OSS Distributions
3935*4f1223e8SApple OSS Distributions case kIOMemorySetEncrypted:
3936*4f1223e8SApple OSS Distributions func = &SetEncryptOp;
3937*4f1223e8SApple OSS Distributions break;
3938*4f1223e8SApple OSS Distributions case kIOMemoryClearEncrypted:
3939*4f1223e8SApple OSS Distributions func = &ClearEncryptOp;
3940*4f1223e8SApple OSS Distributions break;
3941*4f1223e8SApple OSS Distributions }
3942*4f1223e8SApple OSS Distributions
3943*4f1223e8SApple OSS Distributions #if defined(__arm64__)
3944*4f1223e8SApple OSS Distributions if ((func == NULL) && (func_ext == NULL)) {
3945*4f1223e8SApple OSS Distributions return kIOReturnUnsupported;
3946*4f1223e8SApple OSS Distributions }
3947*4f1223e8SApple OSS Distributions #else /* defined(__arm64__) */
3948*4f1223e8SApple OSS Distributions if (!func) {
3949*4f1223e8SApple OSS Distributions return kIOReturnUnsupported;
3950*4f1223e8SApple OSS Distributions }
3951*4f1223e8SApple OSS Distributions #endif /* defined(__arm64__) */
3952*4f1223e8SApple OSS Distributions
3953*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3954*4f1223e8SApple OSS Distributions LOCK;
3955*4f1223e8SApple OSS Distributions }
3956*4f1223e8SApple OSS Distributions
3957*4f1223e8SApple OSS Distributions res = 0x0UL;
3958*4f1223e8SApple OSS Distributions remaining = length = min(length, getLength() - offset);
3959*4f1223e8SApple OSS Distributions while (remaining) {
3960*4f1223e8SApple OSS Distributions // (process another target segment?)
3961*4f1223e8SApple OSS Distributions addr64_t dstAddr64;
3962*4f1223e8SApple OSS Distributions IOByteCount dstLen;
3963*4f1223e8SApple OSS Distributions
3964*4f1223e8SApple OSS Distributions dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3965*4f1223e8SApple OSS Distributions if (!dstAddr64) {
3966*4f1223e8SApple OSS Distributions break;
3967*4f1223e8SApple OSS Distributions }
3968*4f1223e8SApple OSS Distributions
3969*4f1223e8SApple OSS Distributions // Clip segment length to remaining
3970*4f1223e8SApple OSS Distributions if (dstLen > remaining) {
3971*4f1223e8SApple OSS Distributions dstLen = remaining;
3972*4f1223e8SApple OSS Distributions }
3973*4f1223e8SApple OSS Distributions if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
3974*4f1223e8SApple OSS Distributions dstLen = (UINT_MAX - PAGE_SIZE + 1);
3975*4f1223e8SApple OSS Distributions }
3976*4f1223e8SApple OSS Distributions if (remaining > UINT_MAX) {
3977*4f1223e8SApple OSS Distributions remaining = UINT_MAX;
3978*4f1223e8SApple OSS Distributions }
3979*4f1223e8SApple OSS Distributions
3980*4f1223e8SApple OSS Distributions #if defined(__arm64__)
3981*4f1223e8SApple OSS Distributions if (func) {
3982*4f1223e8SApple OSS Distributions (*func)(dstAddr64, (unsigned int) dstLen);
3983*4f1223e8SApple OSS Distributions }
3984*4f1223e8SApple OSS Distributions if (func_ext) {
3985*4f1223e8SApple OSS Distributions (*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
3986*4f1223e8SApple OSS Distributions if (res != 0x0UL) {
3987*4f1223e8SApple OSS Distributions remaining = 0;
3988*4f1223e8SApple OSS Distributions break;
3989*4f1223e8SApple OSS Distributions }
3990*4f1223e8SApple OSS Distributions }
3991*4f1223e8SApple OSS Distributions #else /* defined(__arm64__) */
3992*4f1223e8SApple OSS Distributions (*func)(dstAddr64, (unsigned int) dstLen);
3993*4f1223e8SApple OSS Distributions #endif /* defined(__arm64__) */
3994*4f1223e8SApple OSS Distributions
3995*4f1223e8SApple OSS Distributions offset += dstLen;
3996*4f1223e8SApple OSS Distributions remaining -= dstLen;
3997*4f1223e8SApple OSS Distributions }
3998*4f1223e8SApple OSS Distributions
3999*4f1223e8SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
4000*4f1223e8SApple OSS Distributions UNLOCK;
4001*4f1223e8SApple OSS Distributions }
4002*4f1223e8SApple OSS Distributions
4003*4f1223e8SApple OSS Distributions return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
4004*4f1223e8SApple OSS Distributions }
4005*4f1223e8SApple OSS Distributions
4006*4f1223e8SApple OSS Distributions /*
4007*4f1223e8SApple OSS Distributions *
4008*4f1223e8SApple OSS Distributions */
4009*4f1223e8SApple OSS Distributions
4010*4f1223e8SApple OSS Distributions #if defined(__i386__) || defined(__x86_64__)
4011*4f1223e8SApple OSS Distributions
4012*4f1223e8SApple OSS Distributions extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4013*4f1223e8SApple OSS Distributions
4014*4f1223e8SApple OSS Distributions /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4015*4f1223e8SApple OSS Distributions * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4016*4f1223e8SApple OSS Distributions * kernel non-text data -- should we just add another range instead?
4017*4f1223e8SApple OSS Distributions */
4018*4f1223e8SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4019*4f1223e8SApple OSS Distributions #define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4020*4f1223e8SApple OSS Distributions
4021*4f1223e8SApple OSS Distributions #elif defined(__arm64__)
4022*4f1223e8SApple OSS Distributions
4023*4f1223e8SApple OSS Distributions extern vm_offset_t static_memory_end;
4024*4f1223e8SApple OSS Distributions
4025*4f1223e8SApple OSS Distributions #if defined(__arm64__)
4026*4f1223e8SApple OSS Distributions #define io_kernel_static_start vm_kext_base
4027*4f1223e8SApple OSS Distributions #else /* defined(__arm64__) */
4028*4f1223e8SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4029*4f1223e8SApple OSS Distributions #endif /* defined(__arm64__) */
4030*4f1223e8SApple OSS Distributions
4031*4f1223e8SApple OSS Distributions #define io_kernel_static_end static_memory_end
4032*4f1223e8SApple OSS Distributions
4033*4f1223e8SApple OSS Distributions #else
4034*4f1223e8SApple OSS Distributions #error io_kernel_static_end is undefined for this architecture
4035*4f1223e8SApple OSS Distributions #endif
4036*4f1223e8SApple OSS Distributions
4037*4f1223e8SApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4038*4f1223e8SApple OSS Distributions io_get_kernel_static_upl(
4039*4f1223e8SApple OSS Distributions vm_map_t /* map */,
4040*4f1223e8SApple OSS Distributions uintptr_t offset,
4041*4f1223e8SApple OSS Distributions upl_size_t *upl_size,
4042*4f1223e8SApple OSS Distributions unsigned int *page_offset,
4043*4f1223e8SApple OSS Distributions upl_t *upl,
4044*4f1223e8SApple OSS Distributions upl_page_info_array_t page_list,
4045*4f1223e8SApple OSS Distributions unsigned int *count,
4046*4f1223e8SApple OSS Distributions ppnum_t *highest_page)
4047*4f1223e8SApple OSS Distributions {
4048*4f1223e8SApple OSS Distributions unsigned int pageCount, page;
4049*4f1223e8SApple OSS Distributions ppnum_t phys;
4050*4f1223e8SApple OSS Distributions ppnum_t highestPage = 0;
4051*4f1223e8SApple OSS Distributions
4052*4f1223e8SApple OSS Distributions pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4053*4f1223e8SApple OSS Distributions if (pageCount > *count) {
4054*4f1223e8SApple OSS Distributions pageCount = *count;
4055*4f1223e8SApple OSS Distributions }
4056*4f1223e8SApple OSS Distributions *upl_size = (upl_size_t) ptoa_64(pageCount);
4057*4f1223e8SApple OSS Distributions
4058*4f1223e8SApple OSS Distributions *upl = NULL;
4059*4f1223e8SApple OSS Distributions *page_offset = ((unsigned int) page_mask & offset);
4060*4f1223e8SApple OSS Distributions
4061*4f1223e8SApple OSS Distributions for (page = 0; page < pageCount; page++) {
4062*4f1223e8SApple OSS Distributions phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4063*4f1223e8SApple OSS Distributions if (!phys) {
4064*4f1223e8SApple OSS Distributions break;
4065*4f1223e8SApple OSS Distributions }
4066*4f1223e8SApple OSS Distributions page_list[page].phys_addr = phys;
4067*4f1223e8SApple OSS Distributions page_list[page].free_when_done = 0;
4068*4f1223e8SApple OSS Distributions page_list[page].absent = 0;
4069*4f1223e8SApple OSS Distributions page_list[page].dirty = 0;
4070*4f1223e8SApple OSS Distributions page_list[page].precious = 0;
4071*4f1223e8SApple OSS Distributions page_list[page].device = 0;
4072*4f1223e8SApple OSS Distributions if (phys > highestPage) {
4073*4f1223e8SApple OSS Distributions highestPage = phys;
4074*4f1223e8SApple OSS Distributions }
4075*4f1223e8SApple OSS Distributions }
4076*4f1223e8SApple OSS Distributions
4077*4f1223e8SApple OSS Distributions *highest_page = highestPage;
4078*4f1223e8SApple OSS Distributions
4079*4f1223e8SApple OSS Distributions return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4080*4f1223e8SApple OSS Distributions }
4081*4f1223e8SApple OSS Distributions
4082*4f1223e8SApple OSS Distributions IOReturn
wireVirtual(IODirection forDirection)4083*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4084*4f1223e8SApple OSS Distributions {
4085*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4086*4f1223e8SApple OSS Distributions IOReturn error = kIOReturnSuccess;
4087*4f1223e8SApple OSS Distributions ioGMDData *dataP;
4088*4f1223e8SApple OSS Distributions upl_page_info_array_t pageInfo;
4089*4f1223e8SApple OSS Distributions ppnum_t mapBase;
4090*4f1223e8SApple OSS Distributions vm_tag_t tag = VM_KERN_MEMORY_NONE;
4091*4f1223e8SApple OSS Distributions mach_vm_size_t numBytesWired = 0;
4092*4f1223e8SApple OSS Distributions
4093*4f1223e8SApple OSS Distributions assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4094*4f1223e8SApple OSS Distributions
4095*4f1223e8SApple OSS Distributions if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4096*4f1223e8SApple OSS Distributions forDirection = (IODirection) (forDirection | getDirection());
4097*4f1223e8SApple OSS Distributions }
4098*4f1223e8SApple OSS Distributions
4099*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
4100*4f1223e8SApple OSS Distributions upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4101*4f1223e8SApple OSS Distributions switch (kIODirectionOutIn & forDirection) {
4102*4f1223e8SApple OSS Distributions case kIODirectionOut:
4103*4f1223e8SApple OSS Distributions // Pages do not need to be marked as dirty on commit
4104*4f1223e8SApple OSS Distributions uplFlags = UPL_COPYOUT_FROM;
4105*4f1223e8SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess;
4106*4f1223e8SApple OSS Distributions break;
4107*4f1223e8SApple OSS Distributions
4108*4f1223e8SApple OSS Distributions case kIODirectionIn:
4109*4f1223e8SApple OSS Distributions dataP->fDMAAccess = kIODMAMapWriteAccess;
4110*4f1223e8SApple OSS Distributions uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4111*4f1223e8SApple OSS Distributions break;
4112*4f1223e8SApple OSS Distributions
4113*4f1223e8SApple OSS Distributions default:
4114*4f1223e8SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4115*4f1223e8SApple OSS Distributions uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4116*4f1223e8SApple OSS Distributions break;
4117*4f1223e8SApple OSS Distributions }
4118*4f1223e8SApple OSS Distributions
4119*4f1223e8SApple OSS Distributions if (_wireCount) {
4120*4f1223e8SApple OSS Distributions if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4121*4f1223e8SApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4122*4f1223e8SApple OSS Distributions (size_t)VM_KERNEL_ADDRPERM(this));
4123*4f1223e8SApple OSS Distributions error = kIOReturnNotWritable;
4124*4f1223e8SApple OSS Distributions }
4125*4f1223e8SApple OSS Distributions } else {
4126*4f1223e8SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4127*4f1223e8SApple OSS Distributions IOMapper *mapper;
4128*4f1223e8SApple OSS Distributions
4129*4f1223e8SApple OSS Distributions mapper = dataP->fMapper;
4130*4f1223e8SApple OSS Distributions dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4131*4f1223e8SApple OSS Distributions
4132*4f1223e8SApple OSS Distributions uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4133*4f1223e8SApple OSS Distributions tag = _kernelTag;
4134*4f1223e8SApple OSS Distributions if (VM_KERN_MEMORY_NONE == tag) {
4135*4f1223e8SApple OSS Distributions tag = IOMemoryTag(kernel_map);
4136*4f1223e8SApple OSS Distributions }
4137*4f1223e8SApple OSS Distributions
4138*4f1223e8SApple OSS Distributions if (kIODirectionPrepareToPhys32 & forDirection) {
4139*4f1223e8SApple OSS Distributions if (!mapper) {
4140*4f1223e8SApple OSS Distributions uplFlags |= UPL_NEED_32BIT_ADDR;
4141*4f1223e8SApple OSS Distributions }
4142*4f1223e8SApple OSS Distributions if (dataP->fDMAMapNumAddressBits > 32) {
4143*4f1223e8SApple OSS Distributions dataP->fDMAMapNumAddressBits = 32;
4144*4f1223e8SApple OSS Distributions }
4145*4f1223e8SApple OSS Distributions }
4146*4f1223e8SApple OSS Distributions if (kIODirectionPrepareNoFault & forDirection) {
4147*4f1223e8SApple OSS Distributions uplFlags |= UPL_REQUEST_NO_FAULT;
4148*4f1223e8SApple OSS Distributions }
4149*4f1223e8SApple OSS Distributions if (kIODirectionPrepareNoZeroFill & forDirection) {
4150*4f1223e8SApple OSS Distributions uplFlags |= UPL_NOZEROFILLIO;
4151*4f1223e8SApple OSS Distributions }
4152*4f1223e8SApple OSS Distributions if (kIODirectionPrepareNonCoherent & forDirection) {
4153*4f1223e8SApple OSS Distributions uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4154*4f1223e8SApple OSS Distributions }
4155*4f1223e8SApple OSS Distributions
4156*4f1223e8SApple OSS Distributions mapBase = 0;
4157*4f1223e8SApple OSS Distributions
4158*4f1223e8SApple OSS Distributions // Note that appendBytes(NULL) zeros the data up to the desired length
4159*4f1223e8SApple OSS Distributions size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4160*4f1223e8SApple OSS Distributions if (uplPageSize > ((unsigned int)uplPageSize)) {
4161*4f1223e8SApple OSS Distributions error = kIOReturnNoMemory;
4162*4f1223e8SApple OSS Distributions traceInterval.setEndArg2(error);
4163*4f1223e8SApple OSS Distributions return error;
4164*4f1223e8SApple OSS Distributions }
4165*4f1223e8SApple OSS Distributions if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4166*4f1223e8SApple OSS Distributions error = kIOReturnNoMemory;
4167*4f1223e8SApple OSS Distributions traceInterval.setEndArg2(error);
4168*4f1223e8SApple OSS Distributions return error;
4169*4f1223e8SApple OSS Distributions }
4170*4f1223e8SApple OSS Distributions dataP = NULL;
4171*4f1223e8SApple OSS Distributions
4172*4f1223e8SApple OSS Distributions // Find the appropriate vm_map for the given task
4173*4f1223e8SApple OSS Distributions vm_map_t curMap;
4174*4f1223e8SApple OSS Distributions if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4175*4f1223e8SApple OSS Distributions curMap = NULL;
4176*4f1223e8SApple OSS Distributions } else {
4177*4f1223e8SApple OSS Distributions curMap = get_task_map(_task);
4178*4f1223e8SApple OSS Distributions }
4179*4f1223e8SApple OSS Distributions
4180*4f1223e8SApple OSS Distributions // Iterate over the vector of virtual ranges
4181*4f1223e8SApple OSS Distributions Ranges vec = _ranges;
4182*4f1223e8SApple OSS Distributions unsigned int pageIndex = 0;
4183*4f1223e8SApple OSS Distributions IOByteCount mdOffset = 0;
4184*4f1223e8SApple OSS Distributions ppnum_t highestPage = 0;
4185*4f1223e8SApple OSS Distributions bool byteAlignUPL;
4186*4f1223e8SApple OSS Distributions
4187*4f1223e8SApple OSS Distributions IOMemoryEntry * memRefEntry = NULL;
4188*4f1223e8SApple OSS Distributions if (_memRef) {
4189*4f1223e8SApple OSS Distributions memRefEntry = &_memRef->entries[0];
4190*4f1223e8SApple OSS Distributions byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4191*4f1223e8SApple OSS Distributions } else {
4192*4f1223e8SApple OSS Distributions byteAlignUPL = true;
4193*4f1223e8SApple OSS Distributions }
4194*4f1223e8SApple OSS Distributions
4195*4f1223e8SApple OSS Distributions for (UInt range = 0; mdOffset < _length; range++) {
4196*4f1223e8SApple OSS Distributions ioPLBlock iopl;
4197*4f1223e8SApple OSS Distributions mach_vm_address_t startPage, startPageOffset;
4198*4f1223e8SApple OSS Distributions mach_vm_size_t numBytes;
4199*4f1223e8SApple OSS Distributions ppnum_t highPage = 0;
4200*4f1223e8SApple OSS Distributions
4201*4f1223e8SApple OSS Distributions if (_memRef) {
4202*4f1223e8SApple OSS Distributions if (range >= _memRef->count) {
4203*4f1223e8SApple OSS Distributions panic("memRefEntry");
4204*4f1223e8SApple OSS Distributions }
4205*4f1223e8SApple OSS Distributions memRefEntry = &_memRef->entries[range];
4206*4f1223e8SApple OSS Distributions numBytes = memRefEntry->size;
4207*4f1223e8SApple OSS Distributions startPage = -1ULL;
4208*4f1223e8SApple OSS Distributions if (byteAlignUPL) {
4209*4f1223e8SApple OSS Distributions startPageOffset = 0;
4210*4f1223e8SApple OSS Distributions } else {
4211*4f1223e8SApple OSS Distributions startPageOffset = (memRefEntry->start & PAGE_MASK);
4212*4f1223e8SApple OSS Distributions }
4213*4f1223e8SApple OSS Distributions } else {
4214*4f1223e8SApple OSS Distributions // Get the startPage address and length of vec[range]
4215*4f1223e8SApple OSS Distributions getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
4216*4f1223e8SApple OSS Distributions if (byteAlignUPL) {
4217*4f1223e8SApple OSS Distributions startPageOffset = 0;
4218*4f1223e8SApple OSS Distributions } else {
4219*4f1223e8SApple OSS Distributions startPageOffset = startPage & PAGE_MASK;
4220*4f1223e8SApple OSS Distributions startPage = trunc_page_64(startPage);
4221*4f1223e8SApple OSS Distributions }
4222*4f1223e8SApple OSS Distributions }
4223*4f1223e8SApple OSS Distributions iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4224*4f1223e8SApple OSS Distributions numBytes += startPageOffset;
4225*4f1223e8SApple OSS Distributions
4226*4f1223e8SApple OSS Distributions if (mapper) {
4227*4f1223e8SApple OSS Distributions iopl.fMappedPage = mapBase + pageIndex;
4228*4f1223e8SApple OSS Distributions } else {
4229*4f1223e8SApple OSS Distributions iopl.fMappedPage = 0;
4230*4f1223e8SApple OSS Distributions }
4231*4f1223e8SApple OSS Distributions
4232*4f1223e8SApple OSS Distributions // Iterate over the current range, creating UPLs
4233*4f1223e8SApple OSS Distributions while (numBytes) {
4234*4f1223e8SApple OSS Distributions vm_address_t kernelStart = (vm_address_t) startPage;
4235*4f1223e8SApple OSS Distributions vm_map_t theMap;
4236*4f1223e8SApple OSS Distributions if (curMap) {
4237*4f1223e8SApple OSS Distributions theMap = curMap;
4238*4f1223e8SApple OSS Distributions } else if (_memRef) {
4239*4f1223e8SApple OSS Distributions theMap = NULL;
4240*4f1223e8SApple OSS Distributions } else {
4241*4f1223e8SApple OSS Distributions assert(_task == kernel_task);
4242*4f1223e8SApple OSS Distributions theMap = IOPageableMapForAddress(kernelStart);
4243*4f1223e8SApple OSS Distributions }
4244*4f1223e8SApple OSS Distributions
4245*4f1223e8SApple OSS Distributions // ioplFlags is an in/out parameter
4246*4f1223e8SApple OSS Distributions upl_control_flags_t ioplFlags = uplFlags;
4247*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
4248*4f1223e8SApple OSS Distributions pageInfo = getPageList(dataP);
4249*4f1223e8SApple OSS Distributions upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4250*4f1223e8SApple OSS Distributions
4251*4f1223e8SApple OSS Distributions mach_vm_size_t ioplPhysSize;
4252*4f1223e8SApple OSS Distributions upl_size_t ioplSize;
4253*4f1223e8SApple OSS Distributions unsigned int numPageInfo;
4254*4f1223e8SApple OSS Distributions
4255*4f1223e8SApple OSS Distributions if (_memRef) {
4256*4f1223e8SApple OSS Distributions error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4257*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4258*4f1223e8SApple OSS Distributions } else {
4259*4f1223e8SApple OSS Distributions error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4260*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4261*4f1223e8SApple OSS Distributions }
4262*4f1223e8SApple OSS Distributions if (error != KERN_SUCCESS) {
4263*4f1223e8SApple OSS Distributions if (_memRef) {
4264*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4265*4f1223e8SApple OSS Distributions } else {
4266*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4267*4f1223e8SApple OSS Distributions }
4268*4f1223e8SApple OSS Distributions printf("entry size error %d\n", error);
4269*4f1223e8SApple OSS Distributions goto abortExit;
4270*4f1223e8SApple OSS Distributions }
4271*4f1223e8SApple OSS Distributions ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4272*4f1223e8SApple OSS Distributions numPageInfo = atop_32(ioplPhysSize);
4273*4f1223e8SApple OSS Distributions if (byteAlignUPL) {
4274*4f1223e8SApple OSS Distributions if (numBytes > ioplPhysSize) {
4275*4f1223e8SApple OSS Distributions ioplSize = ((typeof(ioplSize))ioplPhysSize);
4276*4f1223e8SApple OSS Distributions } else {
4277*4f1223e8SApple OSS Distributions ioplSize = ((typeof(ioplSize))numBytes);
4278*4f1223e8SApple OSS Distributions }
4279*4f1223e8SApple OSS Distributions } else {
4280*4f1223e8SApple OSS Distributions ioplSize = ((typeof(ioplSize))ioplPhysSize);
4281*4f1223e8SApple OSS Distributions }
4282*4f1223e8SApple OSS Distributions
4283*4f1223e8SApple OSS Distributions if (_memRef) {
4284*4f1223e8SApple OSS Distributions memory_object_offset_t entryOffset;
4285*4f1223e8SApple OSS Distributions
4286*4f1223e8SApple OSS Distributions entryOffset = mdOffset;
4287*4f1223e8SApple OSS Distributions if (byteAlignUPL) {
4288*4f1223e8SApple OSS Distributions entryOffset = (entryOffset - memRefEntry->offset);
4289*4f1223e8SApple OSS Distributions } else {
4290*4f1223e8SApple OSS Distributions entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4291*4f1223e8SApple OSS Distributions }
4292*4f1223e8SApple OSS Distributions if (ioplSize > (memRefEntry->size - entryOffset)) {
4293*4f1223e8SApple OSS Distributions ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4294*4f1223e8SApple OSS Distributions }
4295*4f1223e8SApple OSS Distributions error = memory_object_iopl_request(memRefEntry->entry,
4296*4f1223e8SApple OSS Distributions entryOffset,
4297*4f1223e8SApple OSS Distributions &ioplSize,
4298*4f1223e8SApple OSS Distributions &iopl.fIOPL,
4299*4f1223e8SApple OSS Distributions baseInfo,
4300*4f1223e8SApple OSS Distributions &numPageInfo,
4301*4f1223e8SApple OSS Distributions &ioplFlags,
4302*4f1223e8SApple OSS Distributions tag);
4303*4f1223e8SApple OSS Distributions } else if ((theMap == kernel_map)
4304*4f1223e8SApple OSS Distributions && (kernelStart >= io_kernel_static_start)
4305*4f1223e8SApple OSS Distributions && (kernelStart < io_kernel_static_end)) {
4306*4f1223e8SApple OSS Distributions error = io_get_kernel_static_upl(theMap,
4307*4f1223e8SApple OSS Distributions kernelStart,
4308*4f1223e8SApple OSS Distributions &ioplSize,
4309*4f1223e8SApple OSS Distributions &iopl.fPageOffset,
4310*4f1223e8SApple OSS Distributions &iopl.fIOPL,
4311*4f1223e8SApple OSS Distributions baseInfo,
4312*4f1223e8SApple OSS Distributions &numPageInfo,
4313*4f1223e8SApple OSS Distributions &highPage);
4314*4f1223e8SApple OSS Distributions } else {
4315*4f1223e8SApple OSS Distributions assert(theMap);
4316*4f1223e8SApple OSS Distributions error = vm_map_create_upl(theMap,
4317*4f1223e8SApple OSS Distributions startPage,
4318*4f1223e8SApple OSS Distributions (upl_size_t*)&ioplSize,
4319*4f1223e8SApple OSS Distributions &iopl.fIOPL,
4320*4f1223e8SApple OSS Distributions baseInfo,
4321*4f1223e8SApple OSS Distributions &numPageInfo,
4322*4f1223e8SApple OSS Distributions &ioplFlags,
4323*4f1223e8SApple OSS Distributions tag);
4324*4f1223e8SApple OSS Distributions }
4325*4f1223e8SApple OSS Distributions
4326*4f1223e8SApple OSS Distributions if (error != KERN_SUCCESS) {
4327*4f1223e8SApple OSS Distributions traceInterval.setEndArg2(error);
4328*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4329*4f1223e8SApple OSS Distributions goto abortExit;
4330*4f1223e8SApple OSS Distributions }
4331*4f1223e8SApple OSS Distributions
4332*4f1223e8SApple OSS Distributions assert(ioplSize);
4333*4f1223e8SApple OSS Distributions
4334*4f1223e8SApple OSS Distributions if (iopl.fIOPL) {
4335*4f1223e8SApple OSS Distributions highPage = upl_get_highest_page(iopl.fIOPL);
4336*4f1223e8SApple OSS Distributions }
4337*4f1223e8SApple OSS Distributions if (highPage > highestPage) {
4338*4f1223e8SApple OSS Distributions highestPage = highPage;
4339*4f1223e8SApple OSS Distributions }
4340*4f1223e8SApple OSS Distributions
4341*4f1223e8SApple OSS Distributions if (baseInfo->device) {
4342*4f1223e8SApple OSS Distributions numPageInfo = 1;
4343*4f1223e8SApple OSS Distributions iopl.fFlags = kIOPLOnDevice;
4344*4f1223e8SApple OSS Distributions } else {
4345*4f1223e8SApple OSS Distributions iopl.fFlags = 0;
4346*4f1223e8SApple OSS Distributions }
4347*4f1223e8SApple OSS Distributions
4348*4f1223e8SApple OSS Distributions if (byteAlignUPL) {
4349*4f1223e8SApple OSS Distributions if (iopl.fIOPL) {
4350*4f1223e8SApple OSS Distributions DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4351*4f1223e8SApple OSS Distributions iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4352*4f1223e8SApple OSS Distributions }
4353*4f1223e8SApple OSS Distributions if (startPage != (mach_vm_address_t)-1) {
4354*4f1223e8SApple OSS Distributions // assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4355*4f1223e8SApple OSS Distributions startPage -= iopl.fPageOffset;
4356*4f1223e8SApple OSS Distributions }
4357*4f1223e8SApple OSS Distributions ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4358*4f1223e8SApple OSS Distributions numBytes += iopl.fPageOffset;
4359*4f1223e8SApple OSS Distributions }
4360*4f1223e8SApple OSS Distributions
4361*4f1223e8SApple OSS Distributions iopl.fIOMDOffset = mdOffset;
4362*4f1223e8SApple OSS Distributions iopl.fPageInfo = pageIndex;
4363*4f1223e8SApple OSS Distributions
4364*4f1223e8SApple OSS Distributions if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4365*4f1223e8SApple OSS Distributions // Clean up partial created and unsaved iopl
4366*4f1223e8SApple OSS Distributions if (iopl.fIOPL) {
4367*4f1223e8SApple OSS Distributions upl_abort(iopl.fIOPL, 0);
4368*4f1223e8SApple OSS Distributions upl_deallocate(iopl.fIOPL);
4369*4f1223e8SApple OSS Distributions }
4370*4f1223e8SApple OSS Distributions error = kIOReturnNoMemory;
4371*4f1223e8SApple OSS Distributions traceInterval.setEndArg2(error);
4372*4f1223e8SApple OSS Distributions goto abortExit;
4373*4f1223e8SApple OSS Distributions }
4374*4f1223e8SApple OSS Distributions dataP = NULL;
4375*4f1223e8SApple OSS Distributions
4376*4f1223e8SApple OSS Distributions // Check for a multiple iopl's in one virtual range
4377*4f1223e8SApple OSS Distributions pageIndex += numPageInfo;
4378*4f1223e8SApple OSS Distributions mdOffset -= iopl.fPageOffset;
4379*4f1223e8SApple OSS Distributions numBytesWired += ioplSize;
4380*4f1223e8SApple OSS Distributions if (ioplSize < numBytes) {
4381*4f1223e8SApple OSS Distributions numBytes -= ioplSize;
4382*4f1223e8SApple OSS Distributions if (startPage != (mach_vm_address_t)-1) {
4383*4f1223e8SApple OSS Distributions startPage += ioplSize;
4384*4f1223e8SApple OSS Distributions }
4385*4f1223e8SApple OSS Distributions mdOffset += ioplSize;
4386*4f1223e8SApple OSS Distributions iopl.fPageOffset = 0;
4387*4f1223e8SApple OSS Distributions if (mapper) {
4388*4f1223e8SApple OSS Distributions iopl.fMappedPage = mapBase + pageIndex;
4389*4f1223e8SApple OSS Distributions }
4390*4f1223e8SApple OSS Distributions } else {
4391*4f1223e8SApple OSS Distributions mdOffset += numBytes;
4392*4f1223e8SApple OSS Distributions break;
4393*4f1223e8SApple OSS Distributions }
4394*4f1223e8SApple OSS Distributions }
4395*4f1223e8SApple OSS Distributions }
4396*4f1223e8SApple OSS Distributions
4397*4f1223e8SApple OSS Distributions _highestPage = highestPage;
4398*4f1223e8SApple OSS Distributions DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4399*4f1223e8SApple OSS Distributions
4400*4f1223e8SApple OSS Distributions if (UPL_COPYOUT_FROM & uplFlags) {
4401*4f1223e8SApple OSS Distributions _flags |= kIOMemoryPreparedReadOnly;
4402*4f1223e8SApple OSS Distributions }
4403*4f1223e8SApple OSS Distributions traceInterval.setEndCodes(numBytesWired, error);
4404*4f1223e8SApple OSS Distributions }
4405*4f1223e8SApple OSS Distributions
4406*4f1223e8SApple OSS Distributions #if IOTRACKING
4407*4f1223e8SApple OSS Distributions if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4408*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
4409*4f1223e8SApple OSS Distributions if (!dataP->fWireTracking.link.next) {
4410*4f1223e8SApple OSS Distributions IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4411*4f1223e8SApple OSS Distributions }
4412*4f1223e8SApple OSS Distributions }
4413*4f1223e8SApple OSS Distributions #endif /* IOTRACKING */
4414*4f1223e8SApple OSS Distributions
4415*4f1223e8SApple OSS Distributions return error;
4416*4f1223e8SApple OSS Distributions
4417*4f1223e8SApple OSS Distributions abortExit:
4418*4f1223e8SApple OSS Distributions {
4419*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
4420*4f1223e8SApple OSS Distributions UInt done = getNumIOPL(_memoryEntries, dataP);
4421*4f1223e8SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
4422*4f1223e8SApple OSS Distributions
4423*4f1223e8SApple OSS Distributions for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4424*4f1223e8SApple OSS Distributions if (ioplList[ioplIdx].fIOPL) {
4425*4f1223e8SApple OSS Distributions upl_abort(ioplList[ioplIdx].fIOPL, 0);
4426*4f1223e8SApple OSS Distributions upl_deallocate(ioplList[ioplIdx].fIOPL);
4427*4f1223e8SApple OSS Distributions }
4428*4f1223e8SApple OSS Distributions }
4429*4f1223e8SApple OSS Distributions _memoryEntries->setLength(computeDataSize(0, 0));
4430*4f1223e8SApple OSS Distributions }
4431*4f1223e8SApple OSS Distributions
4432*4f1223e8SApple OSS Distributions if (error == KERN_FAILURE) {
4433*4f1223e8SApple OSS Distributions error = kIOReturnCannotWire;
4434*4f1223e8SApple OSS Distributions } else if (error == KERN_MEMORY_ERROR) {
4435*4f1223e8SApple OSS Distributions error = kIOReturnNoResources;
4436*4f1223e8SApple OSS Distributions }
4437*4f1223e8SApple OSS Distributions
4438*4f1223e8SApple OSS Distributions return error;
4439*4f1223e8SApple OSS Distributions }
4440*4f1223e8SApple OSS Distributions
4441*4f1223e8SApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4442*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4443*4f1223e8SApple OSS Distributions {
4444*4f1223e8SApple OSS Distributions ioGMDData * dataP;
4445*4f1223e8SApple OSS Distributions
4446*4f1223e8SApple OSS Distributions if (size > UINT_MAX) {
4447*4f1223e8SApple OSS Distributions return false;
4448*4f1223e8SApple OSS Distributions }
4449*4f1223e8SApple OSS Distributions if (!_memoryEntries) {
4450*4f1223e8SApple OSS Distributions _memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4451*4f1223e8SApple OSS Distributions if (!_memoryEntries) {
4452*4f1223e8SApple OSS Distributions return false;
4453*4f1223e8SApple OSS Distributions }
4454*4f1223e8SApple OSS Distributions } else if (!_memoryEntries->initWithCapacity(size)) {
4455*4f1223e8SApple OSS Distributions return false;
4456*4f1223e8SApple OSS Distributions }
4457*4f1223e8SApple OSS Distributions
4458*4f1223e8SApple OSS Distributions _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4459*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
4460*4f1223e8SApple OSS Distributions
4461*4f1223e8SApple OSS Distributions if (mapper == kIOMapperWaitSystem) {
4462*4f1223e8SApple OSS Distributions IOMapper::checkForSystemMapper();
4463*4f1223e8SApple OSS Distributions mapper = IOMapper::gSystem;
4464*4f1223e8SApple OSS Distributions }
4465*4f1223e8SApple OSS Distributions dataP->fMapper = mapper;
4466*4f1223e8SApple OSS Distributions dataP->fPageCnt = 0;
4467*4f1223e8SApple OSS Distributions dataP->fMappedBase = 0;
4468*4f1223e8SApple OSS Distributions dataP->fDMAMapNumAddressBits = 64;
4469*4f1223e8SApple OSS Distributions dataP->fDMAMapAlignment = 0;
4470*4f1223e8SApple OSS Distributions dataP->fPreparationID = kIOPreparationIDUnprepared;
4471*4f1223e8SApple OSS Distributions dataP->fCompletionError = false;
4472*4f1223e8SApple OSS Distributions dataP->fMappedBaseValid = false;
4473*4f1223e8SApple OSS Distributions
4474*4f1223e8SApple OSS Distributions return true;
4475*4f1223e8SApple OSS Distributions }
4476*4f1223e8SApple OSS Distributions
4477*4f1223e8SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4478*4f1223e8SApple OSS Distributions IOMemoryDescriptor::dmaMap(
4479*4f1223e8SApple OSS Distributions IOMapper * mapper,
4480*4f1223e8SApple OSS Distributions IOMemoryDescriptor * memory,
4481*4f1223e8SApple OSS Distributions IODMACommand * command,
4482*4f1223e8SApple OSS Distributions const IODMAMapSpecification * mapSpec,
4483*4f1223e8SApple OSS Distributions uint64_t offset,
4484*4f1223e8SApple OSS Distributions uint64_t length,
4485*4f1223e8SApple OSS Distributions uint64_t * mapAddress,
4486*4f1223e8SApple OSS Distributions uint64_t * mapLength)
4487*4f1223e8SApple OSS Distributions {
4488*4f1223e8SApple OSS Distributions IOReturn err;
4489*4f1223e8SApple OSS Distributions uint32_t mapOptions;
4490*4f1223e8SApple OSS Distributions
4491*4f1223e8SApple OSS Distributions mapOptions = 0;
4492*4f1223e8SApple OSS Distributions mapOptions |= kIODMAMapReadAccess;
4493*4f1223e8SApple OSS Distributions if (!(kIOMemoryPreparedReadOnly & _flags)) {
4494*4f1223e8SApple OSS Distributions mapOptions |= kIODMAMapWriteAccess;
4495*4f1223e8SApple OSS Distributions }
4496*4f1223e8SApple OSS Distributions
4497*4f1223e8SApple OSS Distributions err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4498*4f1223e8SApple OSS Distributions mapSpec, command, NULL, mapAddress, mapLength);
4499*4f1223e8SApple OSS Distributions
4500*4f1223e8SApple OSS Distributions if (kIOReturnSuccess == err) {
4501*4f1223e8SApple OSS Distributions dmaMapRecord(mapper, command, *mapLength);
4502*4f1223e8SApple OSS Distributions }
4503*4f1223e8SApple OSS Distributions
4504*4f1223e8SApple OSS Distributions return err;
4505*4f1223e8SApple OSS Distributions }
4506*4f1223e8SApple OSS Distributions
4507*4f1223e8SApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4508*4f1223e8SApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4509*4f1223e8SApple OSS Distributions IOMapper * mapper,
4510*4f1223e8SApple OSS Distributions IODMACommand * command,
4511*4f1223e8SApple OSS Distributions uint64_t mapLength)
4512*4f1223e8SApple OSS Distributions {
4513*4f1223e8SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4514*4f1223e8SApple OSS Distributions kern_allocation_name_t alloc;
4515*4f1223e8SApple OSS Distributions int16_t prior;
4516*4f1223e8SApple OSS Distributions
4517*4f1223e8SApple OSS Distributions if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4518*4f1223e8SApple OSS Distributions kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4519*4f1223e8SApple OSS Distributions }
4520*4f1223e8SApple OSS Distributions
4521*4f1223e8SApple OSS Distributions if (!command) {
4522*4f1223e8SApple OSS Distributions return;
4523*4f1223e8SApple OSS Distributions }
4524*4f1223e8SApple OSS Distributions prior = OSAddAtomic16(1, &_dmaReferences);
4525*4f1223e8SApple OSS Distributions if (!prior) {
4526*4f1223e8SApple OSS Distributions if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4527*4f1223e8SApple OSS Distributions _mapName = alloc;
4528*4f1223e8SApple OSS Distributions mapLength = _length;
4529*4f1223e8SApple OSS Distributions kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4530*4f1223e8SApple OSS Distributions } else {
4531*4f1223e8SApple OSS Distributions _mapName = NULL;
4532*4f1223e8SApple OSS Distributions }
4533*4f1223e8SApple OSS Distributions }
4534*4f1223e8SApple OSS Distributions }
4535*4f1223e8SApple OSS Distributions
4536*4f1223e8SApple OSS Distributions IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4537*4f1223e8SApple OSS Distributions IOMemoryDescriptor::dmaUnmap(
4538*4f1223e8SApple OSS Distributions IOMapper * mapper,
4539*4f1223e8SApple OSS Distributions IODMACommand * command,
4540*4f1223e8SApple OSS Distributions uint64_t offset,
4541*4f1223e8SApple OSS Distributions uint64_t mapAddress,
4542*4f1223e8SApple OSS Distributions uint64_t mapLength)
4543*4f1223e8SApple OSS Distributions {
4544*4f1223e8SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4545*4f1223e8SApple OSS Distributions IOReturn ret;
4546*4f1223e8SApple OSS Distributions kern_allocation_name_t alloc;
4547*4f1223e8SApple OSS Distributions kern_allocation_name_t mapName;
4548*4f1223e8SApple OSS Distributions int16_t prior;
4549*4f1223e8SApple OSS Distributions
4550*4f1223e8SApple OSS Distributions mapName = NULL;
4551*4f1223e8SApple OSS Distributions prior = 0;
4552*4f1223e8SApple OSS Distributions if (command) {
4553*4f1223e8SApple OSS Distributions mapName = _mapName;
4554*4f1223e8SApple OSS Distributions if (_dmaReferences) {
4555*4f1223e8SApple OSS Distributions prior = OSAddAtomic16(-1, &_dmaReferences);
4556*4f1223e8SApple OSS Distributions } else {
4557*4f1223e8SApple OSS Distributions panic("_dmaReferences underflow");
4558*4f1223e8SApple OSS Distributions }
4559*4f1223e8SApple OSS Distributions }
4560*4f1223e8SApple OSS Distributions
4561*4f1223e8SApple OSS Distributions if (!mapLength) {
4562*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4563*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
4564*4f1223e8SApple OSS Distributions }
4565*4f1223e8SApple OSS Distributions
4566*4f1223e8SApple OSS Distributions ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4567*4f1223e8SApple OSS Distributions
4568*4f1223e8SApple OSS Distributions if ((alloc = mapper->fAllocName)) {
4569*4f1223e8SApple OSS Distributions kern_allocation_update_size(alloc, -mapLength, NULL);
4570*4f1223e8SApple OSS Distributions if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4571*4f1223e8SApple OSS Distributions mapLength = _length;
4572*4f1223e8SApple OSS Distributions kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4573*4f1223e8SApple OSS Distributions }
4574*4f1223e8SApple OSS Distributions }
4575*4f1223e8SApple OSS Distributions
4576*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(ret);
4577*4f1223e8SApple OSS Distributions return ret;
4578*4f1223e8SApple OSS Distributions }
4579*4f1223e8SApple OSS Distributions
4580*4f1223e8SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4581*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::dmaMap(
4582*4f1223e8SApple OSS Distributions IOMapper * mapper,
4583*4f1223e8SApple OSS Distributions IOMemoryDescriptor * memory,
4584*4f1223e8SApple OSS Distributions IODMACommand * command,
4585*4f1223e8SApple OSS Distributions const IODMAMapSpecification * mapSpec,
4586*4f1223e8SApple OSS Distributions uint64_t offset,
4587*4f1223e8SApple OSS Distributions uint64_t length,
4588*4f1223e8SApple OSS Distributions uint64_t * mapAddress,
4589*4f1223e8SApple OSS Distributions uint64_t * mapLength)
4590*4f1223e8SApple OSS Distributions {
4591*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnSuccess;
4592*4f1223e8SApple OSS Distributions ioGMDData * dataP;
4593*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4594*4f1223e8SApple OSS Distributions
4595*4f1223e8SApple OSS Distributions *mapAddress = 0;
4596*4f1223e8SApple OSS Distributions if (kIOMemoryHostOnly & _flags) {
4597*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
4598*4f1223e8SApple OSS Distributions }
4599*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4600*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
4601*4f1223e8SApple OSS Distributions }
4602*4f1223e8SApple OSS Distributions
4603*4f1223e8SApple OSS Distributions if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4604*4f1223e8SApple OSS Distributions || offset || (length != _length)) {
4605*4f1223e8SApple OSS Distributions err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4606*4f1223e8SApple OSS Distributions } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4607*4f1223e8SApple OSS Distributions const ioPLBlock * ioplList = getIOPLList(dataP);
4608*4f1223e8SApple OSS Distributions upl_page_info_t * pageList;
4609*4f1223e8SApple OSS Distributions uint32_t mapOptions = 0;
4610*4f1223e8SApple OSS Distributions
4611*4f1223e8SApple OSS Distributions IODMAMapSpecification mapSpec;
4612*4f1223e8SApple OSS Distributions bzero(&mapSpec, sizeof(mapSpec));
4613*4f1223e8SApple OSS Distributions mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4614*4f1223e8SApple OSS Distributions mapSpec.alignment = dataP->fDMAMapAlignment;
4615*4f1223e8SApple OSS Distributions
4616*4f1223e8SApple OSS Distributions // For external UPLs the fPageInfo field points directly to
4617*4f1223e8SApple OSS Distributions // the upl's upl_page_info_t array.
4618*4f1223e8SApple OSS Distributions if (ioplList->fFlags & kIOPLExternUPL) {
4619*4f1223e8SApple OSS Distributions pageList = (upl_page_info_t *) ioplList->fPageInfo;
4620*4f1223e8SApple OSS Distributions mapOptions |= kIODMAMapPagingPath;
4621*4f1223e8SApple OSS Distributions } else {
4622*4f1223e8SApple OSS Distributions pageList = getPageList(dataP);
4623*4f1223e8SApple OSS Distributions }
4624*4f1223e8SApple OSS Distributions
4625*4f1223e8SApple OSS Distributions if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4626*4f1223e8SApple OSS Distributions mapOptions |= kIODMAMapPageListFullyOccupied;
4627*4f1223e8SApple OSS Distributions }
4628*4f1223e8SApple OSS Distributions
4629*4f1223e8SApple OSS Distributions assert(dataP->fDMAAccess);
4630*4f1223e8SApple OSS Distributions mapOptions |= dataP->fDMAAccess;
4631*4f1223e8SApple OSS Distributions
4632*4f1223e8SApple OSS Distributions // Check for direct device non-paged memory
4633*4f1223e8SApple OSS Distributions if (ioplList->fFlags & kIOPLOnDevice) {
4634*4f1223e8SApple OSS Distributions mapOptions |= kIODMAMapPhysicallyContiguous;
4635*4f1223e8SApple OSS Distributions }
4636*4f1223e8SApple OSS Distributions
4637*4f1223e8SApple OSS Distributions IODMAMapPageList dmaPageList =
4638*4f1223e8SApple OSS Distributions {
4639*4f1223e8SApple OSS Distributions .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
4640*4f1223e8SApple OSS Distributions .pageListCount = _pages,
4641*4f1223e8SApple OSS Distributions .pageList = &pageList[0]
4642*4f1223e8SApple OSS Distributions };
4643*4f1223e8SApple OSS Distributions err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4644*4f1223e8SApple OSS Distributions command, &dmaPageList, mapAddress, mapLength);
4645*4f1223e8SApple OSS Distributions
4646*4f1223e8SApple OSS Distributions if (kIOReturnSuccess == err) {
4647*4f1223e8SApple OSS Distributions dmaMapRecord(mapper, command, *mapLength);
4648*4f1223e8SApple OSS Distributions }
4649*4f1223e8SApple OSS Distributions }
4650*4f1223e8SApple OSS Distributions
4651*4f1223e8SApple OSS Distributions return err;
4652*4f1223e8SApple OSS Distributions }
4653*4f1223e8SApple OSS Distributions
4654*4f1223e8SApple OSS Distributions /*
4655*4f1223e8SApple OSS Distributions * prepare
4656*4f1223e8SApple OSS Distributions *
4657*4f1223e8SApple OSS Distributions * Prepare the memory for an I/O transfer. This involves paging in
4658*4f1223e8SApple OSS Distributions * the memory, if necessary, and wiring it down for the duration of
4659*4f1223e8SApple OSS Distributions * the transfer. The complete() method completes the processing of
4660*4f1223e8SApple OSS Distributions * the memory after the I/O transfer finishes. This method needn't
4661*4f1223e8SApple OSS Distributions * called for non-pageable memory.
4662*4f1223e8SApple OSS Distributions */
4663*4f1223e8SApple OSS Distributions
4664*4f1223e8SApple OSS Distributions IOReturn
prepare(IODirection forDirection)4665*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4666*4f1223e8SApple OSS Distributions {
4667*4f1223e8SApple OSS Distributions IOReturn error = kIOReturnSuccess;
4668*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4669*4f1223e8SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4670*4f1223e8SApple OSS Distributions
4671*4f1223e8SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4672*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4673*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
4674*4f1223e8SApple OSS Distributions }
4675*4f1223e8SApple OSS Distributions
4676*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4677*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4678*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(kIOReturnNotAttached);
4679*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
4680*4f1223e8SApple OSS Distributions }
4681*4f1223e8SApple OSS Distributions
4682*4f1223e8SApple OSS Distributions if (_prepareLock) {
4683*4f1223e8SApple OSS Distributions IOLockLock(_prepareLock);
4684*4f1223e8SApple OSS Distributions }
4685*4f1223e8SApple OSS Distributions
4686*4f1223e8SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4687*4f1223e8SApple OSS Distributions if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4688*4f1223e8SApple OSS Distributions error = kIOReturnNotReady;
4689*4f1223e8SApple OSS Distributions goto finish;
4690*4f1223e8SApple OSS Distributions }
4691*4f1223e8SApple OSS Distributions error = wireVirtual(forDirection);
4692*4f1223e8SApple OSS Distributions }
4693*4f1223e8SApple OSS Distributions
4694*4f1223e8SApple OSS Distributions if (kIOReturnSuccess == error) {
4695*4f1223e8SApple OSS Distributions if (1 == ++_wireCount) {
4696*4f1223e8SApple OSS Distributions if (kIOMemoryClearEncrypt & _flags) {
4697*4f1223e8SApple OSS Distributions performOperation(kIOMemoryClearEncrypted, 0, _length);
4698*4f1223e8SApple OSS Distributions }
4699*4f1223e8SApple OSS Distributions
4700*4f1223e8SApple OSS Distributions ktraceEmitPhysicalSegments();
4701*4f1223e8SApple OSS Distributions }
4702*4f1223e8SApple OSS Distributions }
4703*4f1223e8SApple OSS Distributions
4704*4f1223e8SApple OSS Distributions finish:
4705*4f1223e8SApple OSS Distributions
4706*4f1223e8SApple OSS Distributions if (_prepareLock) {
4707*4f1223e8SApple OSS Distributions IOLockUnlock(_prepareLock);
4708*4f1223e8SApple OSS Distributions }
4709*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(error);
4710*4f1223e8SApple OSS Distributions
4711*4f1223e8SApple OSS Distributions return error;
4712*4f1223e8SApple OSS Distributions }
4713*4f1223e8SApple OSS Distributions
4714*4f1223e8SApple OSS Distributions /*
4715*4f1223e8SApple OSS Distributions * complete
4716*4f1223e8SApple OSS Distributions *
4717*4f1223e8SApple OSS Distributions * Complete processing of the memory after an I/O transfer finishes.
4718*4f1223e8SApple OSS Distributions * This method should not be called unless a prepare was previously
4719*4f1223e8SApple OSS Distributions * issued; the prepare() and complete() must occur in pairs, before
4720*4f1223e8SApple OSS Distributions * before and after an I/O transfer involving pageable memory.
4721*4f1223e8SApple OSS Distributions */
4722*4f1223e8SApple OSS Distributions
4723*4f1223e8SApple OSS Distributions IOReturn
complete(IODirection forDirection)4724*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4725*4f1223e8SApple OSS Distributions {
4726*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4727*4f1223e8SApple OSS Distributions ioGMDData * dataP;
4728*4f1223e8SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4729*4f1223e8SApple OSS Distributions
4730*4f1223e8SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4731*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4732*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
4733*4f1223e8SApple OSS Distributions }
4734*4f1223e8SApple OSS Distributions
4735*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4736*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4737*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(kIOReturnNotAttached);
4738*4f1223e8SApple OSS Distributions return kIOReturnNotAttached;
4739*4f1223e8SApple OSS Distributions }
4740*4f1223e8SApple OSS Distributions
4741*4f1223e8SApple OSS Distributions if (_prepareLock) {
4742*4f1223e8SApple OSS Distributions IOLockLock(_prepareLock);
4743*4f1223e8SApple OSS Distributions }
4744*4f1223e8SApple OSS Distributions do{
4745*4f1223e8SApple OSS Distributions assert(_wireCount);
4746*4f1223e8SApple OSS Distributions if (!_wireCount) {
4747*4f1223e8SApple OSS Distributions break;
4748*4f1223e8SApple OSS Distributions }
4749*4f1223e8SApple OSS Distributions dataP = getDataP(_memoryEntries);
4750*4f1223e8SApple OSS Distributions if (!dataP) {
4751*4f1223e8SApple OSS Distributions break;
4752*4f1223e8SApple OSS Distributions }
4753*4f1223e8SApple OSS Distributions
4754*4f1223e8SApple OSS Distributions if (kIODirectionCompleteWithError & forDirection) {
4755*4f1223e8SApple OSS Distributions dataP->fCompletionError = true;
4756*4f1223e8SApple OSS Distributions }
4757*4f1223e8SApple OSS Distributions
4758*4f1223e8SApple OSS Distributions if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4759*4f1223e8SApple OSS Distributions performOperation(kIOMemorySetEncrypted, 0, _length);
4760*4f1223e8SApple OSS Distributions }
4761*4f1223e8SApple OSS Distributions
4762*4f1223e8SApple OSS Distributions _wireCount--;
4763*4f1223e8SApple OSS Distributions if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4764*4f1223e8SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
4765*4f1223e8SApple OSS Distributions UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4766*4f1223e8SApple OSS Distributions
4767*4f1223e8SApple OSS Distributions if (_wireCount) {
4768*4f1223e8SApple OSS Distributions // kIODirectionCompleteWithDataValid & forDirection
4769*4f1223e8SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4770*4f1223e8SApple OSS Distributions vm_tag_t tag;
4771*4f1223e8SApple OSS Distributions tag = (typeof(tag))getVMTag(kernel_map);
4772*4f1223e8SApple OSS Distributions for (ind = 0; ind < count; ind++) {
4773*4f1223e8SApple OSS Distributions if (ioplList[ind].fIOPL) {
4774*4f1223e8SApple OSS Distributions iopl_valid_data(ioplList[ind].fIOPL, tag);
4775*4f1223e8SApple OSS Distributions }
4776*4f1223e8SApple OSS Distributions }
4777*4f1223e8SApple OSS Distributions }
4778*4f1223e8SApple OSS Distributions } else {
4779*4f1223e8SApple OSS Distributions if (_dmaReferences) {
4780*4f1223e8SApple OSS Distributions panic("complete() while dma active");
4781*4f1223e8SApple OSS Distributions }
4782*4f1223e8SApple OSS Distributions
4783*4f1223e8SApple OSS Distributions if (dataP->fMappedBaseValid) {
4784*4f1223e8SApple OSS Distributions dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4785*4f1223e8SApple OSS Distributions dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4786*4f1223e8SApple OSS Distributions }
4787*4f1223e8SApple OSS Distributions #if IOTRACKING
4788*4f1223e8SApple OSS Distributions if (dataP->fWireTracking.link.next) {
4789*4f1223e8SApple OSS Distributions IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4790*4f1223e8SApple OSS Distributions }
4791*4f1223e8SApple OSS Distributions #endif /* IOTRACKING */
4792*4f1223e8SApple OSS Distributions // Only complete iopls that we created which are for TypeVirtual
4793*4f1223e8SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4794*4f1223e8SApple OSS Distributions for (ind = 0; ind < count; ind++) {
4795*4f1223e8SApple OSS Distributions if (ioplList[ind].fIOPL) {
4796*4f1223e8SApple OSS Distributions if (dataP->fCompletionError) {
4797*4f1223e8SApple OSS Distributions upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4798*4f1223e8SApple OSS Distributions } else {
4799*4f1223e8SApple OSS Distributions upl_commit(ioplList[ind].fIOPL, NULL, 0);
4800*4f1223e8SApple OSS Distributions }
4801*4f1223e8SApple OSS Distributions upl_deallocate(ioplList[ind].fIOPL);
4802*4f1223e8SApple OSS Distributions }
4803*4f1223e8SApple OSS Distributions }
4804*4f1223e8SApple OSS Distributions } else if (kIOMemoryTypeUPL == type) {
4805*4f1223e8SApple OSS Distributions upl_set_referenced(ioplList[0].fIOPL, false);
4806*4f1223e8SApple OSS Distributions }
4807*4f1223e8SApple OSS Distributions
4808*4f1223e8SApple OSS Distributions _memoryEntries->setLength(computeDataSize(0, 0));
4809*4f1223e8SApple OSS Distributions
4810*4f1223e8SApple OSS Distributions dataP->fPreparationID = kIOPreparationIDUnprepared;
4811*4f1223e8SApple OSS Distributions _flags &= ~kIOMemoryPreparedReadOnly;
4812*4f1223e8SApple OSS Distributions
4813*4f1223e8SApple OSS Distributions if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4814*4f1223e8SApple OSS Distributions IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4815*4f1223e8SApple OSS Distributions }
4816*4f1223e8SApple OSS Distributions }
4817*4f1223e8SApple OSS Distributions }
4818*4f1223e8SApple OSS Distributions }while (false);
4819*4f1223e8SApple OSS Distributions
4820*4f1223e8SApple OSS Distributions if (_prepareLock) {
4821*4f1223e8SApple OSS Distributions IOLockUnlock(_prepareLock);
4822*4f1223e8SApple OSS Distributions }
4823*4f1223e8SApple OSS Distributions
4824*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4825*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
4826*4f1223e8SApple OSS Distributions }
4827*4f1223e8SApple OSS Distributions
4828*4f1223e8SApple OSS Distributions IOOptionBits
memoryReferenceCreateOptions(IOOptionBits options,IOMemoryMap * mapping)4829*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4830*4f1223e8SApple OSS Distributions {
4831*4f1223e8SApple OSS Distributions IOOptionBits createOptions = 0;
4832*4f1223e8SApple OSS Distributions
4833*4f1223e8SApple OSS Distributions if (!(kIOMap64Bit & options)) {
4834*4f1223e8SApple OSS Distributions panic("IOMemoryDescriptor::makeMapping !64bit");
4835*4f1223e8SApple OSS Distributions }
4836*4f1223e8SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
4837*4f1223e8SApple OSS Distributions createOptions |= kIOMemoryReferenceWrite;
4838*4f1223e8SApple OSS Distributions #if DEVELOPMENT || DEBUG
4839*4f1223e8SApple OSS Distributions if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4840*4f1223e8SApple OSS Distributions && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4841*4f1223e8SApple OSS Distributions OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4842*4f1223e8SApple OSS Distributions }
4843*4f1223e8SApple OSS Distributions #endif
4844*4f1223e8SApple OSS Distributions }
4845*4f1223e8SApple OSS Distributions return createOptions;
4846*4f1223e8SApple OSS Distributions }
4847*4f1223e8SApple OSS Distributions
4848*4f1223e8SApple OSS Distributions /*
4849*4f1223e8SApple OSS Distributions * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
4850*4f1223e8SApple OSS Distributions * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
4851*4f1223e8SApple OSS Distributions * creation.
4852*4f1223e8SApple OSS Distributions */
4853*4f1223e8SApple OSS Distributions
4854*4f1223e8SApple OSS Distributions IOMemoryMap *
makeMapping(IOMemoryDescriptor * owner,task_t __intoTask,IOVirtualAddress __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4855*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::makeMapping(
4856*4f1223e8SApple OSS Distributions IOMemoryDescriptor * owner,
4857*4f1223e8SApple OSS Distributions task_t __intoTask,
4858*4f1223e8SApple OSS Distributions IOVirtualAddress __address,
4859*4f1223e8SApple OSS Distributions IOOptionBits options,
4860*4f1223e8SApple OSS Distributions IOByteCount __offset,
4861*4f1223e8SApple OSS Distributions IOByteCount __length )
4862*4f1223e8SApple OSS Distributions {
4863*4f1223e8SApple OSS Distributions IOReturn err = kIOReturnSuccess;
4864*4f1223e8SApple OSS Distributions IOMemoryMap * mapping;
4865*4f1223e8SApple OSS Distributions
4866*4f1223e8SApple OSS Distributions if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
4867*4f1223e8SApple OSS Distributions struct IOMemoryReference * newRef;
4868*4f1223e8SApple OSS Distributions err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
4869*4f1223e8SApple OSS Distributions if (kIOReturnSuccess == err) {
4870*4f1223e8SApple OSS Distributions if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
4871*4f1223e8SApple OSS Distributions memoryReferenceFree(newRef);
4872*4f1223e8SApple OSS Distributions }
4873*4f1223e8SApple OSS Distributions }
4874*4f1223e8SApple OSS Distributions }
4875*4f1223e8SApple OSS Distributions if (kIOReturnSuccess != err) {
4876*4f1223e8SApple OSS Distributions return NULL;
4877*4f1223e8SApple OSS Distributions }
4878*4f1223e8SApple OSS Distributions mapping = IOMemoryDescriptor::makeMapping(
4879*4f1223e8SApple OSS Distributions owner, __intoTask, __address, options, __offset, __length);
4880*4f1223e8SApple OSS Distributions
4881*4f1223e8SApple OSS Distributions #if IOTRACKING
4882*4f1223e8SApple OSS Distributions if ((mapping == (IOMemoryMap *) __address)
4883*4f1223e8SApple OSS Distributions && (0 == (kIOMapStatic & mapping->fOptions))
4884*4f1223e8SApple OSS Distributions && (NULL == mapping->fSuperMap)
4885*4f1223e8SApple OSS Distributions && ((kIOTracking & gIOKitDebug) || _task)) {
4886*4f1223e8SApple OSS Distributions // only dram maps in the default on development case
4887*4f1223e8SApple OSS Distributions IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4888*4f1223e8SApple OSS Distributions }
4889*4f1223e8SApple OSS Distributions #endif /* IOTRACKING */
4890*4f1223e8SApple OSS Distributions
4891*4f1223e8SApple OSS Distributions return mapping;
4892*4f1223e8SApple OSS Distributions }
4893*4f1223e8SApple OSS Distributions
4894*4f1223e8SApple OSS Distributions IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4895*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::doMap(
4896*4f1223e8SApple OSS Distributions vm_map_t __addressMap,
4897*4f1223e8SApple OSS Distributions IOVirtualAddress * __address,
4898*4f1223e8SApple OSS Distributions IOOptionBits options,
4899*4f1223e8SApple OSS Distributions IOByteCount __offset,
4900*4f1223e8SApple OSS Distributions IOByteCount __length )
4901*4f1223e8SApple OSS Distributions {
4902*4f1223e8SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4903*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4904*4f1223e8SApple OSS Distributions #ifndef __LP64__
4905*4f1223e8SApple OSS Distributions if (!(kIOMap64Bit & options)) {
4906*4f1223e8SApple OSS Distributions panic("IOGeneralMemoryDescriptor::doMap !64bit");
4907*4f1223e8SApple OSS Distributions }
4908*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
4909*4f1223e8SApple OSS Distributions
4910*4f1223e8SApple OSS Distributions kern_return_t err;
4911*4f1223e8SApple OSS Distributions
4912*4f1223e8SApple OSS Distributions IOMemoryMap * mapping = (IOMemoryMap *) *__address;
4913*4f1223e8SApple OSS Distributions mach_vm_size_t offset = mapping->fOffset + __offset;
4914*4f1223e8SApple OSS Distributions mach_vm_size_t length = mapping->fLength;
4915*4f1223e8SApple OSS Distributions
4916*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4917*4f1223e8SApple OSS Distributions Ranges vec = _ranges;
4918*4f1223e8SApple OSS Distributions
4919*4f1223e8SApple OSS Distributions mach_vm_address_t range0Addr = 0;
4920*4f1223e8SApple OSS Distributions mach_vm_size_t range0Len = 0;
4921*4f1223e8SApple OSS Distributions
4922*4f1223e8SApple OSS Distributions if ((offset >= _length) || ((offset + length) > _length)) {
4923*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(kIOReturnBadArgument);
4924*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4925*4f1223e8SApple OSS Distributions // assert(offset == 0 && _length == 0 && length == 0);
4926*4f1223e8SApple OSS Distributions return kIOReturnBadArgument;
4927*4f1223e8SApple OSS Distributions }
4928*4f1223e8SApple OSS Distributions
4929*4f1223e8SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4930*4f1223e8SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4931*4f1223e8SApple OSS Distributions return 0;
4932*4f1223e8SApple OSS Distributions }
4933*4f1223e8SApple OSS Distributions
4934*4f1223e8SApple OSS Distributions if (vec.v) {
4935*4f1223e8SApple OSS Distributions getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
4936*4f1223e8SApple OSS Distributions }
4937*4f1223e8SApple OSS Distributions
4938*4f1223e8SApple OSS Distributions // mapping source == dest? (could be much better)
4939*4f1223e8SApple OSS Distributions if (_task
4940*4f1223e8SApple OSS Distributions && (mapping->fAddressTask == _task)
4941*4f1223e8SApple OSS Distributions && (mapping->fAddressMap == get_task_map(_task))
4942*4f1223e8SApple OSS Distributions && (options & kIOMapAnywhere)
4943*4f1223e8SApple OSS Distributions && (!(kIOMapUnique & options))
4944*4f1223e8SApple OSS Distributions && (!(kIOMapGuardedMask & options))
4945*4f1223e8SApple OSS Distributions && (1 == _rangesCount)
4946*4f1223e8SApple OSS Distributions && (0 == offset)
4947*4f1223e8SApple OSS Distributions && range0Addr
4948*4f1223e8SApple OSS Distributions && (length <= range0Len)) {
4949*4f1223e8SApple OSS Distributions mapping->fAddress = range0Addr;
4950*4f1223e8SApple OSS Distributions mapping->fOptions |= kIOMapStatic;
4951*4f1223e8SApple OSS Distributions
4952*4f1223e8SApple OSS Distributions return kIOReturnSuccess;
4953*4f1223e8SApple OSS Distributions }
4954*4f1223e8SApple OSS Distributions
4955*4f1223e8SApple OSS Distributions if (!_memRef) {
4956*4f1223e8SApple OSS Distributions err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
4957*4f1223e8SApple OSS Distributions if (kIOReturnSuccess != err) {
4958*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(err);
4959*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4960*4f1223e8SApple OSS Distributions return err;
4961*4f1223e8SApple OSS Distributions }
4962*4f1223e8SApple OSS Distributions }
4963*4f1223e8SApple OSS Distributions
4964*4f1223e8SApple OSS Distributions memory_object_t pager;
4965*4f1223e8SApple OSS Distributions pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4966*4f1223e8SApple OSS Distributions
4967*4f1223e8SApple OSS Distributions // <upl_transpose //
4968*4f1223e8SApple OSS Distributions if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4969*4f1223e8SApple OSS Distributions do{
4970*4f1223e8SApple OSS Distributions upl_t redirUPL2;
4971*4f1223e8SApple OSS Distributions upl_size_t size;
4972*4f1223e8SApple OSS Distributions upl_control_flags_t flags;
4973*4f1223e8SApple OSS Distributions unsigned int lock_count;
4974*4f1223e8SApple OSS Distributions
4975*4f1223e8SApple OSS Distributions if (!_memRef || (1 != _memRef->count)) {
4976*4f1223e8SApple OSS Distributions err = kIOReturnNotReadable;
4977*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4978*4f1223e8SApple OSS Distributions break;
4979*4f1223e8SApple OSS Distributions }
4980*4f1223e8SApple OSS Distributions
4981*4f1223e8SApple OSS Distributions size = (upl_size_t) round_page(mapping->fLength);
4982*4f1223e8SApple OSS Distributions flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4983*4f1223e8SApple OSS Distributions | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4984*4f1223e8SApple OSS Distributions
4985*4f1223e8SApple OSS Distributions if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
4986*4f1223e8SApple OSS Distributions NULL, NULL,
4987*4f1223e8SApple OSS Distributions &flags, (vm_tag_t) getVMTag(kernel_map))) {
4988*4f1223e8SApple OSS Distributions redirUPL2 = NULL;
4989*4f1223e8SApple OSS Distributions }
4990*4f1223e8SApple OSS Distributions
4991*4f1223e8SApple OSS Distributions for (lock_count = 0;
4992*4f1223e8SApple OSS Distributions IORecursiveLockHaveLock(gIOMemoryLock);
4993*4f1223e8SApple OSS Distributions lock_count++) {
4994*4f1223e8SApple OSS Distributions UNLOCK;
4995*4f1223e8SApple OSS Distributions }
4996*4f1223e8SApple OSS Distributions err = upl_transpose(redirUPL2, mapping->fRedirUPL);
4997*4f1223e8SApple OSS Distributions for (;
4998*4f1223e8SApple OSS Distributions lock_count;
4999*4f1223e8SApple OSS Distributions lock_count--) {
5000*4f1223e8SApple OSS Distributions LOCK;
5001*4f1223e8SApple OSS Distributions }
5002*4f1223e8SApple OSS Distributions
5003*4f1223e8SApple OSS Distributions if (kIOReturnSuccess != err) {
5004*4f1223e8SApple OSS Distributions IOLog("upl_transpose(%x)\n", err);
5005*4f1223e8SApple OSS Distributions err = kIOReturnSuccess;
5006*4f1223e8SApple OSS Distributions }
5007*4f1223e8SApple OSS Distributions
5008*4f1223e8SApple OSS Distributions if (redirUPL2) {
5009*4f1223e8SApple OSS Distributions upl_commit(redirUPL2, NULL, 0);
5010*4f1223e8SApple OSS Distributions upl_deallocate(redirUPL2);
5011*4f1223e8SApple OSS Distributions redirUPL2 = NULL;
5012*4f1223e8SApple OSS Distributions }
5013*4f1223e8SApple OSS Distributions {
5014*4f1223e8SApple OSS Distributions // swap the memEntries since they now refer to different vm_objects
5015*4f1223e8SApple OSS Distributions IOMemoryReference * me = _memRef;
5016*4f1223e8SApple OSS Distributions _memRef = mapping->fMemory->_memRef;
5017*4f1223e8SApple OSS Distributions mapping->fMemory->_memRef = me;
5018*4f1223e8SApple OSS Distributions }
5019*4f1223e8SApple OSS Distributions if (pager) {
5020*4f1223e8SApple OSS Distributions err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
5021*4f1223e8SApple OSS Distributions }
5022*4f1223e8SApple OSS Distributions }while (false);
5023*4f1223e8SApple OSS Distributions }
5024*4f1223e8SApple OSS Distributions // upl_transpose> //
5025*4f1223e8SApple OSS Distributions else {
5026*4f1223e8SApple OSS Distributions err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
5027*4f1223e8SApple OSS Distributions if (err) {
5028*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
5029*4f1223e8SApple OSS Distributions }
5030*4f1223e8SApple OSS Distributions if ((err == KERN_SUCCESS) && pager) {
5031*4f1223e8SApple OSS Distributions err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
5032*4f1223e8SApple OSS Distributions
5033*4f1223e8SApple OSS Distributions if (err != KERN_SUCCESS) {
5034*4f1223e8SApple OSS Distributions doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
5035*4f1223e8SApple OSS Distributions } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
5036*4f1223e8SApple OSS Distributions mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
5037*4f1223e8SApple OSS Distributions }
5038*4f1223e8SApple OSS Distributions }
5039*4f1223e8SApple OSS Distributions }
5040*4f1223e8SApple OSS Distributions
5041*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(err);
5042*4f1223e8SApple OSS Distributions if (err) {
5043*4f1223e8SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5044*4f1223e8SApple OSS Distributions }
5045*4f1223e8SApple OSS Distributions return err;
5046*4f1223e8SApple OSS Distributions }
5047*4f1223e8SApple OSS Distributions
#if IOTRACKING
// Resolve an IOTrackingUser record back to the IOMemoryMap that embeds it
// and report the map's owning task, start address and length.
// Returns kIOReturnNotReady when the map has no VM map or its cached map
// no longer matches the task's current map (e.g. after task teardown).
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
// offsetof() substitute: byte offset of 'field' within 'type', computed
// from a NULL-based object pointer.
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	// Recover the enclosing IOMemoryMap from its embedded fTracking member.
	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task = map->fAddressTask;
	*address = map->fAddress;
	*size = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5068*4f1223e8SApple OSS Distributions
5069*4f1223e8SApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5070*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5071*4f1223e8SApple OSS Distributions vm_map_t addressMap,
5072*4f1223e8SApple OSS Distributions IOVirtualAddress __address,
5073*4f1223e8SApple OSS Distributions IOByteCount __length )
5074*4f1223e8SApple OSS Distributions {
5075*4f1223e8SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5076*4f1223e8SApple OSS Distributions IOReturn ret;
5077*4f1223e8SApple OSS Distributions ret = super::doUnmap(addressMap, __address, __length);
5078*4f1223e8SApple OSS Distributions traceInterval.setEndArg1(ret);
5079*4f1223e8SApple OSS Distributions return ret;
5080*4f1223e8SApple OSS Distributions }
5081*4f1223e8SApple OSS Distributions
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

// libkern metaclass boilerplate for IOMemoryMap (OSObject subclass).
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Reserved vtable slots kept for future binary-compatible expansion.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5097*4f1223e8SApple OSS Distributions
5098*4f1223e8SApple OSS Distributions /* ex-inline function implementation */
5099*4f1223e8SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()5100*4f1223e8SApple OSS Distributions IOMemoryMap::getPhysicalAddress()
5101*4f1223e8SApple OSS Distributions {
5102*4f1223e8SApple OSS Distributions return getPhysicalSegment( 0, NULL );
5103*4f1223e8SApple OSS Distributions }
5104*4f1223e8SApple OSS Distributions
5105*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5106*4f1223e8SApple OSS Distributions
5107*4f1223e8SApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5108*4f1223e8SApple OSS Distributions IOMemoryMap::init(
5109*4f1223e8SApple OSS Distributions task_t intoTask,
5110*4f1223e8SApple OSS Distributions mach_vm_address_t toAddress,
5111*4f1223e8SApple OSS Distributions IOOptionBits _options,
5112*4f1223e8SApple OSS Distributions mach_vm_size_t _offset,
5113*4f1223e8SApple OSS Distributions mach_vm_size_t _length )
5114*4f1223e8SApple OSS Distributions {
5115*4f1223e8SApple OSS Distributions if (!intoTask) {
5116*4f1223e8SApple OSS Distributions return false;
5117*4f1223e8SApple OSS Distributions }
5118*4f1223e8SApple OSS Distributions
5119*4f1223e8SApple OSS Distributions if (!super::init()) {
5120*4f1223e8SApple OSS Distributions return false;
5121*4f1223e8SApple OSS Distributions }
5122*4f1223e8SApple OSS Distributions
5123*4f1223e8SApple OSS Distributions fAddressMap = get_task_map(intoTask);
5124*4f1223e8SApple OSS Distributions if (!fAddressMap) {
5125*4f1223e8SApple OSS Distributions return false;
5126*4f1223e8SApple OSS Distributions }
5127*4f1223e8SApple OSS Distributions vm_map_reference(fAddressMap);
5128*4f1223e8SApple OSS Distributions
5129*4f1223e8SApple OSS Distributions fAddressTask = intoTask;
5130*4f1223e8SApple OSS Distributions fOptions = _options;
5131*4f1223e8SApple OSS Distributions fLength = _length;
5132*4f1223e8SApple OSS Distributions fOffset = _offset;
5133*4f1223e8SApple OSS Distributions fAddress = toAddress;
5134*4f1223e8SApple OSS Distributions
5135*4f1223e8SApple OSS Distributions return true;
5136*4f1223e8SApple OSS Distributions }
5137*4f1223e8SApple OSS Distributions
5138*4f1223e8SApple OSS Distributions bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5139*4f1223e8SApple OSS Distributions IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5140*4f1223e8SApple OSS Distributions {
5141*4f1223e8SApple OSS Distributions if (!_memory) {
5142*4f1223e8SApple OSS Distributions return false;
5143*4f1223e8SApple OSS Distributions }
5144*4f1223e8SApple OSS Distributions
5145*4f1223e8SApple OSS Distributions if (!fSuperMap) {
5146*4f1223e8SApple OSS Distributions if ((_offset + fLength) > _memory->getLength()) {
5147*4f1223e8SApple OSS Distributions return false;
5148*4f1223e8SApple OSS Distributions }
5149*4f1223e8SApple OSS Distributions fOffset = _offset;
5150*4f1223e8SApple OSS Distributions }
5151*4f1223e8SApple OSS Distributions
5152*4f1223e8SApple OSS Distributions
5153*4f1223e8SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5154*4f1223e8SApple OSS Distributions if (fMemory) {
5155*4f1223e8SApple OSS Distributions if (fMemory != _memory) {
5156*4f1223e8SApple OSS Distributions fMemory->removeMapping(this);
5157*4f1223e8SApple OSS Distributions }
5158*4f1223e8SApple OSS Distributions }
5159*4f1223e8SApple OSS Distributions fMemory = os::move(tempval);
5160*4f1223e8SApple OSS Distributions
5161*4f1223e8SApple OSS Distributions return true;
5162*4f1223e8SApple OSS Distributions }
5163*4f1223e8SApple OSS Distributions
// Base-class stub: IOMemoryDescriptor itself cannot create a mapping;
// concrete subclasses (e.g. IOGeneralMemoryDescriptor) override doMap.
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress      * __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	return kIOReturnUnsupported;
}
5174*4f1223e8SApple OSS Distributions
// Block a faulting thread while this descriptor is redirected.
// If redirect(true) has set kIOMemoryRedirected, sleep (SLEEP) until
// redirect(false) clears the flag and issues the matching WAKEUP (see
// IOMemoryDescriptor::redirect below). No pages are entered here;
// populateDevicePager() does that work.
IOReturn
IOMemoryDescriptor::handleFault(
	void * _pager,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Sleep at least once, then recheck until the redirect is lifted.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5191*4f1223e8SApple OSS Distributions
// Enter the physical pages backing [sourceOffset, sourceOffset + length)
// into the device pager so accesses to a mapping of this descriptor
// resolve without further faulting.
//   _pager:     device pager (reserved->dp.devicePager) cast from void*.
//   addressMap: VM map holding the mapping (used only for the kernel_map
//               pre-fault below).
//   address:    mapped virtual address corresponding to sourceOffset.
//   options:    kIOMap* flags; only kIOMapReadOnly is consulted here.
// NOTE: dereferences 'reserved' unconditionally — callers must have set
// up the device-pager reserved state first.
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void * _pager,
	vm_map_t addressMap,
	mach_vm_address_t address,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length,
	IOOptionBits options )
{
	IOReturn err = kIOReturnSuccess;
	memory_object_t pager = (memory_object_t) _pager;
	mach_vm_size_t size;
	mach_vm_size_t bytes;
	mach_vm_size_t page;
	mach_vm_size_t pageOffset;
	mach_vm_size_t pagerOffset;
	IOPhysicalLength segLen, chunk;
	addr64_t physAddr;
	IOOptionBits type;

	type = _flags & kIOMemoryTypeMask;

	// A physically contiguous pager is always populated from offset zero,
	// regardless of the faulting offset.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the first segment down to a page boundary; extend the length
	// by the same amount so the whole first page is covered.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Sanity check: physical-range descriptors (other than UPL-backed
		// ones) should not cover pmap-managed pages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Contiguous pagers take the whole (page-rounded) segment in one
		// call; otherwise populate one page at a time.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next physical segment; only the first segment
		// can start mid-page, so pageOffset drops to zero afterwards.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Residual bytes mean the descriptor ran out of segments early.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5294*4f1223e8SApple OSS Distributions
// Tear down the VM mapping described by an IOMemoryMap.
// NOTE: despite the signature, __address is actually the IOMemoryMap
// object itself (see IOMemoryMap::unmap, which passes 'this'), and
// __length must be zero — a non-zero length panics.
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	if (__length) {
		panic("doUnmap");
	}

	// The real map/address/length come from the mapping object.
	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address = mapping->fAddress;
	length = mapping->fLength;

	// kIOMapOverwrite mappings did not allocate VM space of their own,
	// so there is nothing to deallocate.
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffers live in a pageable submap, not kernel_map proper.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		// Extra diagnostics on sub-PAGE_SIZE (4K-on-16K) maps.
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5339*4f1223e8SApple OSS Distributions
// Enable or disable redirection for this descriptor and all its mappings.
// doRedirect == true: set kIOMemoryRedirected and redirect each mapping
// (which unmaps its pages) so later accesses block in handleFault().
// doRedirect == false: clear the flag, re-populate kernel device-pager
// mappings, and WAKEUP threads sleeping in handleFault().
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;
	IOMemoryMap * mapping = NULL;
	OSSharedPtr<OSIterator> iter;

	LOCK;

	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// On un-redirect, kernel mappings backed by a device pager
				// must be re-populated now — they cannot fault in later
				// (see populateDevicePager's kernel_map pre-fault).
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	if (!doRedirect) {
		// Wake any threads blocked in handleFault().
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5394*4f1223e8SApple OSS Distributions
// Redirect or restore this single mapping.
// Mappings not belonging to safeTask (and not kIOMapStatic) simply have
// their pages unmapped — they will fault on next access. Write-combine
// mappings instead have their cache mode flipped to inhibit while the
// redirect is active.
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// Submaps: redirection is handled through the parent mapping.
//		err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing to do if never mapped or already torn down.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// The safe task keeps access, but with caching inhibited
				// while redirected; restored on un-redirect.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// For physical-range descriptors, propagate the redirect state to the
	// descriptor itself when it differs from the requested state.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5438*4f1223e8SApple OSS Distributions
5439*4f1223e8SApple OSS Distributions IOReturn
unmap(void)5440*4f1223e8SApple OSS Distributions IOMemoryMap::unmap( void )
5441*4f1223e8SApple OSS Distributions {
5442*4f1223e8SApple OSS Distributions IOReturn err;
5443*4f1223e8SApple OSS Distributions
5444*4f1223e8SApple OSS Distributions LOCK;
5445*4f1223e8SApple OSS Distributions
5446*4f1223e8SApple OSS Distributions if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5447*4f1223e8SApple OSS Distributions && (0 == (kIOMapStatic & fOptions))) {
5448*4f1223e8SApple OSS Distributions err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5449*4f1223e8SApple OSS Distributions } else {
5450*4f1223e8SApple OSS Distributions err = kIOReturnSuccess;
5451*4f1223e8SApple OSS Distributions }
5452*4f1223e8SApple OSS Distributions
5453*4f1223e8SApple OSS Distributions if (fAddressMap) {
5454*4f1223e8SApple OSS Distributions vm_map_deallocate(fAddressMap);
5455*4f1223e8SApple OSS Distributions fAddressMap = NULL;
5456*4f1223e8SApple OSS Distributions }
5457*4f1223e8SApple OSS Distributions
5458*4f1223e8SApple OSS Distributions fAddress = 0;
5459*4f1223e8SApple OSS Distributions
5460*4f1223e8SApple OSS Distributions UNLOCK;
5461*4f1223e8SApple OSS Distributions
5462*4f1223e8SApple OSS Distributions return err;
5463*4f1223e8SApple OSS Distributions }
5464*4f1223e8SApple OSS Distributions
// Called when the mapping's owning task terminates: optionally perform an
// explicit unmap (only when userClientUnmap() was requested), then drop
// the reference on the task's VM map and clear the task/address fields.
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	// The unmap() path removes the tracking entry itself (see
	// IOMemoryDescriptor::doUnmap); otherwise remove it here.
	else {
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress = 0;
	UNLOCK;
}
5486*4f1223e8SApple OSS Distributions
// Mark this mapping so taskDied() performs an explicit unmap when the
// owning task exits (see the fUserClientUnmap check there).
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5493*4f1223e8SApple OSS Distributions
// Overload the release mechanism. All mappings must be a member
// of a memory descriptors _mappings set. This means that we
// always have 2 references on a mapping. When either of these mappings
// are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// Serialize against teardown; free when the count drops to the two
	// expected residual references.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5505*4f1223e8SApple OSS Distributions
// Final teardown: release the VM mapping, detach from the memory
// descriptor and any parent map, and release the redirection UPL.
void
IOMemoryMap::free()
{
	// Releases the VM range and the vm_map reference (if still held).
	unmap();

	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// Commit and free any UPL held for redirection.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5529*4f1223e8SApple OSS Distributions
// Returns the byte length of the mapped range.
IOByteCount
IOMemoryMap::getLength()
{
	return fLength;
}
5535*4f1223e8SApple OSS Distributions
// Returns the virtual address of the mapping. On 32-bit (!__LP64__)
// kernels, warns when a (possibly truncated) 32-bit IOVirtualAddress is
// requested for an address that lives in a 64-bit VM map.
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// Return value intentionally discarded: invoked only so the
		// parent mapping can run the 64-bit-map warning check below.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		// A 32-bit IOVirtualAddress cannot represent an address from a
		// 64-bit map; point callers at ::getAddress() instead.
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5551*4f1223e8SApple OSS Distributions
#ifndef __LP64__
// Legacy (pre-LP64) accessor: full-width mapped address. On LP64 kernels
// getAddress() is provided elsewhere with a different signature.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

// Legacy (pre-LP64) accessor: full-width mapped length.
mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5565*4f1223e8SApple OSS Distributions
5566*4f1223e8SApple OSS Distributions
5567*4f1223e8SApple OSS Distributions task_t
getAddressTask()5568*4f1223e8SApple OSS Distributions IOMemoryMap::getAddressTask()
5569*4f1223e8SApple OSS Distributions {
5570*4f1223e8SApple OSS Distributions if (fSuperMap) {
5571*4f1223e8SApple OSS Distributions return fSuperMap->getAddressTask();
5572*4f1223e8SApple OSS Distributions } else {
5573*4f1223e8SApple OSS Distributions return fAddressTask;
5574*4f1223e8SApple OSS Distributions }
5575*4f1223e8SApple OSS Distributions }
5576*4f1223e8SApple OSS Distributions
// Returns the kIOMap* option bits this mapping was created with.
IOOptionBits
IOMemoryMap::getMapOptions()
{
	return fOptions;
}
5582*4f1223e8SApple OSS Distributions
// Returns a borrowed (non-retained) pointer to the backing descriptor.
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
5588*4f1223e8SApple OSS Distributions
// Tests whether this existing mapping can satisfy the request described by
// newMapping: same task/VM map, compatible protection, guarding and cache
// options, and a requested range contained within ours. On an exact range
// match, returns this object with an extra retain; for a contained
// sub-range, configures newMapping as a sub-map of this one and returns
// it. Returns NULL when the request cannot be served by this mapping.
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t task = newMapping->getAddressTask();
	mach_vm_address_t toAddress = newMapping->fAddress;
	IOOptionBits _options = newMapping->fOptions;
	mach_vm_size_t _offset = newMapping->fOffset;
	mach_vm_size_t _length = newMapping->fLength;

	// Must target the same VM map this mapping already lives in.
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// Read-only versus read-write must agree (XOR isolates a mismatch).
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	// Guarded-mapping options must agree.
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	// Cache mode must agree, unless the request accepts the default.
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must land exactly where we already are.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// Requested range must start at or after our own start...
	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	// ...and must not extend past our end.
	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		// Exact match: reuse this mapping, retained for the caller.
		retain();
		newMapping = this;
	} else {
		// Proper sub-range: turn newMapping into a sub-map of this one.
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset = fOffset + _offset;
		newMapping->fAddress = fAddress + _offset;
	}

	return newMapping;
}
5638*4f1223e8SApple OSS Distributions
5639*4f1223e8SApple OSS Distributions IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5640*4f1223e8SApple OSS Distributions IOMemoryMap::wireRange(
5641*4f1223e8SApple OSS Distributions uint32_t options,
5642*4f1223e8SApple OSS Distributions mach_vm_size_t offset,
5643*4f1223e8SApple OSS Distributions mach_vm_size_t length)
5644*4f1223e8SApple OSS Distributions {
5645*4f1223e8SApple OSS Distributions IOReturn kr;
5646*4f1223e8SApple OSS Distributions mach_vm_address_t start = trunc_page_64(fAddress + offset);
5647*4f1223e8SApple OSS Distributions mach_vm_address_t end = round_page_64(fAddress + offset + length);
5648*4f1223e8SApple OSS Distributions vm_prot_t prot;
5649*4f1223e8SApple OSS Distributions
5650*4f1223e8SApple OSS Distributions prot = (kIODirectionOutIn & options);
5651*4f1223e8SApple OSS Distributions if (prot) {
5652*4f1223e8SApple OSS Distributions kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5653*4f1223e8SApple OSS Distributions } else {
5654*4f1223e8SApple OSS Distributions kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5655*4f1223e8SApple OSS Distributions }
5656*4f1223e8SApple OSS Distributions
5657*4f1223e8SApple OSS Distributions return kr;
5658*4f1223e8SApple OSS Distributions }
5659*4f1223e8SApple OSS Distributions
5660*4f1223e8SApple OSS Distributions
// Returns the physical address (and contiguous segment length via
// _length) backing the byte at _offset within this mapping, by delegating
// to the descriptor with the mapping's own base offset applied.
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress address;

	// Global memory lock guards the descriptor lookup.
	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5680*4f1223e8SApple OSS Distributions
5681*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5682*4f1223e8SApple OSS Distributions
5683*4f1223e8SApple OSS Distributions #undef super
5684*4f1223e8SApple OSS Distributions #define super OSObject
5685*4f1223e8SApple OSS Distributions
5686*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5687*4f1223e8SApple OSS Distributions
5688*4f1223e8SApple OSS Distributions void
initialize(void)5689*4f1223e8SApple OSS Distributions IOMemoryDescriptor::initialize( void )
5690*4f1223e8SApple OSS Distributions {
5691*4f1223e8SApple OSS Distributions if (NULL == gIOMemoryLock) {
5692*4f1223e8SApple OSS Distributions gIOMemoryLock = IORecursiveLockAlloc();
5693*4f1223e8SApple OSS Distributions }
5694*4f1223e8SApple OSS Distributions
5695*4f1223e8SApple OSS Distributions gIOLastPage = IOGetLastPageNumber();
5696*4f1223e8SApple OSS Distributions }
5697*4f1223e8SApple OSS Distributions
void
IOMemoryDescriptor::free( void )
{
	// Drop the set of live mappings.
	if (_mappings) {
		_mappings.reset();
	}

	// Tear down lazily-allocated reserved state, if any was created.
	if (reserved) {
		cleanKernelReserved(reserved);
		IOFreeType(reserved, IOMemoryDescriptorReserved);
		reserved = NULL;
	}
	super::free();
}
5712*4f1223e8SApple OSS Distributions
// Records a pre-existing mapping of this descriptor at a caller-chosen
// fixed address in intoTask. kIOMapStatic means no VM entry is created
// (or later torn down) by the mapping object; it only tracks the range.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t intoTask,
	IOVirtualAddress mapAddress,
	IOOptionBits options )
{
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}
5723*4f1223e8SApple OSS Distributions
// Convenience: map the whole descriptor anywhere in the kernel task.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}
5732*4f1223e8SApple OSS Distributions
#ifndef __LP64__
// Legacy (pre-LP64) mapping entry point. Fixed-address requests into a
// 64-bit task are refused because atAddress is only 32 bits wide here;
// such callers must use createMappingInTask() directly.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t intoTask,
	IOVirtualAddress atAddress,
	IOOptionBits options,
	IOByteCount offset,
	IOByteCount length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5751*4f1223e8SApple OSS Distributions
// Creates a new mapping of this descriptor in intoTask. A zero length
// maps the whole descriptor. The freshly allocated IOMemoryMap is handed
// to makeMapping() (kIOMap64Bit calling convention passes the object in
// the address parameter), which consumes it and returns the final —
// possibly shared, already retained — mapping. The returned shared
// pointer adopts that reference without an extra retain.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t intoTask,
	mach_vm_address_t atAddress,
	IOOptionBits options,
	mach_vm_size_t offset,
	mach_vm_size_t length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		// Default to mapping the entire descriptor.
		length = getLength();
	}

	mapping = new IOMemoryMap;

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
5794*4f1223e8SApple OSS Distributions
#ifndef __LP64__ // there is only a 64 bit version for LP64
// Legacy thunk: widen the 32-bit offset and forward to the 64-bit
// redirect implementation below.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    IOByteCount offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5804*4f1223e8SApple OSS Distributions
// Redirects this live mapping to a new backing memory descriptor, or —
// with a NULL newBackingMemory — blocks access to the current pages in
// preparation for a later redirect. Works by taking a UPL with
// UPL_BLOCK_ACCESS over the old pages, unmapping them, then rebuilding
// this same mapping object against the new descriptor and committing the
// UPL to unblock faults.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    mach_vm_size_t offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			// Keep a reference to purely-physical backing memory while
			// its pages are unmapped below.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// Only take the blocking UPL once, and only when this map is
			// the sole user of the underlying memory entry.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					// Drop the current translations; faults stall on the
					// blocking UPL until the redirect completes.
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) {
						// Intentionally disabled legacy path.
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// Rebuild this same mapping object on the new
					// descriptor; makeMapping must hand back `this`.
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				if (fRedirUPL) {
					// Unblock access and release the blocking UPL.
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) {
					// Intentionally disabled legacy path.
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5865*4f1223e8SApple OSS Distributions
// Central mapping factory. With kIOMap64Bit, __address does not carry an
// address: it carries the IOMemoryMap object to be realized (see
// createMappingInTask). That object is consumed — on success the returned
// mapping (which may be a pre-existing compatible mapping rather than the
// one passed in) is retained and registered in the descriptor's _mappings
// set; on failure the passed-in mapping is released and NULL is returned.
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor * owner,
	task_t __intoTask,
	IOVirtualAddress __address,
	IOOptionBits options,
	IOByteCount __offset,
	IOByteCount __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	// __block: written from inside the iterateObjects block below.
	__block IOMemoryMap * result = NULL;

	IOMemoryMap * mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset = mapping->fOffset + __offset;
	mach_vm_size_t length = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Static mapping: just record it, no VM work required.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount physLen;

			// if (owner != this) continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Physical descriptor: map through a transient
				// descriptor covering just the requested segment.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
						addMapping(result);
						result->setMemoryDescriptor(this, offset);
						return true;
					}
					return false;
				});
			}
			if (result || (options & kIOMapReference)) {
				// Either a compatible mapping was found, or a reference
				// lookup failed — the passed-in object is consumed.
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		if (!mapDesc) {
			// No transient descriptor: map this descriptor itself.
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
5963*4f1223e8SApple OSS Distributions
5964*4f1223e8SApple OSS Distributions void
addMapping(IOMemoryMap * mapping)5965*4f1223e8SApple OSS Distributions IOMemoryDescriptor::addMapping(
5966*4f1223e8SApple OSS Distributions IOMemoryMap * mapping )
5967*4f1223e8SApple OSS Distributions {
5968*4f1223e8SApple OSS Distributions if (mapping) {
5969*4f1223e8SApple OSS Distributions if (NULL == _mappings) {
5970*4f1223e8SApple OSS Distributions _mappings = OSSet::withCapacity(1);
5971*4f1223e8SApple OSS Distributions }
5972*4f1223e8SApple OSS Distributions if (_mappings) {
5973*4f1223e8SApple OSS Distributions _mappings->setObject( mapping );
5974*4f1223e8SApple OSS Distributions }
5975*4f1223e8SApple OSS Distributions }
5976*4f1223e8SApple OSS Distributions }
5977*4f1223e8SApple OSS Distributions
5978*4f1223e8SApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)5979*4f1223e8SApple OSS Distributions IOMemoryDescriptor::removeMapping(
5980*4f1223e8SApple OSS Distributions IOMemoryMap * mapping )
5981*4f1223e8SApple OSS Distributions {
5982*4f1223e8SApple OSS Distributions if (_mappings) {
5983*4f1223e8SApple OSS Distributions _mappings->removeObject( mapping);
5984*4f1223e8SApple OSS Distributions }
5985*4f1223e8SApple OSS Distributions }
5986*4f1223e8SApple OSS Distributions
// Stores the IOMapper option bits used when this descriptor is mapped
// through an IOMapper.
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}
5992*4f1223e8SApple OSS Distributions
// Returns the IOMapper option bits set via setMapperOptions().
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}
5998*4f1223e8SApple OSS Distributions
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
// Every legacy entry point below is deliberately disabled: each always
// fails (returns false / NULL). They remain only so 32-bit binaries that
// link against these symbols continue to load.
bool
IOMemoryDescriptor::initWithAddress(void * address,
    IOByteCount length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount length,
	IODirection direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32 withCount,
	IODirection direction,
	task_t task,
	bool asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
    UInt32 withCount,
    IODirection direction,
    bool asReference)
{
	return false;
}

// Obsolete: virtual segment access was removed; always returns NULL.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
6055*4f1223e8SApple OSS Distributions
6056*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6057*4f1223e8SApple OSS Distributions
6058*4f1223e8SApple OSS Distributions bool
serialize(OSSerialize * s) const6059*4f1223e8SApple OSS Distributions IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
6060*4f1223e8SApple OSS Distributions {
6061*4f1223e8SApple OSS Distributions OSSharedPtr<OSSymbol const> keys[2] = {NULL};
6062*4f1223e8SApple OSS Distributions OSSharedPtr<OSObject> values[2] = {NULL};
6063*4f1223e8SApple OSS Distributions OSSharedPtr<OSArray> array;
6064*4f1223e8SApple OSS Distributions
6065*4f1223e8SApple OSS Distributions struct SerData {
6066*4f1223e8SApple OSS Distributions user_addr_t address;
6067*4f1223e8SApple OSS Distributions user_size_t length;
6068*4f1223e8SApple OSS Distributions };
6069*4f1223e8SApple OSS Distributions
6070*4f1223e8SApple OSS Distributions unsigned int index;
6071*4f1223e8SApple OSS Distributions
6072*4f1223e8SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
6073*4f1223e8SApple OSS Distributions
6074*4f1223e8SApple OSS Distributions if (s == NULL) {
6075*4f1223e8SApple OSS Distributions return false;
6076*4f1223e8SApple OSS Distributions }
6077*4f1223e8SApple OSS Distributions
6078*4f1223e8SApple OSS Distributions array = OSArray::withCapacity(4);
6079*4f1223e8SApple OSS Distributions if (!array) {
6080*4f1223e8SApple OSS Distributions return false;
6081*4f1223e8SApple OSS Distributions }
6082*4f1223e8SApple OSS Distributions
6083*4f1223e8SApple OSS Distributions OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6084*4f1223e8SApple OSS Distributions if (!vcopy) {
6085*4f1223e8SApple OSS Distributions return false;
6086*4f1223e8SApple OSS Distributions }
6087*4f1223e8SApple OSS Distributions
6088*4f1223e8SApple OSS Distributions keys[0] = OSSymbol::withCString("address");
6089*4f1223e8SApple OSS Distributions keys[1] = OSSymbol::withCString("length");
6090*4f1223e8SApple OSS Distributions
6091*4f1223e8SApple OSS Distributions // Copy the volatile data so we don't have to allocate memory
6092*4f1223e8SApple OSS Distributions // while the lock is held.
6093*4f1223e8SApple OSS Distributions LOCK;
6094*4f1223e8SApple OSS Distributions if (vcopy.size() == _rangesCount) {
6095*4f1223e8SApple OSS Distributions Ranges vec = _ranges;
6096*4f1223e8SApple OSS Distributions for (index = 0; index < vcopy.size(); index++) {
6097*4f1223e8SApple OSS Distributions mach_vm_address_t addr; mach_vm_size_t len;
6098*4f1223e8SApple OSS Distributions getAddrLenForInd(addr, len, type, vec, index, _task);
6099*4f1223e8SApple OSS Distributions vcopy[index].address = addr;
6100*4f1223e8SApple OSS Distributions vcopy[index].length = len;
6101*4f1223e8SApple OSS Distributions }
6102*4f1223e8SApple OSS Distributions } else {
6103*4f1223e8SApple OSS Distributions // The descriptor changed out from under us. Give up.
6104*4f1223e8SApple OSS Distributions UNLOCK;
6105*4f1223e8SApple OSS Distributions return false;
6106*4f1223e8SApple OSS Distributions }
6107*4f1223e8SApple OSS Distributions UNLOCK;
6108*4f1223e8SApple OSS Distributions
6109*4f1223e8SApple OSS Distributions for (index = 0; index < vcopy.size(); index++) {
6110*4f1223e8SApple OSS Distributions user_addr_t addr = vcopy[index].address;
6111*4f1223e8SApple OSS Distributions IOByteCount len = (IOByteCount) vcopy[index].length;
6112*4f1223e8SApple OSS Distributions values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6113*4f1223e8SApple OSS Distributions if (values[0] == NULL) {
6114*4f1223e8SApple OSS Distributions return false;
6115*4f1223e8SApple OSS Distributions }
6116*4f1223e8SApple OSS Distributions values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6117*4f1223e8SApple OSS Distributions if (values[1] == NULL) {
6118*4f1223e8SApple OSS Distributions return false;
6119*4f1223e8SApple OSS Distributions }
6120*4f1223e8SApple OSS Distributions OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6121*4f1223e8SApple OSS Distributions if (dict == NULL) {
6122*4f1223e8SApple OSS Distributions return false;
6123*4f1223e8SApple OSS Distributions }
6124*4f1223e8SApple OSS Distributions array->setObject(dict.get());
6125*4f1223e8SApple OSS Distributions dict.reset();
6126*4f1223e8SApple OSS Distributions values[0].reset();
6127*4f1223e8SApple OSS Distributions values[1].reset();
6128*4f1223e8SApple OSS Distributions }
6129*4f1223e8SApple OSS Distributions
6130*4f1223e8SApple OSS Distributions return array->serialize(s);
6131*4f1223e8SApple OSS Distributions }
6132*4f1223e8SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6133*4f1223e8SApple OSS Distributions
6134*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
6135*4f1223e8SApple OSS Distributions #ifdef __LP64__
6136*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
6137*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
6138*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
6139*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
6140*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
6141*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
6142*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
6143*4f1223e8SApple OSS Distributions #else /* !__LP64__ */
6144*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
6145*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
6146*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
6147*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
6148*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
6149*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
6150*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
6151*4f1223e8SApple OSS Distributions #endif /* !__LP64__ */
6152*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
6153*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
6154*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
6155*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
6156*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
6157*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
6158*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
6159*4f1223e8SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6160*4f1223e8SApple OSS Distributions
6161*4f1223e8SApple OSS Distributions /* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
6162*4f1223e8SApple OSS Distributions KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
6163*4f1223e8SApple OSS Distributions struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6164*4f1223e8SApple OSS Distributions
6165*4f1223e8SApple OSS Distributions /* ex-inline function implementation */
6166*4f1223e8SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()6167*4f1223e8SApple OSS Distributions IOMemoryDescriptor::getPhysicalAddress()
6168*4f1223e8SApple OSS Distributions {
6169*4f1223e8SApple OSS Distributions return getPhysicalSegment( 0, NULL );
6170*4f1223e8SApple OSS Distributions }
6171*4f1223e8SApple OSS Distributions
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData,OSObject)6172*4f1223e8SApple OSS Distributions OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6173*4f1223e8SApple OSS Distributions
6174*4f1223e8SApple OSS Distributions OSPtr<_IOMemoryDescriptorMixedData>
6175*4f1223e8SApple OSS Distributions _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6176*4f1223e8SApple OSS Distributions {
6177*4f1223e8SApple OSS Distributions OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6178*4f1223e8SApple OSS Distributions if (me && !me->initWithCapacity(capacity)) {
6179*4f1223e8SApple OSS Distributions return nullptr;
6180*4f1223e8SApple OSS Distributions }
6181*4f1223e8SApple OSS Distributions return me;
6182*4f1223e8SApple OSS Distributions }
6183*4f1223e8SApple OSS Distributions
6184*4f1223e8SApple OSS Distributions bool
initWithCapacity(size_t capacity)6185*4f1223e8SApple OSS Distributions _IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
6186*4f1223e8SApple OSS Distributions {
6187*4f1223e8SApple OSS Distributions if (_data && (!capacity || (_capacity < capacity))) {
6188*4f1223e8SApple OSS Distributions freeMemory();
6189*4f1223e8SApple OSS Distributions }
6190*4f1223e8SApple OSS Distributions
6191*4f1223e8SApple OSS Distributions if (!OSObject::init()) {
6192*4f1223e8SApple OSS Distributions return false;
6193*4f1223e8SApple OSS Distributions }
6194*4f1223e8SApple OSS Distributions
6195*4f1223e8SApple OSS Distributions if (!_data && capacity) {
6196*4f1223e8SApple OSS Distributions _data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
6197*4f1223e8SApple OSS Distributions Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
6198*4f1223e8SApple OSS Distributions if (!_data) {
6199*4f1223e8SApple OSS Distributions return false;
6200*4f1223e8SApple OSS Distributions }
6201*4f1223e8SApple OSS Distributions _capacity = capacity;
6202*4f1223e8SApple OSS Distributions }
6203*4f1223e8SApple OSS Distributions
6204*4f1223e8SApple OSS Distributions _length = 0;
6205*4f1223e8SApple OSS Distributions
6206*4f1223e8SApple OSS Distributions return true;
6207*4f1223e8SApple OSS Distributions }
6208*4f1223e8SApple OSS Distributions
6209*4f1223e8SApple OSS Distributions void
free()6210*4f1223e8SApple OSS Distributions _IOMemoryDescriptorMixedData::free()
6211*4f1223e8SApple OSS Distributions {
6212*4f1223e8SApple OSS Distributions freeMemory();
6213*4f1223e8SApple OSS Distributions OSObject::free();
6214*4f1223e8SApple OSS Distributions }
6215*4f1223e8SApple OSS Distributions
6216*4f1223e8SApple OSS Distributions void
freeMemory()6217*4f1223e8SApple OSS Distributions _IOMemoryDescriptorMixedData::freeMemory()
6218*4f1223e8SApple OSS Distributions {
6219*4f1223e8SApple OSS Distributions kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
6220*4f1223e8SApple OSS Distributions _data = nullptr;
6221*4f1223e8SApple OSS Distributions _capacity = _length = 0;
6222*4f1223e8SApple OSS Distributions }
6223*4f1223e8SApple OSS Distributions
6224*4f1223e8SApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6225*4f1223e8SApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6226*4f1223e8SApple OSS Distributions {
6227*4f1223e8SApple OSS Distributions const auto oldLength = getLength();
6228*4f1223e8SApple OSS Distributions size_t newLength;
6229*4f1223e8SApple OSS Distributions if (os_add_overflow(oldLength, length, &newLength)) {
6230*4f1223e8SApple OSS Distributions return false;
6231*4f1223e8SApple OSS Distributions }
6232*4f1223e8SApple OSS Distributions
6233*4f1223e8SApple OSS Distributions if (!setLength(newLength)) {
6234*4f1223e8SApple OSS Distributions return false;
6235*4f1223e8SApple OSS Distributions }
6236*4f1223e8SApple OSS Distributions
6237*4f1223e8SApple OSS Distributions unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6238*4f1223e8SApple OSS Distributions if (bytes) {
6239*4f1223e8SApple OSS Distributions bcopy(bytes, dest, length);
6240*4f1223e8SApple OSS Distributions }
6241*4f1223e8SApple OSS Distributions
6242*4f1223e8SApple OSS Distributions return true;
6243*4f1223e8SApple OSS Distributions }
6244*4f1223e8SApple OSS Distributions
6245*4f1223e8SApple OSS Distributions bool
setLength(size_t length)6246*4f1223e8SApple OSS Distributions _IOMemoryDescriptorMixedData::setLength(size_t length)
6247*4f1223e8SApple OSS Distributions {
6248*4f1223e8SApple OSS Distributions if (!_data || (length > _capacity)) {
6249*4f1223e8SApple OSS Distributions void *newData;
6250*4f1223e8SApple OSS Distributions
6251*4f1223e8SApple OSS Distributions newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
6252*4f1223e8SApple OSS Distributions length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
6253*4f1223e8SApple OSS Distributions NULL);
6254*4f1223e8SApple OSS Distributions if (!newData) {
6255*4f1223e8SApple OSS Distributions return false;
6256*4f1223e8SApple OSS Distributions }
6257*4f1223e8SApple OSS Distributions
6258*4f1223e8SApple OSS Distributions _data = newData;
6259*4f1223e8SApple OSS Distributions _capacity = length;
6260*4f1223e8SApple OSS Distributions }
6261*4f1223e8SApple OSS Distributions
6262*4f1223e8SApple OSS Distributions _length = length;
6263*4f1223e8SApple OSS Distributions return true;
6264*4f1223e8SApple OSS Distributions }
6265*4f1223e8SApple OSS Distributions
6266*4f1223e8SApple OSS Distributions const void *
getBytes() const6267*4f1223e8SApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6268*4f1223e8SApple OSS Distributions {
6269*4f1223e8SApple OSS Distributions return _length ? _data : nullptr;
6270*4f1223e8SApple OSS Distributions }
6271*4f1223e8SApple OSS Distributions
6272*4f1223e8SApple OSS Distributions size_t
getLength() const6273*4f1223e8SApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6274*4f1223e8SApple OSS Distributions {
6275*4f1223e8SApple OSS Distributions return _data ? _length : 0;
6276*4f1223e8SApple OSS Distributions }
6277