1*e3723e1fSApple OSS Distributions /*
2*e3723e1fSApple OSS Distributions * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*e3723e1fSApple OSS Distributions *
4*e3723e1fSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*e3723e1fSApple OSS Distributions *
6*e3723e1fSApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*e3723e1fSApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*e3723e1fSApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*e3723e1fSApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*e3723e1fSApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*e3723e1fSApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*e3723e1fSApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*e3723e1fSApple OSS Distributions * terms of an Apple operating system software license agreement.
14*e3723e1fSApple OSS Distributions *
15*e3723e1fSApple OSS Distributions * Please obtain a copy of the License at
16*e3723e1fSApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*e3723e1fSApple OSS Distributions *
18*e3723e1fSApple OSS Distributions * The Original Code and all software distributed under the License are
19*e3723e1fSApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*e3723e1fSApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*e3723e1fSApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*e3723e1fSApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*e3723e1fSApple OSS Distributions * Please see the License for the specific language governing rights and
24*e3723e1fSApple OSS Distributions * limitations under the License.
25*e3723e1fSApple OSS Distributions *
26*e3723e1fSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*e3723e1fSApple OSS Distributions */
28*e3723e1fSApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*e3723e1fSApple OSS Distributions
30*e3723e1fSApple OSS Distributions #include <sys/cdefs.h>
31*e3723e1fSApple OSS Distributions
32*e3723e1fSApple OSS Distributions #include <IOKit/assert.h>
33*e3723e1fSApple OSS Distributions #include <IOKit/system.h>
34*e3723e1fSApple OSS Distributions #include <IOKit/IOLib.h>
35*e3723e1fSApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*e3723e1fSApple OSS Distributions #include <IOKit/IOMapper.h>
37*e3723e1fSApple OSS Distributions #include <IOKit/IODMACommand.h>
38*e3723e1fSApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*e3723e1fSApple OSS Distributions
40*e3723e1fSApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*e3723e1fSApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*e3723e1fSApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*e3723e1fSApple OSS Distributions
44*e3723e1fSApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*e3723e1fSApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*e3723e1fSApple OSS Distributions #include <libkern/OSDebug.h>
47*e3723e1fSApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*e3723e1fSApple OSS Distributions
49*e3723e1fSApple OSS Distributions #include "IOKitKernelInternal.h"
50*e3723e1fSApple OSS Distributions
51*e3723e1fSApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*e3723e1fSApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*e3723e1fSApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*e3723e1fSApple OSS Distributions #include <libkern/c++/OSArray.h>
55*e3723e1fSApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*e3723e1fSApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*e3723e1fSApple OSS Distributions #include <os/overflow.h>
58*e3723e1fSApple OSS Distributions #include <os/cpp_util.h>
59*e3723e1fSApple OSS Distributions #include <os/base_private.h>
60*e3723e1fSApple OSS Distributions
61*e3723e1fSApple OSS Distributions #include <sys/uio.h>
62*e3723e1fSApple OSS Distributions
63*e3723e1fSApple OSS Distributions __BEGIN_DECLS
64*e3723e1fSApple OSS Distributions #include <vm/pmap.h>
65*e3723e1fSApple OSS Distributions #include <vm/vm_pageout_xnu.h>
66*e3723e1fSApple OSS Distributions #include <mach/memory_object_types.h>
67*e3723e1fSApple OSS Distributions #include <device/device_port.h>
68*e3723e1fSApple OSS Distributions
69*e3723e1fSApple OSS Distributions #include <mach/vm_prot.h>
70*e3723e1fSApple OSS Distributions #include <mach/mach_vm.h>
71*e3723e1fSApple OSS Distributions #include <mach/memory_entry.h>
72*e3723e1fSApple OSS Distributions #include <mach/mach_host.h>
73*e3723e1fSApple OSS Distributions #include <vm/vm_fault_xnu.h>
74*e3723e1fSApple OSS Distributions #include <vm/vm_protos.h>
75*e3723e1fSApple OSS Distributions #include <vm/vm_memory_entry.h>
76*e3723e1fSApple OSS Distributions #include <vm/vm_kern_xnu.h>
77*e3723e1fSApple OSS Distributions #include <vm/vm_iokit.h>
78*e3723e1fSApple OSS Distributions #include <vm/vm_map_xnu.h>
79*e3723e1fSApple OSS Distributions #include <kern/thread.h>
80*e3723e1fSApple OSS Distributions
81*e3723e1fSApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
82*e3723e1fSApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
83*e3723e1fSApple OSS Distributions
84*e3723e1fSApple OSS Distributions __END_DECLS
85*e3723e1fSApple OSS Distributions
86*e3723e1fSApple OSS Distributions #define kIOMapperWaitSystem ((IOMapper *) 1)
87*e3723e1fSApple OSS Distributions
88*e3723e1fSApple OSS Distributions static IOMapper * gIOSystemMapper = NULL;
89*e3723e1fSApple OSS Distributions
90*e3723e1fSApple OSS Distributions ppnum_t gIOLastPage;
91*e3723e1fSApple OSS Distributions
92*e3723e1fSApple OSS Distributions enum {
93*e3723e1fSApple OSS Distributions kIOMapGuardSizeLarge = 65536
94*e3723e1fSApple OSS Distributions };
95*e3723e1fSApple OSS Distributions
96*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
97*e3723e1fSApple OSS Distributions
98*e3723e1fSApple OSS Distributions OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
99*e3723e1fSApple OSS Distributions
100*e3723e1fSApple OSS Distributions #define super IOMemoryDescriptor
101*e3723e1fSApple OSS Distributions
102*e3723e1fSApple OSS Distributions OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
103*e3723e1fSApple OSS Distributions IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
104*e3723e1fSApple OSS Distributions
105*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106*e3723e1fSApple OSS Distributions
107*e3723e1fSApple OSS Distributions static IORecursiveLock * gIOMemoryLock;
108*e3723e1fSApple OSS Distributions
109*e3723e1fSApple OSS Distributions #define LOCK IORecursiveLockLock( gIOMemoryLock)
110*e3723e1fSApple OSS Distributions #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
111*e3723e1fSApple OSS Distributions #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
112*e3723e1fSApple OSS Distributions #define WAKEUP \
113*e3723e1fSApple OSS Distributions IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
114*e3723e1fSApple OSS Distributions
115*e3723e1fSApple OSS Distributions #if 0
116*e3723e1fSApple OSS Distributions #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
117*e3723e1fSApple OSS Distributions #else
118*e3723e1fSApple OSS Distributions #define DEBG(fmt, args...) {}
119*e3723e1fSApple OSS Distributions #endif
120*e3723e1fSApple OSS Distributions
121*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
122*e3723e1fSApple OSS Distributions
123*e3723e1fSApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
124*e3723e1fSApple OSS Distributions // Function
125*e3723e1fSApple OSS Distributions
// Flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice = 0x00000001,
	kIOPLExternUPL = 0x00000002,
};

// Initialization payload used when constructing a descriptor from a
// persistent memory reference previously taken from another descriptor.
// (One of the "data structures used by initWithOptions" noted above.)
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;      // descriptor the reference came from
	IOMemoryReference * fMemRef;                // the shared memory reference
};

// Book-keeping for one UPL making up part of a descriptor's wired range.
// Stored in an array after the page list inside ioGMDData (see getIOPLList()).
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo;                 // Pointer to page list or index into it
	uint64_t fIOMDOffset;                   // The offset of this iopl in descriptor
	ppnum_t fMappedPage;                    // Page number of first page in this iopl
	unsigned int fPageOffset;               // Offset within first page of iopl
	unsigned int fFlags;                    // Flags (ioPLBlockFlags)
};
144*e3723e1fSApple OSS Distributions
enum { kMaxWireTags = 6 };

// Private state for IOGeneralMemoryDescriptor, kept in a
// _IOMemoryDescriptorMixedData blob and retrieved via getDataP().
// The struct is followed in memory by fPageCnt upl_page_info_t entries
// (fPageList) and then a variable number of ioPLBlock records — see the
// getIOPLList()/getNumIOPL()/computeDataSize() macros below for the layout.
struct ioGMDData {
	IOMapper *   fMapper;
	uint64_t fDMAMapAlignment;
	uint64_t fMappedBase;
	uint64_t fMappedLength;
	uint64_t fPreparationID;
#if IOTRACKING
	IOTracking fWireTracking;
#endif /* IOTRACKING */
	unsigned int fPageCnt;               // number of entries in fPageList
	uint8_t fDMAMapNumAddressBits;
	unsigned char fCompletionError:1;
	unsigned char fMappedBaseValid:1;
	unsigned char _resv:4;               // reserved bits
	unsigned char fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
172*e3723e1fSApple OSS Distributions
173*e3723e1fSApple OSS Distributions #pragma GCC visibility push(hidden)
174*e3723e1fSApple OSS Distributions
// Byte buffer with capacity/length tracking, used to hold the ioGMDData
// blob (header + page list + ioPLBlock array) for a descriptor.
// Method implementations live elsewhere in this file.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create/init a buffer with an initial capacity of 'capacity' bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append 'length' bytes from 'bytes' to the buffer.
	bool appendBytes(const void * bytes, size_t length);
	// Set the buffer's logical length.
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();

	void * _data = nullptr;         // backing allocation
	size_t _length = 0;             // bytes in use
	size_t _capacity = 0;           // bytes allocated
};
197*e3723e1fSApple OSS Distributions
198*e3723e1fSApple OSS Distributions #pragma GCC visibility pop
199*e3723e1fSApple OSS Distributions
200*e3723e1fSApple OSS Distributions #define getDataP(osd) ((ioGMDData *) (osd)->getBytes())
201*e3723e1fSApple OSS Distributions #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
202*e3723e1fSApple OSS Distributions #define getNumIOPL(osd, d) \
203*e3723e1fSApple OSS Distributions ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
204*e3723e1fSApple OSS Distributions #define getPageList(d) (&(d->fPageList[0]))
205*e3723e1fSApple OSS Distributions #define computeDataSize(p, u) \
206*e3723e1fSApple OSS Distributions (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
207*e3723e1fSApple OSS Distributions
208*e3723e1fSApple OSS Distributions enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
209*e3723e1fSApple OSS Distributions
210*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
211*e3723e1fSApple OSS Distributions
212*e3723e1fSApple OSS Distributions extern "C" {
213*e3723e1fSApple OSS Distributions kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)214*e3723e1fSApple OSS Distributions device_data_action(
215*e3723e1fSApple OSS Distributions uintptr_t device_handle,
216*e3723e1fSApple OSS Distributions ipc_port_t device_pager,
217*e3723e1fSApple OSS Distributions vm_prot_t protection,
218*e3723e1fSApple OSS Distributions vm_object_offset_t offset,
219*e3723e1fSApple OSS Distributions vm_size_t size)
220*e3723e1fSApple OSS Distributions {
221*e3723e1fSApple OSS Distributions kern_return_t kr;
222*e3723e1fSApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
223*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> memDesc;
224*e3723e1fSApple OSS Distributions
225*e3723e1fSApple OSS Distributions LOCK;
226*e3723e1fSApple OSS Distributions if (ref->dp.memory) {
227*e3723e1fSApple OSS Distributions memDesc.reset(ref->dp.memory, OSRetain);
228*e3723e1fSApple OSS Distributions kr = memDesc->handleFault(device_pager, offset, size);
229*e3723e1fSApple OSS Distributions memDesc.reset();
230*e3723e1fSApple OSS Distributions } else {
231*e3723e1fSApple OSS Distributions kr = KERN_ABORTED;
232*e3723e1fSApple OSS Distributions }
233*e3723e1fSApple OSS Distributions UNLOCK;
234*e3723e1fSApple OSS Distributions
235*e3723e1fSApple OSS Distributions return kr;
236*e3723e1fSApple OSS Distributions }
237*e3723e1fSApple OSS Distributions
238*e3723e1fSApple OSS Distributions kern_return_t
device_close(uintptr_t device_handle)239*e3723e1fSApple OSS Distributions device_close(
240*e3723e1fSApple OSS Distributions uintptr_t device_handle)
241*e3723e1fSApple OSS Distributions {
242*e3723e1fSApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
243*e3723e1fSApple OSS Distributions
244*e3723e1fSApple OSS Distributions IOFreeType( ref, IOMemoryDescriptorReserved );
245*e3723e1fSApple OSS Distributions
246*e3723e1fSApple OSS Distributions return kIOReturnSuccess;
247*e3723e1fSApple OSS Distributions }
248*e3723e1fSApple OSS Distributions }; // end extern "C"
249*e3723e1fSApple OSS Distributions
250*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251*e3723e1fSApple OSS Distributions
252*e3723e1fSApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
253*e3723e1fSApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
254*e3723e1fSApple OSS Distributions // checked for as a NULL reference is illegal.
// Decode the address/length pair at index 'ind' of the ranges union 'r',
// selecting the union arm according to the descriptor type tag 'type'.
static inline void
getAddrLenForInd(
	mach_vm_address_t &addr,
	mach_vm_size_t &len,            // Output variables
	UInt32 type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32 ind,
	task_t task __unused)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// uio-backed descriptor: fetch iovec 'ind' out of the uio.
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		// Non-LP64 build: 64-bit ranges live in the v64 arm of the union.
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len = cur.length;
	}
#endif /* !__LP64__ */
	else {
		// Native-width virtual/physical ranges.
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len = cur.length;
	}
#if CONFIG_PROB_GZALLOC
	if (task == kernel_task) {
		// Kernel addresses may be PGZ-aliased; translate back to the
		// canonical address before use.
		addr = pgz_decode(addr, len);
	}
#endif /* CONFIG_PROB_GZALLOC */
}
290*e3723e1fSApple OSS Distributions
291*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
292*e3723e1fSApple OSS Distributions
293*e3723e1fSApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)294*e3723e1fSApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
295*e3723e1fSApple OSS Distributions {
296*e3723e1fSApple OSS Distributions IOReturn err = kIOReturnSuccess;
297*e3723e1fSApple OSS Distributions
298*e3723e1fSApple OSS Distributions *control = VM_PURGABLE_SET_STATE;
299*e3723e1fSApple OSS Distributions
300*e3723e1fSApple OSS Distributions enum { kIOMemoryPurgeableControlMask = 15 };
301*e3723e1fSApple OSS Distributions
302*e3723e1fSApple OSS Distributions switch (kIOMemoryPurgeableControlMask & newState) {
303*e3723e1fSApple OSS Distributions case kIOMemoryPurgeableKeepCurrent:
304*e3723e1fSApple OSS Distributions *control = VM_PURGABLE_GET_STATE;
305*e3723e1fSApple OSS Distributions break;
306*e3723e1fSApple OSS Distributions
307*e3723e1fSApple OSS Distributions case kIOMemoryPurgeableNonVolatile:
308*e3723e1fSApple OSS Distributions *state = VM_PURGABLE_NONVOLATILE;
309*e3723e1fSApple OSS Distributions break;
310*e3723e1fSApple OSS Distributions case kIOMemoryPurgeableVolatile:
311*e3723e1fSApple OSS Distributions *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
312*e3723e1fSApple OSS Distributions break;
313*e3723e1fSApple OSS Distributions case kIOMemoryPurgeableEmpty:
314*e3723e1fSApple OSS Distributions *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
315*e3723e1fSApple OSS Distributions break;
316*e3723e1fSApple OSS Distributions default:
317*e3723e1fSApple OSS Distributions err = kIOReturnBadArgument;
318*e3723e1fSApple OSS Distributions break;
319*e3723e1fSApple OSS Distributions }
320*e3723e1fSApple OSS Distributions
321*e3723e1fSApple OSS Distributions if (*control == VM_PURGABLE_SET_STATE) {
322*e3723e1fSApple OSS Distributions // let VM know this call is from the kernel and is allowed to alter
323*e3723e1fSApple OSS Distributions // the volatility of the memory entry even if it was created with
324*e3723e1fSApple OSS Distributions // MAP_MEM_PURGABLE_KERNEL_ONLY
325*e3723e1fSApple OSS Distributions *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
326*e3723e1fSApple OSS Distributions }
327*e3723e1fSApple OSS Distributions
328*e3723e1fSApple OSS Distributions return err;
329*e3723e1fSApple OSS Distributions }
330*e3723e1fSApple OSS Distributions
331*e3723e1fSApple OSS Distributions static IOReturn
purgeableStateBits(int * state)332*e3723e1fSApple OSS Distributions purgeableStateBits(int * state)
333*e3723e1fSApple OSS Distributions {
334*e3723e1fSApple OSS Distributions IOReturn err = kIOReturnSuccess;
335*e3723e1fSApple OSS Distributions
336*e3723e1fSApple OSS Distributions switch (VM_PURGABLE_STATE_MASK & *state) {
337*e3723e1fSApple OSS Distributions case VM_PURGABLE_NONVOLATILE:
338*e3723e1fSApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
339*e3723e1fSApple OSS Distributions break;
340*e3723e1fSApple OSS Distributions case VM_PURGABLE_VOLATILE:
341*e3723e1fSApple OSS Distributions *state = kIOMemoryPurgeableVolatile;
342*e3723e1fSApple OSS Distributions break;
343*e3723e1fSApple OSS Distributions case VM_PURGABLE_EMPTY:
344*e3723e1fSApple OSS Distributions *state = kIOMemoryPurgeableEmpty;
345*e3723e1fSApple OSS Distributions break;
346*e3723e1fSApple OSS Distributions default:
347*e3723e1fSApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
348*e3723e1fSApple OSS Distributions err = kIOReturnNotReady;
349*e3723e1fSApple OSS Distributions break;
350*e3723e1fSApple OSS Distributions }
351*e3723e1fSApple OSS Distributions return err;
352*e3723e1fSApple OSS Distributions }
353*e3723e1fSApple OSS Distributions
// One row of the cache-mode translation table below.
typedef struct {
	unsigned int wimg;              // VM_WIMG_* pager/cache attribute
	unsigned int object_type;       // MAP_MEM_* named-entry memory type
} iokit_memtype_entry;

// Translation table indexed by IOKit cache mode (kIO*Cache), used by
// vmProtForCacheMode()/pagerFlagsForCacheMode()/cacheModeForPagerFlags().
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
371*e3723e1fSApple OSS Distributions
372*e3723e1fSApple OSS Distributions static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)373*e3723e1fSApple OSS Distributions vmProtForCacheMode(IOOptionBits cacheMode)
374*e3723e1fSApple OSS Distributions {
375*e3723e1fSApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
376*e3723e1fSApple OSS Distributions if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
377*e3723e1fSApple OSS Distributions cacheMode = kIODefaultCache;
378*e3723e1fSApple OSS Distributions }
379*e3723e1fSApple OSS Distributions vm_prot_t prot = 0;
380*e3723e1fSApple OSS Distributions SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
381*e3723e1fSApple OSS Distributions return prot;
382*e3723e1fSApple OSS Distributions }
383*e3723e1fSApple OSS Distributions
384*e3723e1fSApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)385*e3723e1fSApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
386*e3723e1fSApple OSS Distributions {
387*e3723e1fSApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
388*e3723e1fSApple OSS Distributions if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
389*e3723e1fSApple OSS Distributions cacheMode = kIODefaultCache;
390*e3723e1fSApple OSS Distributions }
391*e3723e1fSApple OSS Distributions if (cacheMode == kIODefaultCache) {
392*e3723e1fSApple OSS Distributions return -1U;
393*e3723e1fSApple OSS Distributions }
394*e3723e1fSApple OSS Distributions return iomd_mem_types[cacheMode].wimg;
395*e3723e1fSApple OSS Distributions }
396*e3723e1fSApple OSS Distributions
397*e3723e1fSApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)398*e3723e1fSApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
399*e3723e1fSApple OSS Distributions {
400*e3723e1fSApple OSS Distributions pagerFlags &= VM_WIMG_MASK;
401*e3723e1fSApple OSS Distributions IOOptionBits cacheMode = kIODefaultCache;
402*e3723e1fSApple OSS Distributions for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
403*e3723e1fSApple OSS Distributions if (iomd_mem_types[i].wimg == pagerFlags) {
404*e3723e1fSApple OSS Distributions cacheMode = i;
405*e3723e1fSApple OSS Distributions break;
406*e3723e1fSApple OSS Distributions }
407*e3723e1fSApple OSS Distributions }
408*e3723e1fSApple OSS Distributions return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
409*e3723e1fSApple OSS Distributions }
410*e3723e1fSApple OSS Distributions
411*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
412*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413*e3723e1fSApple OSS Distributions
// One named memory entry covering part of a memory reference.
struct IOMemoryEntry {
	ipc_port_t entry;       // send right to the named entry
	int64_t offset;
	uint64_t size;
	uint64_t start;
};

// Refcounted set of named memory entries describing a memory reference.
// 'entries' is a trailing variable-length array with 'capacity' slots,
// 'count' of them in use; allocation/growth and teardown are handled by
// memoryReferenceAlloc()/memoryReferenceFree() below.
struct IOMemoryReference {
	volatile SInt32 refCount;       // released via memoryReferenceRelease()
	vm_prot_t prot;
	uint32_t capacity;
	uint32_t count;
	struct IOMemoryReference * mapRef;      // optional chained reference, freed recursively
	IOMemoryEntry entries[0];
};
429*e3723e1fSApple OSS Distributions
430*e3723e1fSApple OSS Distributions enum{
431*e3723e1fSApple OSS Distributions kIOMemoryReferenceReuse = 0x00000001,
432*e3723e1fSApple OSS Distributions kIOMemoryReferenceWrite = 0x00000002,
433*e3723e1fSApple OSS Distributions kIOMemoryReferenceCOW = 0x00000004,
434*e3723e1fSApple OSS Distributions };
435*e3723e1fSApple OSS Distributions
436*e3723e1fSApple OSS Distributions SInt32 gIOMemoryReferenceCount;
437*e3723e1fSApple OSS Distributions
438*e3723e1fSApple OSS Distributions IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)439*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
440*e3723e1fSApple OSS Distributions {
441*e3723e1fSApple OSS Distributions IOMemoryReference * ref;
442*e3723e1fSApple OSS Distributions size_t oldCapacity;
443*e3723e1fSApple OSS Distributions
444*e3723e1fSApple OSS Distributions if (realloc) {
445*e3723e1fSApple OSS Distributions oldCapacity = realloc->capacity;
446*e3723e1fSApple OSS Distributions } else {
447*e3723e1fSApple OSS Distributions oldCapacity = 0;
448*e3723e1fSApple OSS Distributions }
449*e3723e1fSApple OSS Distributions
450*e3723e1fSApple OSS Distributions // Use the kalloc API instead of manually handling the reallocation
451*e3723e1fSApple OSS Distributions ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
452*e3723e1fSApple OSS Distributions oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
453*e3723e1fSApple OSS Distributions if (ref) {
454*e3723e1fSApple OSS Distributions if (oldCapacity == 0) {
455*e3723e1fSApple OSS Distributions ref->refCount = 1;
456*e3723e1fSApple OSS Distributions OSIncrementAtomic(&gIOMemoryReferenceCount);
457*e3723e1fSApple OSS Distributions }
458*e3723e1fSApple OSS Distributions ref->capacity = capacity;
459*e3723e1fSApple OSS Distributions }
460*e3723e1fSApple OSS Distributions return ref;
461*e3723e1fSApple OSS Distributions }
462*e3723e1fSApple OSS Distributions
463*e3723e1fSApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)464*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
465*e3723e1fSApple OSS Distributions {
466*e3723e1fSApple OSS Distributions IOMemoryEntry * entries;
467*e3723e1fSApple OSS Distributions
468*e3723e1fSApple OSS Distributions if (ref->mapRef) {
469*e3723e1fSApple OSS Distributions memoryReferenceFree(ref->mapRef);
470*e3723e1fSApple OSS Distributions ref->mapRef = NULL;
471*e3723e1fSApple OSS Distributions }
472*e3723e1fSApple OSS Distributions
473*e3723e1fSApple OSS Distributions entries = ref->entries + ref->count;
474*e3723e1fSApple OSS Distributions while (entries > &ref->entries[0]) {
475*e3723e1fSApple OSS Distributions entries--;
476*e3723e1fSApple OSS Distributions ipc_port_release_send(entries->entry);
477*e3723e1fSApple OSS Distributions }
478*e3723e1fSApple OSS Distributions kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
479*e3723e1fSApple OSS Distributions
480*e3723e1fSApple OSS Distributions OSDecrementAtomic(&gIOMemoryReferenceCount);
481*e3723e1fSApple OSS Distributions }
482*e3723e1fSApple OSS Distributions
483*e3723e1fSApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)484*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
485*e3723e1fSApple OSS Distributions {
486*e3723e1fSApple OSS Distributions if (1 == OSDecrementAtomic(&ref->refCount)) {
487*e3723e1fSApple OSS Distributions memoryReferenceFree(ref);
488*e3723e1fSApple OSS Distributions }
489*e3723e1fSApple OSS Distributions }
490*e3723e1fSApple OSS Distributions
491*e3723e1fSApple OSS Distributions
492*e3723e1fSApple OSS Distributions IOReturn
memoryReferenceCreate(IOOptionBits options,IOMemoryReference ** reference)493*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreate(
494*e3723e1fSApple OSS Distributions IOOptionBits options,
495*e3723e1fSApple OSS Distributions IOMemoryReference ** reference)
496*e3723e1fSApple OSS Distributions {
497*e3723e1fSApple OSS Distributions enum { kCapacity = 4, kCapacityInc = 4 };
498*e3723e1fSApple OSS Distributions
499*e3723e1fSApple OSS Distributions kern_return_t err;
500*e3723e1fSApple OSS Distributions IOMemoryReference * ref;
501*e3723e1fSApple OSS Distributions IOMemoryEntry * entries;
502*e3723e1fSApple OSS Distributions IOMemoryEntry * cloneEntries = NULL;
503*e3723e1fSApple OSS Distributions vm_map_t map;
504*e3723e1fSApple OSS Distributions ipc_port_t entry, cloneEntry;
505*e3723e1fSApple OSS Distributions vm_prot_t prot;
506*e3723e1fSApple OSS Distributions memory_object_size_t actualSize;
507*e3723e1fSApple OSS Distributions uint32_t rangeIdx;
508*e3723e1fSApple OSS Distributions uint32_t count;
509*e3723e1fSApple OSS Distributions mach_vm_address_t entryAddr, endAddr, entrySize;
510*e3723e1fSApple OSS Distributions mach_vm_size_t srcAddr, srcLen;
511*e3723e1fSApple OSS Distributions mach_vm_size_t nextAddr, nextLen;
512*e3723e1fSApple OSS Distributions mach_vm_size_t offset, remain;
513*e3723e1fSApple OSS Distributions vm_map_offset_t overmap_start = 0, overmap_end = 0;
514*e3723e1fSApple OSS Distributions int misaligned_start = 0, misaligned_end = 0;
515*e3723e1fSApple OSS Distributions IOByteCount physLen;
516*e3723e1fSApple OSS Distributions IOOptionBits type = (_flags & kIOMemoryTypeMask);
517*e3723e1fSApple OSS Distributions IOOptionBits cacheMode;
518*e3723e1fSApple OSS Distributions unsigned int pagerFlags;
519*e3723e1fSApple OSS Distributions vm_tag_t tag;
520*e3723e1fSApple OSS Distributions vm_named_entry_kernel_flags_t vmne_kflags;
521*e3723e1fSApple OSS Distributions
522*e3723e1fSApple OSS Distributions ref = memoryReferenceAlloc(kCapacity, NULL);
523*e3723e1fSApple OSS Distributions if (!ref) {
524*e3723e1fSApple OSS Distributions return kIOReturnNoMemory;
525*e3723e1fSApple OSS Distributions }
526*e3723e1fSApple OSS Distributions
527*e3723e1fSApple OSS Distributions tag = (vm_tag_t) getVMTag(kernel_map);
528*e3723e1fSApple OSS Distributions vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
529*e3723e1fSApple OSS Distributions entries = &ref->entries[0];
530*e3723e1fSApple OSS Distributions count = 0;
531*e3723e1fSApple OSS Distributions err = KERN_SUCCESS;
532*e3723e1fSApple OSS Distributions
533*e3723e1fSApple OSS Distributions offset = 0;
534*e3723e1fSApple OSS Distributions rangeIdx = 0;
535*e3723e1fSApple OSS Distributions remain = _length;
536*e3723e1fSApple OSS Distributions if (_task) {
537*e3723e1fSApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
538*e3723e1fSApple OSS Distributions
539*e3723e1fSApple OSS Distributions // account for IOBMD setLength(), use its capacity as length
540*e3723e1fSApple OSS Distributions IOBufferMemoryDescriptor * bmd;
541*e3723e1fSApple OSS Distributions if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
542*e3723e1fSApple OSS Distributions nextLen = bmd->getCapacity();
543*e3723e1fSApple OSS Distributions remain = nextLen;
544*e3723e1fSApple OSS Distributions }
545*e3723e1fSApple OSS Distributions } else {
546*e3723e1fSApple OSS Distributions nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
547*e3723e1fSApple OSS Distributions nextLen = physLen;
548*e3723e1fSApple OSS Distributions
549*e3723e1fSApple OSS Distributions // default cache mode for physical
550*e3723e1fSApple OSS Distributions if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
551*e3723e1fSApple OSS Distributions IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
552*e3723e1fSApple OSS Distributions _flags |= (mode << kIOMemoryBufferCacheShift);
553*e3723e1fSApple OSS Distributions }
554*e3723e1fSApple OSS Distributions }
555*e3723e1fSApple OSS Distributions
556*e3723e1fSApple OSS Distributions // cache mode & vm_prot
557*e3723e1fSApple OSS Distributions prot = VM_PROT_READ;
558*e3723e1fSApple OSS Distributions cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
559*e3723e1fSApple OSS Distributions prot |= vmProtForCacheMode(cacheMode);
560*e3723e1fSApple OSS Distributions // VM system requires write access to change cache mode
561*e3723e1fSApple OSS Distributions if (kIODefaultCache != cacheMode) {
562*e3723e1fSApple OSS Distributions prot |= VM_PROT_WRITE;
563*e3723e1fSApple OSS Distributions }
564*e3723e1fSApple OSS Distributions if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
565*e3723e1fSApple OSS Distributions prot |= VM_PROT_WRITE;
566*e3723e1fSApple OSS Distributions }
567*e3723e1fSApple OSS Distributions if (kIOMemoryReferenceWrite & options) {
568*e3723e1fSApple OSS Distributions prot |= VM_PROT_WRITE;
569*e3723e1fSApple OSS Distributions }
570*e3723e1fSApple OSS Distributions if (kIOMemoryReferenceCOW & options) {
571*e3723e1fSApple OSS Distributions prot |= MAP_MEM_VM_COPY;
572*e3723e1fSApple OSS Distributions }
573*e3723e1fSApple OSS Distributions
574*e3723e1fSApple OSS Distributions if (kIOMemoryUseReserve & _flags) {
575*e3723e1fSApple OSS Distributions prot |= MAP_MEM_GRAB_SECLUDED;
576*e3723e1fSApple OSS Distributions }
577*e3723e1fSApple OSS Distributions
578*e3723e1fSApple OSS Distributions if ((kIOMemoryReferenceReuse & options) && _memRef) {
579*e3723e1fSApple OSS Distributions cloneEntries = &_memRef->entries[0];
580*e3723e1fSApple OSS Distributions prot |= MAP_MEM_NAMED_REUSE;
581*e3723e1fSApple OSS Distributions }
582*e3723e1fSApple OSS Distributions
583*e3723e1fSApple OSS Distributions if (_task) {
584*e3723e1fSApple OSS Distributions // virtual ranges
585*e3723e1fSApple OSS Distributions
586*e3723e1fSApple OSS Distributions if (kIOMemoryBufferPageable & _flags) {
587*e3723e1fSApple OSS Distributions int ledger_tag, ledger_no_footprint;
588*e3723e1fSApple OSS Distributions
589*e3723e1fSApple OSS Distributions // IOBufferMemoryDescriptor alloc - set flags for entry + object create
590*e3723e1fSApple OSS Distributions prot |= MAP_MEM_NAMED_CREATE;
591*e3723e1fSApple OSS Distributions
592*e3723e1fSApple OSS Distributions // default accounting settings:
593*e3723e1fSApple OSS Distributions // + "none" ledger tag
594*e3723e1fSApple OSS Distributions // + include in footprint
595*e3723e1fSApple OSS Distributions // can be changed later with ::setOwnership()
596*e3723e1fSApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NONE;
597*e3723e1fSApple OSS Distributions ledger_no_footprint = 0;
598*e3723e1fSApple OSS Distributions
599*e3723e1fSApple OSS Distributions if (kIOMemoryBufferPurgeable & _flags) {
600*e3723e1fSApple OSS Distributions prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
601*e3723e1fSApple OSS Distributions if (VM_KERN_MEMORY_SKYWALK == tag) {
602*e3723e1fSApple OSS Distributions // Skywalk purgeable memory accounting:
603*e3723e1fSApple OSS Distributions // + "network" ledger tag
604*e3723e1fSApple OSS Distributions // + not included in footprint
605*e3723e1fSApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NETWORK;
606*e3723e1fSApple OSS Distributions ledger_no_footprint = 1;
607*e3723e1fSApple OSS Distributions } else {
608*e3723e1fSApple OSS Distributions // regular purgeable memory accounting:
609*e3723e1fSApple OSS Distributions // + no ledger tag
610*e3723e1fSApple OSS Distributions // + included in footprint
611*e3723e1fSApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NONE;
612*e3723e1fSApple OSS Distributions ledger_no_footprint = 0;
613*e3723e1fSApple OSS Distributions }
614*e3723e1fSApple OSS Distributions }
615*e3723e1fSApple OSS Distributions vmne_kflags.vmnekf_ledger_tag = ledger_tag;
616*e3723e1fSApple OSS Distributions vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
617*e3723e1fSApple OSS Distributions if (kIOMemoryUseReserve & _flags) {
618*e3723e1fSApple OSS Distributions prot |= MAP_MEM_GRAB_SECLUDED;
619*e3723e1fSApple OSS Distributions }
620*e3723e1fSApple OSS Distributions
621*e3723e1fSApple OSS Distributions prot |= VM_PROT_WRITE;
622*e3723e1fSApple OSS Distributions map = NULL;
623*e3723e1fSApple OSS Distributions } else {
624*e3723e1fSApple OSS Distributions prot |= MAP_MEM_USE_DATA_ADDR;
625*e3723e1fSApple OSS Distributions map = get_task_map(_task);
626*e3723e1fSApple OSS Distributions }
627*e3723e1fSApple OSS Distributions DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
628*e3723e1fSApple OSS Distributions
629*e3723e1fSApple OSS Distributions while (remain) {
630*e3723e1fSApple OSS Distributions srcAddr = nextAddr;
631*e3723e1fSApple OSS Distributions srcLen = nextLen;
632*e3723e1fSApple OSS Distributions nextAddr = 0;
633*e3723e1fSApple OSS Distributions nextLen = 0;
634*e3723e1fSApple OSS Distributions // coalesce addr range
635*e3723e1fSApple OSS Distributions for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
636*e3723e1fSApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
637*e3723e1fSApple OSS Distributions if ((srcAddr + srcLen) != nextAddr) {
638*e3723e1fSApple OSS Distributions break;
639*e3723e1fSApple OSS Distributions }
640*e3723e1fSApple OSS Distributions srcLen += nextLen;
641*e3723e1fSApple OSS Distributions }
642*e3723e1fSApple OSS Distributions
643*e3723e1fSApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & prot) {
644*e3723e1fSApple OSS Distributions entryAddr = srcAddr;
645*e3723e1fSApple OSS Distributions endAddr = srcAddr + srcLen;
646*e3723e1fSApple OSS Distributions } else {
647*e3723e1fSApple OSS Distributions entryAddr = trunc_page_64(srcAddr);
648*e3723e1fSApple OSS Distributions endAddr = round_page_64(srcAddr + srcLen);
649*e3723e1fSApple OSS Distributions }
650*e3723e1fSApple OSS Distributions if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
651*e3723e1fSApple OSS Distributions DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
652*e3723e1fSApple OSS Distributions }
653*e3723e1fSApple OSS Distributions
654*e3723e1fSApple OSS Distributions do{
655*e3723e1fSApple OSS Distributions entrySize = (endAddr - entryAddr);
656*e3723e1fSApple OSS Distributions if (!entrySize) {
657*e3723e1fSApple OSS Distributions break;
658*e3723e1fSApple OSS Distributions }
659*e3723e1fSApple OSS Distributions actualSize = entrySize;
660*e3723e1fSApple OSS Distributions
661*e3723e1fSApple OSS Distributions cloneEntry = MACH_PORT_NULL;
662*e3723e1fSApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
663*e3723e1fSApple OSS Distributions if (cloneEntries < &_memRef->entries[_memRef->count]) {
664*e3723e1fSApple OSS Distributions cloneEntry = cloneEntries->entry;
665*e3723e1fSApple OSS Distributions } else {
666*e3723e1fSApple OSS Distributions prot &= ~MAP_MEM_NAMED_REUSE;
667*e3723e1fSApple OSS Distributions }
668*e3723e1fSApple OSS Distributions }
669*e3723e1fSApple OSS Distributions
670*e3723e1fSApple OSS Distributions mach_vm_offset_t entryAddrForVm = entryAddr;
671*e3723e1fSApple OSS Distributions err = mach_make_memory_entry_internal(map,
672*e3723e1fSApple OSS Distributions &actualSize, entryAddrForVm, prot, vmne_kflags, &entry, cloneEntry);
673*e3723e1fSApple OSS Distributions
674*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
675*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddrForVm, actualSize, prot, err);
676*e3723e1fSApple OSS Distributions break;
677*e3723e1fSApple OSS Distributions }
678*e3723e1fSApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & prot) {
679*e3723e1fSApple OSS Distributions if (actualSize > entrySize) {
680*e3723e1fSApple OSS Distributions actualSize = entrySize;
681*e3723e1fSApple OSS Distributions }
682*e3723e1fSApple OSS Distributions } else if (actualSize > entrySize) {
683*e3723e1fSApple OSS Distributions panic("mach_make_memory_entry_64 actualSize");
684*e3723e1fSApple OSS Distributions }
685*e3723e1fSApple OSS Distributions
686*e3723e1fSApple OSS Distributions memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
687*e3723e1fSApple OSS Distributions
688*e3723e1fSApple OSS Distributions if (count && overmap_start) {
689*e3723e1fSApple OSS Distributions /*
690*e3723e1fSApple OSS Distributions * Track misaligned start for all
691*e3723e1fSApple OSS Distributions * except the first entry.
692*e3723e1fSApple OSS Distributions */
693*e3723e1fSApple OSS Distributions misaligned_start++;
694*e3723e1fSApple OSS Distributions }
695*e3723e1fSApple OSS Distributions
696*e3723e1fSApple OSS Distributions if (overmap_end) {
697*e3723e1fSApple OSS Distributions /*
698*e3723e1fSApple OSS Distributions * Ignore misaligned end for the
699*e3723e1fSApple OSS Distributions * last entry.
700*e3723e1fSApple OSS Distributions */
701*e3723e1fSApple OSS Distributions if ((entryAddr + actualSize) != endAddr) {
702*e3723e1fSApple OSS Distributions misaligned_end++;
703*e3723e1fSApple OSS Distributions }
704*e3723e1fSApple OSS Distributions }
705*e3723e1fSApple OSS Distributions
706*e3723e1fSApple OSS Distributions if (count) {
707*e3723e1fSApple OSS Distributions /* Middle entries */
708*e3723e1fSApple OSS Distributions if (misaligned_start || misaligned_end) {
709*e3723e1fSApple OSS Distributions DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
710*e3723e1fSApple OSS Distributions ipc_port_release_send(entry);
711*e3723e1fSApple OSS Distributions err = KERN_NOT_SUPPORTED;
712*e3723e1fSApple OSS Distributions break;
713*e3723e1fSApple OSS Distributions }
714*e3723e1fSApple OSS Distributions }
715*e3723e1fSApple OSS Distributions
716*e3723e1fSApple OSS Distributions if (count >= ref->capacity) {
717*e3723e1fSApple OSS Distributions ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
718*e3723e1fSApple OSS Distributions entries = &ref->entries[count];
719*e3723e1fSApple OSS Distributions }
720*e3723e1fSApple OSS Distributions entries->entry = entry;
721*e3723e1fSApple OSS Distributions entries->size = actualSize;
722*e3723e1fSApple OSS Distributions entries->offset = offset + (entryAddr - srcAddr);
723*e3723e1fSApple OSS Distributions entries->start = entryAddr;
724*e3723e1fSApple OSS Distributions entryAddr += actualSize;
725*e3723e1fSApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
726*e3723e1fSApple OSS Distributions if ((cloneEntries->entry == entries->entry)
727*e3723e1fSApple OSS Distributions && (cloneEntries->size == entries->size)
728*e3723e1fSApple OSS Distributions && (cloneEntries->offset == entries->offset)) {
729*e3723e1fSApple OSS Distributions cloneEntries++;
730*e3723e1fSApple OSS Distributions } else {
731*e3723e1fSApple OSS Distributions prot &= ~MAP_MEM_NAMED_REUSE;
732*e3723e1fSApple OSS Distributions }
733*e3723e1fSApple OSS Distributions }
734*e3723e1fSApple OSS Distributions entries++;
735*e3723e1fSApple OSS Distributions count++;
736*e3723e1fSApple OSS Distributions }while (true);
737*e3723e1fSApple OSS Distributions offset += srcLen;
738*e3723e1fSApple OSS Distributions remain -= srcLen;
739*e3723e1fSApple OSS Distributions }
740*e3723e1fSApple OSS Distributions } else {
741*e3723e1fSApple OSS Distributions // _task == 0, physical or kIOMemoryTypeUPL
742*e3723e1fSApple OSS Distributions memory_object_t pager;
743*e3723e1fSApple OSS Distributions vm_size_t size = ptoa_64(_pages);
744*e3723e1fSApple OSS Distributions
745*e3723e1fSApple OSS Distributions if (!getKernelReserved()) {
746*e3723e1fSApple OSS Distributions panic("getKernelReserved");
747*e3723e1fSApple OSS Distributions }
748*e3723e1fSApple OSS Distributions
749*e3723e1fSApple OSS Distributions reserved->dp.pagerContig = (1 == _rangesCount);
750*e3723e1fSApple OSS Distributions reserved->dp.memory = this;
751*e3723e1fSApple OSS Distributions
752*e3723e1fSApple OSS Distributions pagerFlags = pagerFlagsForCacheMode(cacheMode);
753*e3723e1fSApple OSS Distributions if (-1U == pagerFlags) {
754*e3723e1fSApple OSS Distributions panic("phys is kIODefaultCache");
755*e3723e1fSApple OSS Distributions }
756*e3723e1fSApple OSS Distributions if (reserved->dp.pagerContig) {
757*e3723e1fSApple OSS Distributions pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
758*e3723e1fSApple OSS Distributions }
759*e3723e1fSApple OSS Distributions
760*e3723e1fSApple OSS Distributions pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
761*e3723e1fSApple OSS Distributions size, pagerFlags);
762*e3723e1fSApple OSS Distributions assert(pager);
763*e3723e1fSApple OSS Distributions if (!pager) {
764*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
765*e3723e1fSApple OSS Distributions err = kIOReturnVMError;
766*e3723e1fSApple OSS Distributions } else {
767*e3723e1fSApple OSS Distributions srcAddr = nextAddr;
768*e3723e1fSApple OSS Distributions entryAddr = trunc_page_64(srcAddr);
769*e3723e1fSApple OSS Distributions err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
770*e3723e1fSApple OSS Distributions size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
771*e3723e1fSApple OSS Distributions assert(KERN_SUCCESS == err);
772*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
773*e3723e1fSApple OSS Distributions device_pager_deallocate(pager);
774*e3723e1fSApple OSS Distributions } else {
775*e3723e1fSApple OSS Distributions reserved->dp.devicePager = pager;
776*e3723e1fSApple OSS Distributions entries->entry = entry;
777*e3723e1fSApple OSS Distributions entries->size = size;
778*e3723e1fSApple OSS Distributions entries->offset = offset + (entryAddr - srcAddr);
779*e3723e1fSApple OSS Distributions entries++;
780*e3723e1fSApple OSS Distributions count++;
781*e3723e1fSApple OSS Distributions }
782*e3723e1fSApple OSS Distributions }
783*e3723e1fSApple OSS Distributions }
784*e3723e1fSApple OSS Distributions
785*e3723e1fSApple OSS Distributions ref->count = count;
786*e3723e1fSApple OSS Distributions ref->prot = prot;
787*e3723e1fSApple OSS Distributions
788*e3723e1fSApple OSS Distributions if (_task && (KERN_SUCCESS == err)
789*e3723e1fSApple OSS Distributions && (kIOMemoryMapCopyOnWrite & _flags)
790*e3723e1fSApple OSS Distributions && !(kIOMemoryReferenceCOW & options)) {
791*e3723e1fSApple OSS Distributions err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
792*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
793*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
794*e3723e1fSApple OSS Distributions }
795*e3723e1fSApple OSS Distributions }
796*e3723e1fSApple OSS Distributions
797*e3723e1fSApple OSS Distributions if (KERN_SUCCESS == err) {
798*e3723e1fSApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
799*e3723e1fSApple OSS Distributions memoryReferenceFree(ref);
800*e3723e1fSApple OSS Distributions OSIncrementAtomic(&_memRef->refCount);
801*e3723e1fSApple OSS Distributions ref = _memRef;
802*e3723e1fSApple OSS Distributions }
803*e3723e1fSApple OSS Distributions } else {
804*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
805*e3723e1fSApple OSS Distributions memoryReferenceFree(ref);
806*e3723e1fSApple OSS Distributions ref = NULL;
807*e3723e1fSApple OSS Distributions }
808*e3723e1fSApple OSS Distributions
809*e3723e1fSApple OSS Distributions *reference = ref;
810*e3723e1fSApple OSS Distributions
811*e3723e1fSApple OSS Distributions return err;
812*e3723e1fSApple OSS Distributions }
813*e3723e1fSApple OSS Distributions
814*e3723e1fSApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)815*e3723e1fSApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
816*e3723e1fSApple OSS Distributions {
817*e3723e1fSApple OSS Distributions switch (kIOMapGuardedMask & options) {
818*e3723e1fSApple OSS Distributions default:
819*e3723e1fSApple OSS Distributions case kIOMapGuardedSmall:
820*e3723e1fSApple OSS Distributions return vm_map_page_size(map);
821*e3723e1fSApple OSS Distributions case kIOMapGuardedLarge:
822*e3723e1fSApple OSS Distributions assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
823*e3723e1fSApple OSS Distributions return kIOMapGuardSizeLarge;
824*e3723e1fSApple OSS Distributions }
825*e3723e1fSApple OSS Distributions ;
826*e3723e1fSApple OSS Distributions }
827*e3723e1fSApple OSS Distributions
828*e3723e1fSApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)829*e3723e1fSApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
830*e3723e1fSApple OSS Distributions vm_map_offset_t addr, mach_vm_size_t size)
831*e3723e1fSApple OSS Distributions {
832*e3723e1fSApple OSS Distributions kern_return_t kr;
833*e3723e1fSApple OSS Distributions vm_map_offset_t actualAddr;
834*e3723e1fSApple OSS Distributions mach_vm_size_t actualSize;
835*e3723e1fSApple OSS Distributions
836*e3723e1fSApple OSS Distributions actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
837*e3723e1fSApple OSS Distributions actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
838*e3723e1fSApple OSS Distributions
839*e3723e1fSApple OSS Distributions if (kIOMapGuardedMask & options) {
840*e3723e1fSApple OSS Distributions mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
841*e3723e1fSApple OSS Distributions actualAddr -= guardSize;
842*e3723e1fSApple OSS Distributions actualSize += 2 * guardSize;
843*e3723e1fSApple OSS Distributions }
844*e3723e1fSApple OSS Distributions kr = mach_vm_deallocate(map, actualAddr, actualSize);
845*e3723e1fSApple OSS Distributions
846*e3723e1fSApple OSS Distributions return kr;
847*e3723e1fSApple OSS Distributions }
848*e3723e1fSApple OSS Distributions
849*e3723e1fSApple OSS Distributions kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map,void * _ref)850*e3723e1fSApple OSS Distributions IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
851*e3723e1fSApple OSS Distributions {
852*e3723e1fSApple OSS Distributions IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
853*e3723e1fSApple OSS Distributions IOReturn err;
854*e3723e1fSApple OSS Distributions vm_map_offset_t addr;
855*e3723e1fSApple OSS Distributions mach_vm_size_t size;
856*e3723e1fSApple OSS Distributions mach_vm_size_t guardSize;
857*e3723e1fSApple OSS Distributions vm_map_kernel_flags_t vmk_flags;
858*e3723e1fSApple OSS Distributions
859*e3723e1fSApple OSS Distributions addr = ref->mapped;
860*e3723e1fSApple OSS Distributions size = ref->size;
861*e3723e1fSApple OSS Distributions guardSize = 0;
862*e3723e1fSApple OSS Distributions
863*e3723e1fSApple OSS Distributions if (kIOMapGuardedMask & ref->options) {
864*e3723e1fSApple OSS Distributions if (!(kIOMapAnywhere & ref->options)) {
865*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
866*e3723e1fSApple OSS Distributions }
867*e3723e1fSApple OSS Distributions guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
868*e3723e1fSApple OSS Distributions size += 2 * guardSize;
869*e3723e1fSApple OSS Distributions }
870*e3723e1fSApple OSS Distributions if (kIOMapAnywhere & ref->options) {
871*e3723e1fSApple OSS Distributions vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
872*e3723e1fSApple OSS Distributions } else {
873*e3723e1fSApple OSS Distributions vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
874*e3723e1fSApple OSS Distributions }
875*e3723e1fSApple OSS Distributions vmk_flags.vm_tag = ref->tag;
876*e3723e1fSApple OSS Distributions
877*e3723e1fSApple OSS Distributions /*
878*e3723e1fSApple OSS Distributions * Mapping memory into the kernel_map using IOMDs use a dedicated range.
879*e3723e1fSApple OSS Distributions * Memory being mapped should not contain kernel pointers.
880*e3723e1fSApple OSS Distributions */
881*e3723e1fSApple OSS Distributions if (map == kernel_map) {
882*e3723e1fSApple OSS Distributions vmk_flags.vmkf_range_id = KMEM_RANGE_ID_IOKIT;
883*e3723e1fSApple OSS Distributions }
884*e3723e1fSApple OSS Distributions
885*e3723e1fSApple OSS Distributions err = mach_vm_map_kernel(map, &addr, size,
886*e3723e1fSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
887*e3723e1fSApple OSS Distributions // TODO4K this should not be necessary...
888*e3723e1fSApple OSS Distributions (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
889*e3723e1fSApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
890*e3723e1fSApple OSS Distributions (vm_map_offset_t) 0,
891*e3723e1fSApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
892*e3723e1fSApple OSS Distributions vmk_flags,
893*e3723e1fSApple OSS Distributions IPC_PORT_NULL,
894*e3723e1fSApple OSS Distributions (memory_object_offset_t) 0,
895*e3723e1fSApple OSS Distributions false, /* copy */
896*e3723e1fSApple OSS Distributions ref->prot,
897*e3723e1fSApple OSS Distributions ref->prot,
898*e3723e1fSApple OSS Distributions VM_INHERIT_NONE);
899*e3723e1fSApple OSS Distributions if (KERN_SUCCESS == err) {
900*e3723e1fSApple OSS Distributions ref->mapped = (mach_vm_address_t) addr;
901*e3723e1fSApple OSS Distributions ref->map = map;
902*e3723e1fSApple OSS Distributions if (kIOMapGuardedMask & ref->options) {
903*e3723e1fSApple OSS Distributions vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
904*e3723e1fSApple OSS Distributions
905*e3723e1fSApple OSS Distributions err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
906*e3723e1fSApple OSS Distributions assert(KERN_SUCCESS == err);
907*e3723e1fSApple OSS Distributions err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
908*e3723e1fSApple OSS Distributions assert(KERN_SUCCESS == err);
909*e3723e1fSApple OSS Distributions ref->mapped += guardSize;
910*e3723e1fSApple OSS Distributions }
911*e3723e1fSApple OSS Distributions }
912*e3723e1fSApple OSS Distributions
913*e3723e1fSApple OSS Distributions return err;
914*e3723e1fSApple OSS Distributions }
915*e3723e1fSApple OSS Distributions
916*e3723e1fSApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)917*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
918*e3723e1fSApple OSS Distributions IOMemoryReference * ref,
919*e3723e1fSApple OSS Distributions vm_map_t map,
920*e3723e1fSApple OSS Distributions mach_vm_size_t inoffset,
921*e3723e1fSApple OSS Distributions mach_vm_size_t size,
922*e3723e1fSApple OSS Distributions IOOptionBits options,
923*e3723e1fSApple OSS Distributions mach_vm_address_t * inaddr)
924*e3723e1fSApple OSS Distributions {
925*e3723e1fSApple OSS Distributions IOReturn err;
926*e3723e1fSApple OSS Distributions int64_t offset = inoffset;
927*e3723e1fSApple OSS Distributions uint32_t rangeIdx, entryIdx;
928*e3723e1fSApple OSS Distributions vm_map_offset_t addr, mapAddr;
929*e3723e1fSApple OSS Distributions vm_map_offset_t pageOffset, entryOffset, remain, chunk;
930*e3723e1fSApple OSS Distributions
931*e3723e1fSApple OSS Distributions mach_vm_address_t nextAddr;
932*e3723e1fSApple OSS Distributions mach_vm_size_t nextLen;
933*e3723e1fSApple OSS Distributions IOByteCount physLen;
934*e3723e1fSApple OSS Distributions IOMemoryEntry * entry;
935*e3723e1fSApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
936*e3723e1fSApple OSS Distributions IOOptionBits type;
937*e3723e1fSApple OSS Distributions IOOptionBits cacheMode;
938*e3723e1fSApple OSS Distributions vm_tag_t tag;
939*e3723e1fSApple OSS Distributions // for the kIOMapPrefault option.
940*e3723e1fSApple OSS Distributions upl_page_info_t * pageList = NULL;
941*e3723e1fSApple OSS Distributions UInt currentPageIndex = 0;
942*e3723e1fSApple OSS Distributions bool didAlloc;
943*e3723e1fSApple OSS Distributions
944*e3723e1fSApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
945*e3723e1fSApple OSS Distributions
946*e3723e1fSApple OSS Distributions if (ref->mapRef) {
947*e3723e1fSApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
948*e3723e1fSApple OSS Distributions return err;
949*e3723e1fSApple OSS Distributions }
950*e3723e1fSApple OSS Distributions
951*e3723e1fSApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
952*e3723e1fSApple OSS Distributions err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
953*e3723e1fSApple OSS Distributions return err;
954*e3723e1fSApple OSS Distributions }
955*e3723e1fSApple OSS Distributions
956*e3723e1fSApple OSS Distributions type = _flags & kIOMemoryTypeMask;
957*e3723e1fSApple OSS Distributions
958*e3723e1fSApple OSS Distributions prot = VM_PROT_READ;
959*e3723e1fSApple OSS Distributions if (!(kIOMapReadOnly & options)) {
960*e3723e1fSApple OSS Distributions prot |= VM_PROT_WRITE;
961*e3723e1fSApple OSS Distributions }
962*e3723e1fSApple OSS Distributions prot &= ref->prot;
963*e3723e1fSApple OSS Distributions
964*e3723e1fSApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
965*e3723e1fSApple OSS Distributions if (kIODefaultCache != cacheMode) {
966*e3723e1fSApple OSS Distributions // VM system requires write access to update named entry cache mode
967*e3723e1fSApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
968*e3723e1fSApple OSS Distributions }
969*e3723e1fSApple OSS Distributions
970*e3723e1fSApple OSS Distributions tag = (typeof(tag))getVMTag(map);
971*e3723e1fSApple OSS Distributions
972*e3723e1fSApple OSS Distributions if (_task) {
973*e3723e1fSApple OSS Distributions // Find first range for offset
974*e3723e1fSApple OSS Distributions if (!_rangesCount) {
975*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
976*e3723e1fSApple OSS Distributions }
977*e3723e1fSApple OSS Distributions for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
978*e3723e1fSApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
979*e3723e1fSApple OSS Distributions if (remain < nextLen) {
980*e3723e1fSApple OSS Distributions break;
981*e3723e1fSApple OSS Distributions }
982*e3723e1fSApple OSS Distributions remain -= nextLen;
983*e3723e1fSApple OSS Distributions }
984*e3723e1fSApple OSS Distributions } else {
985*e3723e1fSApple OSS Distributions rangeIdx = 0;
986*e3723e1fSApple OSS Distributions remain = 0;
987*e3723e1fSApple OSS Distributions nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
988*e3723e1fSApple OSS Distributions nextLen = size;
989*e3723e1fSApple OSS Distributions }
990*e3723e1fSApple OSS Distributions
991*e3723e1fSApple OSS Distributions assert(remain < nextLen);
992*e3723e1fSApple OSS Distributions if (remain >= nextLen) {
993*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
994*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
995*e3723e1fSApple OSS Distributions }
996*e3723e1fSApple OSS Distributions
997*e3723e1fSApple OSS Distributions nextAddr += remain;
998*e3723e1fSApple OSS Distributions nextLen -= remain;
999*e3723e1fSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1000*e3723e1fSApple OSS Distributions pageOffset = (vm_map_page_mask(map) & nextAddr);
1001*e3723e1fSApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
1002*e3723e1fSApple OSS Distributions pageOffset = (page_mask & nextAddr);
1003*e3723e1fSApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
1004*e3723e1fSApple OSS Distributions addr = 0;
1005*e3723e1fSApple OSS Distributions didAlloc = false;
1006*e3723e1fSApple OSS Distributions
1007*e3723e1fSApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1008*e3723e1fSApple OSS Distributions addr = *inaddr;
1009*e3723e1fSApple OSS Distributions if (pageOffset != (vm_map_page_mask(map) & addr)) {
1010*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1011*e3723e1fSApple OSS Distributions }
1012*e3723e1fSApple OSS Distributions addr -= pageOffset;
1013*e3723e1fSApple OSS Distributions }
1014*e3723e1fSApple OSS Distributions
1015*e3723e1fSApple OSS Distributions // find first entry for offset
1016*e3723e1fSApple OSS Distributions for (entryIdx = 0;
1017*e3723e1fSApple OSS Distributions (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1018*e3723e1fSApple OSS Distributions entryIdx++) {
1019*e3723e1fSApple OSS Distributions }
1020*e3723e1fSApple OSS Distributions entryIdx--;
1021*e3723e1fSApple OSS Distributions entry = &ref->entries[entryIdx];
1022*e3723e1fSApple OSS Distributions
1023*e3723e1fSApple OSS Distributions // allocate VM
1024*e3723e1fSApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1025*e3723e1fSApple OSS Distributions size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1026*e3723e1fSApple OSS Distributions #else
1027*e3723e1fSApple OSS Distributions size = round_page_64(size + pageOffset);
1028*e3723e1fSApple OSS Distributions #endif
1029*e3723e1fSApple OSS Distributions if (kIOMapOverwrite & options) {
1030*e3723e1fSApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1031*e3723e1fSApple OSS Distributions map = IOPageableMapForAddress(addr);
1032*e3723e1fSApple OSS Distributions }
1033*e3723e1fSApple OSS Distributions err = KERN_SUCCESS;
1034*e3723e1fSApple OSS Distributions } else {
1035*e3723e1fSApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1036*e3723e1fSApple OSS Distributions ref.map = map;
1037*e3723e1fSApple OSS Distributions ref.tag = tag;
1038*e3723e1fSApple OSS Distributions ref.options = options;
1039*e3723e1fSApple OSS Distributions ref.size = size;
1040*e3723e1fSApple OSS Distributions ref.prot = prot;
1041*e3723e1fSApple OSS Distributions if (options & kIOMapAnywhere) {
1042*e3723e1fSApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1043*e3723e1fSApple OSS Distributions ref.mapped = 0;
1044*e3723e1fSApple OSS Distributions } else {
1045*e3723e1fSApple OSS Distributions ref.mapped = addr;
1046*e3723e1fSApple OSS Distributions }
1047*e3723e1fSApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1048*e3723e1fSApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1049*e3723e1fSApple OSS Distributions } else {
1050*e3723e1fSApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1051*e3723e1fSApple OSS Distributions }
1052*e3723e1fSApple OSS Distributions if (KERN_SUCCESS == err) {
1053*e3723e1fSApple OSS Distributions addr = ref.mapped;
1054*e3723e1fSApple OSS Distributions map = ref.map;
1055*e3723e1fSApple OSS Distributions didAlloc = true;
1056*e3723e1fSApple OSS Distributions }
1057*e3723e1fSApple OSS Distributions }
1058*e3723e1fSApple OSS Distributions
1059*e3723e1fSApple OSS Distributions /*
1060*e3723e1fSApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1061*e3723e1fSApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1062*e3723e1fSApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1063*e3723e1fSApple OSS Distributions * operations.
1064*e3723e1fSApple OSS Distributions */
1065*e3723e1fSApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1066*e3723e1fSApple OSS Distributions options &= ~kIOMapPrefault;
1067*e3723e1fSApple OSS Distributions }
1068*e3723e1fSApple OSS Distributions
1069*e3723e1fSApple OSS Distributions /*
1070*e3723e1fSApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1071*e3723e1fSApple OSS Distributions * memory type, and the underlying data.
1072*e3723e1fSApple OSS Distributions */
1073*e3723e1fSApple OSS Distributions if (options & kIOMapPrefault) {
1074*e3723e1fSApple OSS Distributions /*
1075*e3723e1fSApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1076*e3723e1fSApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1077*e3723e1fSApple OSS Distributions */
1078*e3723e1fSApple OSS Distributions assert(_wireCount != 0);
1079*e3723e1fSApple OSS Distributions assert(_memoryEntries != NULL);
1080*e3723e1fSApple OSS Distributions if ((_wireCount == 0) ||
1081*e3723e1fSApple OSS Distributions (_memoryEntries == NULL)) {
1082*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1083*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
1084*e3723e1fSApple OSS Distributions }
1085*e3723e1fSApple OSS Distributions
1086*e3723e1fSApple OSS Distributions // Get the page list.
1087*e3723e1fSApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1088*e3723e1fSApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1089*e3723e1fSApple OSS Distributions pageList = getPageList(dataP);
1090*e3723e1fSApple OSS Distributions
1091*e3723e1fSApple OSS Distributions // Get the number of IOPLs.
1092*e3723e1fSApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1093*e3723e1fSApple OSS Distributions
1094*e3723e1fSApple OSS Distributions 	/*
1095*e3723e1fSApple OSS Distributions 	 * Scan through the IOPL Info Blocks, looking for the first block containing
1096*e3723e1fSApple OSS Distributions 	 * the offset. The scan will step one block past it, so we back up to the
1097*e3723e1fSApple OSS Distributions 	 * correct block at the end.
1098*e3723e1fSApple OSS Distributions 	 */
1099*e3723e1fSApple OSS Distributions UInt ioplIndex = 0;
1100*e3723e1fSApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1101*e3723e1fSApple OSS Distributions ioplIndex++;
1102*e3723e1fSApple OSS Distributions }
1103*e3723e1fSApple OSS Distributions ioplIndex--;
1104*e3723e1fSApple OSS Distributions
1105*e3723e1fSApple OSS Distributions // Retrieve the IOPL info block.
1106*e3723e1fSApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1107*e3723e1fSApple OSS Distributions
1108*e3723e1fSApple OSS Distributions /*
1109*e3723e1fSApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1110*e3723e1fSApple OSS Distributions * array.
1111*e3723e1fSApple OSS Distributions */
1112*e3723e1fSApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1113*e3723e1fSApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1114*e3723e1fSApple OSS Distributions } else {
1115*e3723e1fSApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1116*e3723e1fSApple OSS Distributions }
1117*e3723e1fSApple OSS Distributions
1118*e3723e1fSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look up the first page index.
1119*e3723e1fSApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1120*e3723e1fSApple OSS Distributions
1121*e3723e1fSApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1122*e3723e1fSApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1123*e3723e1fSApple OSS Distributions }
1124*e3723e1fSApple OSS Distributions
1125*e3723e1fSApple OSS Distributions // enter mappings
1126*e3723e1fSApple OSS Distributions remain = size;
1127*e3723e1fSApple OSS Distributions mapAddr = addr;
1128*e3723e1fSApple OSS Distributions addr += pageOffset;
1129*e3723e1fSApple OSS Distributions
1130*e3723e1fSApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1131*e3723e1fSApple OSS Distributions entryOffset = offset - entry->offset;
1132*e3723e1fSApple OSS Distributions if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1133*e3723e1fSApple OSS Distributions err = kIOReturnNotAligned;
1134*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1135*e3723e1fSApple OSS Distributions break;
1136*e3723e1fSApple OSS Distributions }
1137*e3723e1fSApple OSS Distributions
1138*e3723e1fSApple OSS Distributions if (kIODefaultCache != cacheMode) {
1139*e3723e1fSApple OSS Distributions vm_size_t unused = 0;
1140*e3723e1fSApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1141*e3723e1fSApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1142*e3723e1fSApple OSS Distributions assert(KERN_SUCCESS == err);
1143*e3723e1fSApple OSS Distributions }
1144*e3723e1fSApple OSS Distributions
1145*e3723e1fSApple OSS Distributions entryOffset -= pageOffset;
1146*e3723e1fSApple OSS Distributions if (entryOffset >= entry->size) {
1147*e3723e1fSApple OSS Distributions panic("entryOffset");
1148*e3723e1fSApple OSS Distributions }
1149*e3723e1fSApple OSS Distributions chunk = entry->size - entryOffset;
1150*e3723e1fSApple OSS Distributions if (chunk) {
1151*e3723e1fSApple OSS Distributions vm_map_kernel_flags_t vmk_flags = {
1152*e3723e1fSApple OSS Distributions .vmf_fixed = true,
1153*e3723e1fSApple OSS Distributions .vmf_overwrite = true,
1154*e3723e1fSApple OSS Distributions .vm_tag = tag,
1155*e3723e1fSApple OSS Distributions .vmkf_iokit_acct = true,
1156*e3723e1fSApple OSS Distributions };
1157*e3723e1fSApple OSS Distributions
1158*e3723e1fSApple OSS Distributions if (chunk > remain) {
1159*e3723e1fSApple OSS Distributions chunk = remain;
1160*e3723e1fSApple OSS Distributions }
1161*e3723e1fSApple OSS Distributions if (options & kIOMapPrefault) {
1162*e3723e1fSApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1163*e3723e1fSApple OSS Distributions
1164*e3723e1fSApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1165*e3723e1fSApple OSS Distributions &mapAddr,
1166*e3723e1fSApple OSS Distributions chunk, 0 /* mask */,
1167*e3723e1fSApple OSS Distributions vmk_flags,
1168*e3723e1fSApple OSS Distributions entry->entry,
1169*e3723e1fSApple OSS Distributions entryOffset,
1170*e3723e1fSApple OSS Distributions prot, // cur
1171*e3723e1fSApple OSS Distributions prot, // max
1172*e3723e1fSApple OSS Distributions &pageList[currentPageIndex],
1173*e3723e1fSApple OSS Distributions nb_pages);
1174*e3723e1fSApple OSS Distributions
1175*e3723e1fSApple OSS Distributions if (err || vm_map_page_mask(map) < PAGE_MASK) {
1176*e3723e1fSApple OSS Distributions DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1177*e3723e1fSApple OSS Distributions }
1178*e3723e1fSApple OSS Distributions // Compute the next index in the page list.
1179*e3723e1fSApple OSS Distributions currentPageIndex += nb_pages;
1180*e3723e1fSApple OSS Distributions assert(currentPageIndex <= _pages);
1181*e3723e1fSApple OSS Distributions } else {
1182*e3723e1fSApple OSS Distributions err = mach_vm_map_kernel(map,
1183*e3723e1fSApple OSS Distributions &mapAddr,
1184*e3723e1fSApple OSS Distributions chunk, 0 /* mask */,
1185*e3723e1fSApple OSS Distributions vmk_flags,
1186*e3723e1fSApple OSS Distributions entry->entry,
1187*e3723e1fSApple OSS Distributions entryOffset,
1188*e3723e1fSApple OSS Distributions false, // copy
1189*e3723e1fSApple OSS Distributions prot, // cur
1190*e3723e1fSApple OSS Distributions prot, // max
1191*e3723e1fSApple OSS Distributions VM_INHERIT_NONE);
1192*e3723e1fSApple OSS Distributions }
1193*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
1194*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1195*e3723e1fSApple OSS Distributions break;
1196*e3723e1fSApple OSS Distributions }
1197*e3723e1fSApple OSS Distributions remain -= chunk;
1198*e3723e1fSApple OSS Distributions if (!remain) {
1199*e3723e1fSApple OSS Distributions break;
1200*e3723e1fSApple OSS Distributions }
1201*e3723e1fSApple OSS Distributions mapAddr += chunk;
1202*e3723e1fSApple OSS Distributions offset += chunk - pageOffset;
1203*e3723e1fSApple OSS Distributions }
1204*e3723e1fSApple OSS Distributions pageOffset = 0;
1205*e3723e1fSApple OSS Distributions entry++;
1206*e3723e1fSApple OSS Distributions entryIdx++;
1207*e3723e1fSApple OSS Distributions if (entryIdx >= ref->count) {
1208*e3723e1fSApple OSS Distributions err = kIOReturnOverrun;
1209*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1210*e3723e1fSApple OSS Distributions break;
1211*e3723e1fSApple OSS Distributions }
1212*e3723e1fSApple OSS Distributions }
1213*e3723e1fSApple OSS Distributions
1214*e3723e1fSApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1215*e3723e1fSApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1216*e3723e1fSApple OSS Distributions addr = 0;
1217*e3723e1fSApple OSS Distributions }
1218*e3723e1fSApple OSS Distributions *inaddr = addr;
1219*e3723e1fSApple OSS Distributions
1220*e3723e1fSApple OSS Distributions if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1221*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1222*e3723e1fSApple OSS Distributions }
1223*e3723e1fSApple OSS Distributions return err;
1224*e3723e1fSApple OSS Distributions }
1225*e3723e1fSApple OSS Distributions
1226*e3723e1fSApple OSS Distributions #define LOGUNALIGN 0
1227*e3723e1fSApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1228*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1229*e3723e1fSApple OSS Distributions IOMemoryReference * ref,
1230*e3723e1fSApple OSS Distributions vm_map_t map,
1231*e3723e1fSApple OSS Distributions mach_vm_size_t inoffset,
1232*e3723e1fSApple OSS Distributions mach_vm_size_t size,
1233*e3723e1fSApple OSS Distributions IOOptionBits options,
1234*e3723e1fSApple OSS Distributions mach_vm_address_t * inaddr)
1235*e3723e1fSApple OSS Distributions {
1236*e3723e1fSApple OSS Distributions IOReturn err;
1237*e3723e1fSApple OSS Distributions int64_t offset = inoffset;
1238*e3723e1fSApple OSS Distributions uint32_t entryIdx, firstEntryIdx;
1239*e3723e1fSApple OSS Distributions vm_map_offset_t addr, mapAddr, mapAddrOut;
1240*e3723e1fSApple OSS Distributions vm_map_offset_t entryOffset, remain, chunk;
1241*e3723e1fSApple OSS Distributions
1242*e3723e1fSApple OSS Distributions IOMemoryEntry * entry;
1243*e3723e1fSApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
1244*e3723e1fSApple OSS Distributions IOOptionBits type;
1245*e3723e1fSApple OSS Distributions IOOptionBits cacheMode;
1246*e3723e1fSApple OSS Distributions vm_tag_t tag;
1247*e3723e1fSApple OSS Distributions // for the kIOMapPrefault option.
1248*e3723e1fSApple OSS Distributions upl_page_info_t * pageList = NULL;
1249*e3723e1fSApple OSS Distributions UInt currentPageIndex = 0;
1250*e3723e1fSApple OSS Distributions bool didAlloc;
1251*e3723e1fSApple OSS Distributions
1252*e3723e1fSApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1253*e3723e1fSApple OSS Distributions
1254*e3723e1fSApple OSS Distributions if (ref->mapRef) {
1255*e3723e1fSApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1256*e3723e1fSApple OSS Distributions return err;
1257*e3723e1fSApple OSS Distributions }
1258*e3723e1fSApple OSS Distributions
1259*e3723e1fSApple OSS Distributions #if LOGUNALIGN
1260*e3723e1fSApple OSS Distributions printf("MAP offset %qx, %qx\n", inoffset, size);
1261*e3723e1fSApple OSS Distributions #endif
1262*e3723e1fSApple OSS Distributions
1263*e3723e1fSApple OSS Distributions type = _flags & kIOMemoryTypeMask;
1264*e3723e1fSApple OSS Distributions
1265*e3723e1fSApple OSS Distributions prot = VM_PROT_READ;
1266*e3723e1fSApple OSS Distributions if (!(kIOMapReadOnly & options)) {
1267*e3723e1fSApple OSS Distributions prot |= VM_PROT_WRITE;
1268*e3723e1fSApple OSS Distributions }
1269*e3723e1fSApple OSS Distributions prot &= ref->prot;
1270*e3723e1fSApple OSS Distributions
1271*e3723e1fSApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1272*e3723e1fSApple OSS Distributions if (kIODefaultCache != cacheMode) {
1273*e3723e1fSApple OSS Distributions // VM system requires write access to update named entry cache mode
1274*e3723e1fSApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1275*e3723e1fSApple OSS Distributions }
1276*e3723e1fSApple OSS Distributions
1277*e3723e1fSApple OSS Distributions tag = (vm_tag_t) getVMTag(map);
1278*e3723e1fSApple OSS Distributions
1279*e3723e1fSApple OSS Distributions addr = 0;
1280*e3723e1fSApple OSS Distributions didAlloc = false;
1281*e3723e1fSApple OSS Distributions
1282*e3723e1fSApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1283*e3723e1fSApple OSS Distributions addr = *inaddr;
1284*e3723e1fSApple OSS Distributions }
1285*e3723e1fSApple OSS Distributions
1286*e3723e1fSApple OSS Distributions // find first entry for offset
1287*e3723e1fSApple OSS Distributions for (firstEntryIdx = 0;
1288*e3723e1fSApple OSS Distributions (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1289*e3723e1fSApple OSS Distributions firstEntryIdx++) {
1290*e3723e1fSApple OSS Distributions }
1291*e3723e1fSApple OSS Distributions firstEntryIdx--;
1292*e3723e1fSApple OSS Distributions
1293*e3723e1fSApple OSS Distributions // calculate required VM space
1294*e3723e1fSApple OSS Distributions
1295*e3723e1fSApple OSS Distributions entryIdx = firstEntryIdx;
1296*e3723e1fSApple OSS Distributions entry = &ref->entries[entryIdx];
1297*e3723e1fSApple OSS Distributions
1298*e3723e1fSApple OSS Distributions remain = size;
1299*e3723e1fSApple OSS Distributions int64_t iteroffset = offset;
1300*e3723e1fSApple OSS Distributions uint64_t mapSize = 0;
1301*e3723e1fSApple OSS Distributions while (remain) {
1302*e3723e1fSApple OSS Distributions entryOffset = iteroffset - entry->offset;
1303*e3723e1fSApple OSS Distributions if (entryOffset >= entry->size) {
1304*e3723e1fSApple OSS Distributions panic("entryOffset");
1305*e3723e1fSApple OSS Distributions }
1306*e3723e1fSApple OSS Distributions
1307*e3723e1fSApple OSS Distributions #if LOGUNALIGN
1308*e3723e1fSApple OSS Distributions printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1309*e3723e1fSApple OSS Distributions entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1310*e3723e1fSApple OSS Distributions #endif
1311*e3723e1fSApple OSS Distributions
1312*e3723e1fSApple OSS Distributions chunk = entry->size - entryOffset;
1313*e3723e1fSApple OSS Distributions if (chunk) {
1314*e3723e1fSApple OSS Distributions if (chunk > remain) {
1315*e3723e1fSApple OSS Distributions chunk = remain;
1316*e3723e1fSApple OSS Distributions }
1317*e3723e1fSApple OSS Distributions mach_vm_size_t entrySize;
1318*e3723e1fSApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1319*e3723e1fSApple OSS Distributions assert(KERN_SUCCESS == err);
1320*e3723e1fSApple OSS Distributions mapSize += entrySize;
1321*e3723e1fSApple OSS Distributions
1322*e3723e1fSApple OSS Distributions remain -= chunk;
1323*e3723e1fSApple OSS Distributions if (!remain) {
1324*e3723e1fSApple OSS Distributions break;
1325*e3723e1fSApple OSS Distributions }
1326*e3723e1fSApple OSS Distributions iteroffset += chunk; // - pageOffset;
1327*e3723e1fSApple OSS Distributions }
1328*e3723e1fSApple OSS Distributions entry++;
1329*e3723e1fSApple OSS Distributions entryIdx++;
1330*e3723e1fSApple OSS Distributions if (entryIdx >= ref->count) {
1331*e3723e1fSApple OSS Distributions panic("overrun");
1332*e3723e1fSApple OSS Distributions err = kIOReturnOverrun;
1333*e3723e1fSApple OSS Distributions break;
1334*e3723e1fSApple OSS Distributions }
1335*e3723e1fSApple OSS Distributions }
1336*e3723e1fSApple OSS Distributions
1337*e3723e1fSApple OSS Distributions if (kIOMapOverwrite & options) {
1338*e3723e1fSApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339*e3723e1fSApple OSS Distributions map = IOPageableMapForAddress(addr);
1340*e3723e1fSApple OSS Distributions }
1341*e3723e1fSApple OSS Distributions err = KERN_SUCCESS;
1342*e3723e1fSApple OSS Distributions } else {
1343*e3723e1fSApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1344*e3723e1fSApple OSS Distributions ref.map = map;
1345*e3723e1fSApple OSS Distributions ref.tag = tag;
1346*e3723e1fSApple OSS Distributions ref.options = options;
1347*e3723e1fSApple OSS Distributions ref.size = mapSize;
1348*e3723e1fSApple OSS Distributions ref.prot = prot;
1349*e3723e1fSApple OSS Distributions if (options & kIOMapAnywhere) {
1350*e3723e1fSApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1351*e3723e1fSApple OSS Distributions ref.mapped = 0;
1352*e3723e1fSApple OSS Distributions } else {
1353*e3723e1fSApple OSS Distributions ref.mapped = addr;
1354*e3723e1fSApple OSS Distributions }
1355*e3723e1fSApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1356*e3723e1fSApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1357*e3723e1fSApple OSS Distributions } else {
1358*e3723e1fSApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1359*e3723e1fSApple OSS Distributions }
1360*e3723e1fSApple OSS Distributions
1361*e3723e1fSApple OSS Distributions if (KERN_SUCCESS == err) {
1362*e3723e1fSApple OSS Distributions addr = ref.mapped;
1363*e3723e1fSApple OSS Distributions map = ref.map;
1364*e3723e1fSApple OSS Distributions didAlloc = true;
1365*e3723e1fSApple OSS Distributions }
1366*e3723e1fSApple OSS Distributions #if LOGUNALIGN
1367*e3723e1fSApple OSS Distributions IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1368*e3723e1fSApple OSS Distributions #endif
1369*e3723e1fSApple OSS Distributions }
1370*e3723e1fSApple OSS Distributions
1371*e3723e1fSApple OSS Distributions /*
1372*e3723e1fSApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1373*e3723e1fSApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1374*e3723e1fSApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1375*e3723e1fSApple OSS Distributions * operations.
1376*e3723e1fSApple OSS Distributions */
1377*e3723e1fSApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1378*e3723e1fSApple OSS Distributions options &= ~kIOMapPrefault;
1379*e3723e1fSApple OSS Distributions }
1380*e3723e1fSApple OSS Distributions
1381*e3723e1fSApple OSS Distributions /*
1382*e3723e1fSApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1383*e3723e1fSApple OSS Distributions * memory type, and the underlying data.
1384*e3723e1fSApple OSS Distributions */
1385*e3723e1fSApple OSS Distributions if (options & kIOMapPrefault) {
1386*e3723e1fSApple OSS Distributions /*
1387*e3723e1fSApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1388*e3723e1fSApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1389*e3723e1fSApple OSS Distributions */
1390*e3723e1fSApple OSS Distributions assert(_wireCount != 0);
1391*e3723e1fSApple OSS Distributions assert(_memoryEntries != NULL);
1392*e3723e1fSApple OSS Distributions if ((_wireCount == 0) ||
1393*e3723e1fSApple OSS Distributions (_memoryEntries == NULL)) {
1394*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
1395*e3723e1fSApple OSS Distributions }
1396*e3723e1fSApple OSS Distributions
1397*e3723e1fSApple OSS Distributions // Get the page list.
1398*e3723e1fSApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1399*e3723e1fSApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1400*e3723e1fSApple OSS Distributions pageList = getPageList(dataP);
1401*e3723e1fSApple OSS Distributions
1402*e3723e1fSApple OSS Distributions // Get the number of IOPLs.
1403*e3723e1fSApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1404*e3723e1fSApple OSS Distributions
1405*e3723e1fSApple OSS Distributions 		/*
1406*e3723e1fSApple OSS Distributions 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1407*e3723e1fSApple OSS Distributions 		 * the offset. The scan will step one block past it, so we back up to the
1408*e3723e1fSApple OSS Distributions 		 * correct block at the end.
1409*e3723e1fSApple OSS Distributions 		 */
1410*e3723e1fSApple OSS Distributions UInt ioplIndex = 0;
1411*e3723e1fSApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1412*e3723e1fSApple OSS Distributions ioplIndex++;
1413*e3723e1fSApple OSS Distributions }
1414*e3723e1fSApple OSS Distributions ioplIndex--;
1415*e3723e1fSApple OSS Distributions
1416*e3723e1fSApple OSS Distributions // Retrieve the IOPL info block.
1417*e3723e1fSApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1418*e3723e1fSApple OSS Distributions
1419*e3723e1fSApple OSS Distributions /*
1420*e3723e1fSApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1421*e3723e1fSApple OSS Distributions * array.
1422*e3723e1fSApple OSS Distributions */
1423*e3723e1fSApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1424*e3723e1fSApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1425*e3723e1fSApple OSS Distributions } else {
1426*e3723e1fSApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1427*e3723e1fSApple OSS Distributions }
1428*e3723e1fSApple OSS Distributions
1429*e3723e1fSApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look up the first page index.
1430*e3723e1fSApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1431*e3723e1fSApple OSS Distributions
1432*e3723e1fSApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1433*e3723e1fSApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1434*e3723e1fSApple OSS Distributions }
1435*e3723e1fSApple OSS Distributions
1436*e3723e1fSApple OSS Distributions // enter mappings
1437*e3723e1fSApple OSS Distributions remain = size;
1438*e3723e1fSApple OSS Distributions mapAddr = addr;
1439*e3723e1fSApple OSS Distributions entryIdx = firstEntryIdx;
1440*e3723e1fSApple OSS Distributions entry = &ref->entries[entryIdx];
1441*e3723e1fSApple OSS Distributions
1442*e3723e1fSApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1443*e3723e1fSApple OSS Distributions #if LOGUNALIGN
1444*e3723e1fSApple OSS Distributions printf("offset %qx, %qx\n", offset, entry->offset);
1445*e3723e1fSApple OSS Distributions #endif
1446*e3723e1fSApple OSS Distributions if (kIODefaultCache != cacheMode) {
1447*e3723e1fSApple OSS Distributions vm_size_t unused = 0;
1448*e3723e1fSApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1449*e3723e1fSApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1450*e3723e1fSApple OSS Distributions assert(KERN_SUCCESS == err);
1451*e3723e1fSApple OSS Distributions }
1452*e3723e1fSApple OSS Distributions entryOffset = offset - entry->offset;
1453*e3723e1fSApple OSS Distributions if (entryOffset >= entry->size) {
1454*e3723e1fSApple OSS Distributions panic("entryOffset");
1455*e3723e1fSApple OSS Distributions }
1456*e3723e1fSApple OSS Distributions chunk = entry->size - entryOffset;
1457*e3723e1fSApple OSS Distributions #if LOGUNALIGN
1458*e3723e1fSApple OSS Distributions printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1459*e3723e1fSApple OSS Distributions #endif
1460*e3723e1fSApple OSS Distributions if (chunk) {
1461*e3723e1fSApple OSS Distributions vm_map_kernel_flags_t vmk_flags = {
1462*e3723e1fSApple OSS Distributions .vmf_fixed = true,
1463*e3723e1fSApple OSS Distributions .vmf_overwrite = true,
1464*e3723e1fSApple OSS Distributions .vmf_return_data_addr = true,
1465*e3723e1fSApple OSS Distributions .vm_tag = tag,
1466*e3723e1fSApple OSS Distributions .vmkf_iokit_acct = true,
1467*e3723e1fSApple OSS Distributions };
1468*e3723e1fSApple OSS Distributions
1469*e3723e1fSApple OSS Distributions if (chunk > remain) {
1470*e3723e1fSApple OSS Distributions chunk = remain;
1471*e3723e1fSApple OSS Distributions }
1472*e3723e1fSApple OSS Distributions mapAddrOut = mapAddr;
1473*e3723e1fSApple OSS Distributions if (options & kIOMapPrefault) {
1474*e3723e1fSApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1475*e3723e1fSApple OSS Distributions
1476*e3723e1fSApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1477*e3723e1fSApple OSS Distributions &mapAddrOut,
1478*e3723e1fSApple OSS Distributions chunk, 0 /* mask */,
1479*e3723e1fSApple OSS Distributions vmk_flags,
1480*e3723e1fSApple OSS Distributions entry->entry,
1481*e3723e1fSApple OSS Distributions entryOffset,
1482*e3723e1fSApple OSS Distributions prot, // cur
1483*e3723e1fSApple OSS Distributions prot, // max
1484*e3723e1fSApple OSS Distributions &pageList[currentPageIndex],
1485*e3723e1fSApple OSS Distributions nb_pages);
1486*e3723e1fSApple OSS Distributions
1487*e3723e1fSApple OSS Distributions // Compute the next index in the page list.
1488*e3723e1fSApple OSS Distributions currentPageIndex += nb_pages;
1489*e3723e1fSApple OSS Distributions assert(currentPageIndex <= _pages);
1490*e3723e1fSApple OSS Distributions } else {
1491*e3723e1fSApple OSS Distributions #if LOGUNALIGN
1492*e3723e1fSApple OSS Distributions printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1493*e3723e1fSApple OSS Distributions #endif
1494*e3723e1fSApple OSS Distributions err = mach_vm_map_kernel(map,
1495*e3723e1fSApple OSS Distributions &mapAddrOut,
1496*e3723e1fSApple OSS Distributions chunk, 0 /* mask */,
1497*e3723e1fSApple OSS Distributions vmk_flags,
1498*e3723e1fSApple OSS Distributions entry->entry,
1499*e3723e1fSApple OSS Distributions entryOffset,
1500*e3723e1fSApple OSS Distributions false, // copy
1501*e3723e1fSApple OSS Distributions prot, // cur
1502*e3723e1fSApple OSS Distributions prot, // max
1503*e3723e1fSApple OSS Distributions VM_INHERIT_NONE);
1504*e3723e1fSApple OSS Distributions }
1505*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
1506*e3723e1fSApple OSS Distributions panic("map enter err %x", err);
1507*e3723e1fSApple OSS Distributions break;
1508*e3723e1fSApple OSS Distributions }
1509*e3723e1fSApple OSS Distributions #if LOGUNALIGN
1510*e3723e1fSApple OSS Distributions printf("mapAddr o %qx\n", mapAddrOut);
1511*e3723e1fSApple OSS Distributions #endif
1512*e3723e1fSApple OSS Distributions if (entryIdx == firstEntryIdx) {
1513*e3723e1fSApple OSS Distributions addr = mapAddrOut;
1514*e3723e1fSApple OSS Distributions }
1515*e3723e1fSApple OSS Distributions remain -= chunk;
1516*e3723e1fSApple OSS Distributions if (!remain) {
1517*e3723e1fSApple OSS Distributions break;
1518*e3723e1fSApple OSS Distributions }
1519*e3723e1fSApple OSS Distributions mach_vm_size_t entrySize;
1520*e3723e1fSApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1521*e3723e1fSApple OSS Distributions assert(KERN_SUCCESS == err);
1522*e3723e1fSApple OSS Distributions mapAddr += entrySize;
1523*e3723e1fSApple OSS Distributions offset += chunk;
1524*e3723e1fSApple OSS Distributions }
1525*e3723e1fSApple OSS Distributions
1526*e3723e1fSApple OSS Distributions entry++;
1527*e3723e1fSApple OSS Distributions entryIdx++;
1528*e3723e1fSApple OSS Distributions if (entryIdx >= ref->count) {
1529*e3723e1fSApple OSS Distributions err = kIOReturnOverrun;
1530*e3723e1fSApple OSS Distributions break;
1531*e3723e1fSApple OSS Distributions }
1532*e3723e1fSApple OSS Distributions }
1533*e3723e1fSApple OSS Distributions
1534*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
1535*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1536*e3723e1fSApple OSS Distributions }
1537*e3723e1fSApple OSS Distributions
1538*e3723e1fSApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1539*e3723e1fSApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1540*e3723e1fSApple OSS Distributions addr = 0;
1541*e3723e1fSApple OSS Distributions }
1542*e3723e1fSApple OSS Distributions *inaddr = addr;
1543*e3723e1fSApple OSS Distributions
1544*e3723e1fSApple OSS Distributions return err;
1545*e3723e1fSApple OSS Distributions }
1546*e3723e1fSApple OSS Distributions
1547*e3723e1fSApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1548*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1549*e3723e1fSApple OSS Distributions IOMemoryReference * ref,
1550*e3723e1fSApple OSS Distributions uint64_t * offset)
1551*e3723e1fSApple OSS Distributions {
1552*e3723e1fSApple OSS Distributions kern_return_t kr;
1553*e3723e1fSApple OSS Distributions vm_object_offset_t data_offset = 0;
1554*e3723e1fSApple OSS Distributions uint64_t total;
1555*e3723e1fSApple OSS Distributions uint32_t idx;
1556*e3723e1fSApple OSS Distributions
1557*e3723e1fSApple OSS Distributions assert(ref->count);
1558*e3723e1fSApple OSS Distributions if (offset) {
1559*e3723e1fSApple OSS Distributions *offset = (uint64_t) data_offset;
1560*e3723e1fSApple OSS Distributions }
1561*e3723e1fSApple OSS Distributions total = 0;
1562*e3723e1fSApple OSS Distributions for (idx = 0; idx < ref->count; idx++) {
1563*e3723e1fSApple OSS Distributions kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1564*e3723e1fSApple OSS Distributions &data_offset);
1565*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != kr) {
1566*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1567*e3723e1fSApple OSS Distributions } else if (0 != data_offset) {
1568*e3723e1fSApple OSS Distributions DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1569*e3723e1fSApple OSS Distributions }
1570*e3723e1fSApple OSS Distributions if (offset && !idx) {
1571*e3723e1fSApple OSS Distributions *offset = (uint64_t) data_offset;
1572*e3723e1fSApple OSS Distributions }
1573*e3723e1fSApple OSS Distributions total += round_page(data_offset + ref->entries[idx].size);
1574*e3723e1fSApple OSS Distributions }
1575*e3723e1fSApple OSS Distributions
1576*e3723e1fSApple OSS Distributions DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1577*e3723e1fSApple OSS Distributions (offset ? *offset : (vm_object_offset_t)-1), total);
1578*e3723e1fSApple OSS Distributions
1579*e3723e1fSApple OSS Distributions return total;
1580*e3723e1fSApple OSS Distributions }
1581*e3723e1fSApple OSS Distributions
1582*e3723e1fSApple OSS Distributions
1583*e3723e1fSApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1584*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1585*e3723e1fSApple OSS Distributions IOMemoryReference * ref,
1586*e3723e1fSApple OSS Distributions IOByteCount * residentPageCount,
1587*e3723e1fSApple OSS Distributions IOByteCount * dirtyPageCount)
1588*e3723e1fSApple OSS Distributions {
1589*e3723e1fSApple OSS Distributions IOReturn err;
1590*e3723e1fSApple OSS Distributions IOMemoryEntry * entries;
1591*e3723e1fSApple OSS Distributions unsigned int resident, dirty;
1592*e3723e1fSApple OSS Distributions unsigned int totalResident, totalDirty;
1593*e3723e1fSApple OSS Distributions
1594*e3723e1fSApple OSS Distributions totalResident = totalDirty = 0;
1595*e3723e1fSApple OSS Distributions err = kIOReturnSuccess;
1596*e3723e1fSApple OSS Distributions entries = ref->entries + ref->count;
1597*e3723e1fSApple OSS Distributions while (entries > &ref->entries[0]) {
1598*e3723e1fSApple OSS Distributions entries--;
1599*e3723e1fSApple OSS Distributions err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1600*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
1601*e3723e1fSApple OSS Distributions break;
1602*e3723e1fSApple OSS Distributions }
1603*e3723e1fSApple OSS Distributions totalResident += resident;
1604*e3723e1fSApple OSS Distributions totalDirty += dirty;
1605*e3723e1fSApple OSS Distributions }
1606*e3723e1fSApple OSS Distributions
1607*e3723e1fSApple OSS Distributions if (residentPageCount) {
1608*e3723e1fSApple OSS Distributions *residentPageCount = totalResident;
1609*e3723e1fSApple OSS Distributions }
1610*e3723e1fSApple OSS Distributions if (dirtyPageCount) {
1611*e3723e1fSApple OSS Distributions *dirtyPageCount = totalDirty;
1612*e3723e1fSApple OSS Distributions }
1613*e3723e1fSApple OSS Distributions return err;
1614*e3723e1fSApple OSS Distributions }
1615*e3723e1fSApple OSS Distributions
1616*e3723e1fSApple OSS Distributions IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1617*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1618*e3723e1fSApple OSS Distributions IOMemoryReference * ref,
1619*e3723e1fSApple OSS Distributions IOOptionBits newState,
1620*e3723e1fSApple OSS Distributions IOOptionBits * oldState)
1621*e3723e1fSApple OSS Distributions {
1622*e3723e1fSApple OSS Distributions IOReturn err;
1623*e3723e1fSApple OSS Distributions IOMemoryEntry * entries;
1624*e3723e1fSApple OSS Distributions vm_purgable_t control;
1625*e3723e1fSApple OSS Distributions int totalState, state;
1626*e3723e1fSApple OSS Distributions
1627*e3723e1fSApple OSS Distributions totalState = kIOMemoryPurgeableNonVolatile;
1628*e3723e1fSApple OSS Distributions err = kIOReturnSuccess;
1629*e3723e1fSApple OSS Distributions entries = ref->entries + ref->count;
1630*e3723e1fSApple OSS Distributions while (entries > &ref->entries[0]) {
1631*e3723e1fSApple OSS Distributions entries--;
1632*e3723e1fSApple OSS Distributions
1633*e3723e1fSApple OSS Distributions err = purgeableControlBits(newState, &control, &state);
1634*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
1635*e3723e1fSApple OSS Distributions break;
1636*e3723e1fSApple OSS Distributions }
1637*e3723e1fSApple OSS Distributions err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1638*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
1639*e3723e1fSApple OSS Distributions break;
1640*e3723e1fSApple OSS Distributions }
1641*e3723e1fSApple OSS Distributions err = purgeableStateBits(&state);
1642*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
1643*e3723e1fSApple OSS Distributions break;
1644*e3723e1fSApple OSS Distributions }
1645*e3723e1fSApple OSS Distributions
1646*e3723e1fSApple OSS Distributions if (kIOMemoryPurgeableEmpty == state) {
1647*e3723e1fSApple OSS Distributions totalState = kIOMemoryPurgeableEmpty;
1648*e3723e1fSApple OSS Distributions } else if (kIOMemoryPurgeableEmpty == totalState) {
1649*e3723e1fSApple OSS Distributions continue;
1650*e3723e1fSApple OSS Distributions } else if (kIOMemoryPurgeableVolatile == totalState) {
1651*e3723e1fSApple OSS Distributions continue;
1652*e3723e1fSApple OSS Distributions } else if (kIOMemoryPurgeableVolatile == state) {
1653*e3723e1fSApple OSS Distributions totalState = kIOMemoryPurgeableVolatile;
1654*e3723e1fSApple OSS Distributions } else {
1655*e3723e1fSApple OSS Distributions totalState = kIOMemoryPurgeableNonVolatile;
1656*e3723e1fSApple OSS Distributions }
1657*e3723e1fSApple OSS Distributions }
1658*e3723e1fSApple OSS Distributions
1659*e3723e1fSApple OSS Distributions if (oldState) {
1660*e3723e1fSApple OSS Distributions *oldState = totalState;
1661*e3723e1fSApple OSS Distributions }
1662*e3723e1fSApple OSS Distributions return err;
1663*e3723e1fSApple OSS Distributions }
1664*e3723e1fSApple OSS Distributions
1665*e3723e1fSApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1666*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667*e3723e1fSApple OSS Distributions IOMemoryReference * ref,
1668*e3723e1fSApple OSS Distributions task_t newOwner,
1669*e3723e1fSApple OSS Distributions int newLedgerTag,
1670*e3723e1fSApple OSS Distributions IOOptionBits newLedgerOptions)
1671*e3723e1fSApple OSS Distributions {
1672*e3723e1fSApple OSS Distributions IOReturn err, totalErr;
1673*e3723e1fSApple OSS Distributions IOMemoryEntry * entries;
1674*e3723e1fSApple OSS Distributions
1675*e3723e1fSApple OSS Distributions totalErr = kIOReturnSuccess;
1676*e3723e1fSApple OSS Distributions entries = ref->entries + ref->count;
1677*e3723e1fSApple OSS Distributions while (entries > &ref->entries[0]) {
1678*e3723e1fSApple OSS Distributions entries--;
1679*e3723e1fSApple OSS Distributions
1680*e3723e1fSApple OSS Distributions err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1681*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != err) {
1682*e3723e1fSApple OSS Distributions totalErr = err;
1683*e3723e1fSApple OSS Distributions }
1684*e3723e1fSApple OSS Distributions }
1685*e3723e1fSApple OSS Distributions
1686*e3723e1fSApple OSS Distributions return totalErr;
1687*e3723e1fSApple OSS Distributions }
1688*e3723e1fSApple OSS Distributions
1689*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690*e3723e1fSApple OSS Distributions
1691*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1692*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withAddress(void * address,
1693*e3723e1fSApple OSS Distributions IOByteCount length,
1694*e3723e1fSApple OSS Distributions IODirection direction)
1695*e3723e1fSApple OSS Distributions {
1696*e3723e1fSApple OSS Distributions return IOMemoryDescriptor::
1697*e3723e1fSApple OSS Distributions withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1698*e3723e1fSApple OSS Distributions }
1699*e3723e1fSApple OSS Distributions
1700*e3723e1fSApple OSS Distributions #ifndef __LP64__
1701*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1702*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703*e3723e1fSApple OSS Distributions IOByteCount length,
1704*e3723e1fSApple OSS Distributions IODirection direction,
1705*e3723e1fSApple OSS Distributions task_t task)
1706*e3723e1fSApple OSS Distributions {
1707*e3723e1fSApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708*e3723e1fSApple OSS Distributions if (that) {
1709*e3723e1fSApple OSS Distributions if (that->initWithAddress(address, length, direction, task)) {
1710*e3723e1fSApple OSS Distributions return os::move(that);
1711*e3723e1fSApple OSS Distributions }
1712*e3723e1fSApple OSS Distributions }
1713*e3723e1fSApple OSS Distributions return nullptr;
1714*e3723e1fSApple OSS Distributions }
1715*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
1716*e3723e1fSApple OSS Distributions
1717*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1718*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1719*e3723e1fSApple OSS Distributions IOPhysicalAddress address,
1720*e3723e1fSApple OSS Distributions IOByteCount length,
1721*e3723e1fSApple OSS Distributions IODirection direction )
1722*e3723e1fSApple OSS Distributions {
1723*e3723e1fSApple OSS Distributions return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1724*e3723e1fSApple OSS Distributions }
1725*e3723e1fSApple OSS Distributions
1726*e3723e1fSApple OSS Distributions #ifndef __LP64__
1727*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1728*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729*e3723e1fSApple OSS Distributions UInt32 withCount,
1730*e3723e1fSApple OSS Distributions IODirection direction,
1731*e3723e1fSApple OSS Distributions task_t task,
1732*e3723e1fSApple OSS Distributions bool asReference)
1733*e3723e1fSApple OSS Distributions {
1734*e3723e1fSApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735*e3723e1fSApple OSS Distributions if (that) {
1736*e3723e1fSApple OSS Distributions if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737*e3723e1fSApple OSS Distributions return os::move(that);
1738*e3723e1fSApple OSS Distributions }
1739*e3723e1fSApple OSS Distributions }
1740*e3723e1fSApple OSS Distributions return nullptr;
1741*e3723e1fSApple OSS Distributions }
1742*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
1743*e3723e1fSApple OSS Distributions
1744*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1745*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746*e3723e1fSApple OSS Distributions mach_vm_size_t length,
1747*e3723e1fSApple OSS Distributions IOOptionBits options,
1748*e3723e1fSApple OSS Distributions task_t task)
1749*e3723e1fSApple OSS Distributions {
1750*e3723e1fSApple OSS Distributions IOAddressRange range = { address, length };
1751*e3723e1fSApple OSS Distributions return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1752*e3723e1fSApple OSS Distributions }
1753*e3723e1fSApple OSS Distributions
1754*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1755*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1756*e3723e1fSApple OSS Distributions UInt32 rangeCount,
1757*e3723e1fSApple OSS Distributions IOOptionBits options,
1758*e3723e1fSApple OSS Distributions task_t task)
1759*e3723e1fSApple OSS Distributions {
1760*e3723e1fSApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761*e3723e1fSApple OSS Distributions if (that) {
1762*e3723e1fSApple OSS Distributions if (task) {
1763*e3723e1fSApple OSS Distributions options |= kIOMemoryTypeVirtual64;
1764*e3723e1fSApple OSS Distributions } else {
1765*e3723e1fSApple OSS Distributions options |= kIOMemoryTypePhysical64;
1766*e3723e1fSApple OSS Distributions }
1767*e3723e1fSApple OSS Distributions
1768*e3723e1fSApple OSS Distributions if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1769*e3723e1fSApple OSS Distributions return os::move(that);
1770*e3723e1fSApple OSS Distributions }
1771*e3723e1fSApple OSS Distributions }
1772*e3723e1fSApple OSS Distributions
1773*e3723e1fSApple OSS Distributions return nullptr;
1774*e3723e1fSApple OSS Distributions }
1775*e3723e1fSApple OSS Distributions
1776*e3723e1fSApple OSS Distributions
1777*e3723e1fSApple OSS Distributions /*
1778*e3723e1fSApple OSS Distributions * withOptions:
1779*e3723e1fSApple OSS Distributions *
1780*e3723e1fSApple OSS Distributions * Create a new IOMemoryDescriptor. The buffer is made up of several
1781*e3723e1fSApple OSS Distributions * virtual address ranges, from a given task.
1782*e3723e1fSApple OSS Distributions *
1783*e3723e1fSApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1784*e3723e1fSApple OSS Distributions */
1785*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1786*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withOptions(void * buffers,
1787*e3723e1fSApple OSS Distributions UInt32 count,
1788*e3723e1fSApple OSS Distributions UInt32 offset,
1789*e3723e1fSApple OSS Distributions task_t task,
1790*e3723e1fSApple OSS Distributions IOOptionBits opts,
1791*e3723e1fSApple OSS Distributions IOMapper * mapper)
1792*e3723e1fSApple OSS Distributions {
1793*e3723e1fSApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794*e3723e1fSApple OSS Distributions
1795*e3723e1fSApple OSS Distributions if (self
1796*e3723e1fSApple OSS Distributions && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1797*e3723e1fSApple OSS Distributions return nullptr;
1798*e3723e1fSApple OSS Distributions }
1799*e3723e1fSApple OSS Distributions
1800*e3723e1fSApple OSS Distributions return os::move(self);
1801*e3723e1fSApple OSS Distributions }
1802*e3723e1fSApple OSS Distributions
1803*e3723e1fSApple OSS Distributions bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1804*e3723e1fSApple OSS Distributions IOMemoryDescriptor::initWithOptions(void * buffers,
1805*e3723e1fSApple OSS Distributions UInt32 count,
1806*e3723e1fSApple OSS Distributions UInt32 offset,
1807*e3723e1fSApple OSS Distributions task_t task,
1808*e3723e1fSApple OSS Distributions IOOptionBits options,
1809*e3723e1fSApple OSS Distributions IOMapper * mapper)
1810*e3723e1fSApple OSS Distributions {
1811*e3723e1fSApple OSS Distributions return false;
1812*e3723e1fSApple OSS Distributions }
1813*e3723e1fSApple OSS Distributions
1814*e3723e1fSApple OSS Distributions #ifndef __LP64__
1815*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1816*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817*e3723e1fSApple OSS Distributions UInt32 withCount,
1818*e3723e1fSApple OSS Distributions IODirection direction,
1819*e3723e1fSApple OSS Distributions bool asReference)
1820*e3723e1fSApple OSS Distributions {
1821*e3723e1fSApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822*e3723e1fSApple OSS Distributions if (that) {
1823*e3723e1fSApple OSS Distributions if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824*e3723e1fSApple OSS Distributions return os::move(that);
1825*e3723e1fSApple OSS Distributions }
1826*e3723e1fSApple OSS Distributions }
1827*e3723e1fSApple OSS Distributions return nullptr;
1828*e3723e1fSApple OSS Distributions }
1829*e3723e1fSApple OSS Distributions
1830*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1831*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1832*e3723e1fSApple OSS Distributions IOByteCount offset,
1833*e3723e1fSApple OSS Distributions IOByteCount length,
1834*e3723e1fSApple OSS Distributions IODirection direction)
1835*e3723e1fSApple OSS Distributions {
1836*e3723e1fSApple OSS Distributions return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1837*e3723e1fSApple OSS Distributions }
1838*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
1839*e3723e1fSApple OSS Distributions
1840*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1841*e3723e1fSApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842*e3723e1fSApple OSS Distributions {
1843*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor *origGenMD =
1844*e3723e1fSApple OSS Distributions OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845*e3723e1fSApple OSS Distributions
1846*e3723e1fSApple OSS Distributions if (origGenMD) {
1847*e3723e1fSApple OSS Distributions return IOGeneralMemoryDescriptor::
1848*e3723e1fSApple OSS Distributions withPersistentMemoryDescriptor(origGenMD);
1849*e3723e1fSApple OSS Distributions } else {
1850*e3723e1fSApple OSS Distributions return nullptr;
1851*e3723e1fSApple OSS Distributions }
1852*e3723e1fSApple OSS Distributions }
1853*e3723e1fSApple OSS Distributions
1854*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor * originalMD)1855*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1856*e3723e1fSApple OSS Distributions {
1857*e3723e1fSApple OSS Distributions IOMemoryReference * memRef;
1858*e3723e1fSApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> self;
1859*e3723e1fSApple OSS Distributions
1860*e3723e1fSApple OSS Distributions if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1861*e3723e1fSApple OSS Distributions return nullptr;
1862*e3723e1fSApple OSS Distributions }
1863*e3723e1fSApple OSS Distributions
1864*e3723e1fSApple OSS Distributions if (memRef == originalMD->_memRef) {
1865*e3723e1fSApple OSS Distributions self.reset(originalMD, OSRetain);
1866*e3723e1fSApple OSS Distributions originalMD->memoryReferenceRelease(memRef);
1867*e3723e1fSApple OSS Distributions return os::move(self);
1868*e3723e1fSApple OSS Distributions }
1869*e3723e1fSApple OSS Distributions
1870*e3723e1fSApple OSS Distributions self = OSMakeShared<IOGeneralMemoryDescriptor>();
1871*e3723e1fSApple OSS Distributions IOMDPersistentInitData initData = { originalMD, memRef };
1872*e3723e1fSApple OSS Distributions
1873*e3723e1fSApple OSS Distributions if (self
1874*e3723e1fSApple OSS Distributions && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1875*e3723e1fSApple OSS Distributions return nullptr;
1876*e3723e1fSApple OSS Distributions }
1877*e3723e1fSApple OSS Distributions return os::move(self);
1878*e3723e1fSApple OSS Distributions }
1879*e3723e1fSApple OSS Distributions
1880*e3723e1fSApple OSS Distributions #ifndef __LP64__
1881*e3723e1fSApple OSS Distributions bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1882*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(void * address,
1883*e3723e1fSApple OSS Distributions IOByteCount withLength,
1884*e3723e1fSApple OSS Distributions IODirection withDirection)
1885*e3723e1fSApple OSS Distributions {
1886*e3723e1fSApple OSS Distributions _singleRange.v.address = (vm_offset_t) address;
1887*e3723e1fSApple OSS Distributions _singleRange.v.length = withLength;
1888*e3723e1fSApple OSS Distributions
1889*e3723e1fSApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1890*e3723e1fSApple OSS Distributions }
1891*e3723e1fSApple OSS Distributions
1892*e3723e1fSApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1893*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1894*e3723e1fSApple OSS Distributions IOByteCount withLength,
1895*e3723e1fSApple OSS Distributions IODirection withDirection,
1896*e3723e1fSApple OSS Distributions task_t withTask)
1897*e3723e1fSApple OSS Distributions {
1898*e3723e1fSApple OSS Distributions _singleRange.v.address = address;
1899*e3723e1fSApple OSS Distributions _singleRange.v.length = withLength;
1900*e3723e1fSApple OSS Distributions
1901*e3723e1fSApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1902*e3723e1fSApple OSS Distributions }
1903*e3723e1fSApple OSS Distributions
1904*e3723e1fSApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1905*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1906*e3723e1fSApple OSS Distributions IOPhysicalAddress address,
1907*e3723e1fSApple OSS Distributions IOByteCount withLength,
1908*e3723e1fSApple OSS Distributions IODirection withDirection )
1909*e3723e1fSApple OSS Distributions {
1910*e3723e1fSApple OSS Distributions _singleRange.p.address = address;
1911*e3723e1fSApple OSS Distributions _singleRange.p.length = withLength;
1912*e3723e1fSApple OSS Distributions
1913*e3723e1fSApple OSS Distributions return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1914*e3723e1fSApple OSS Distributions }
1915*e3723e1fSApple OSS Distributions
1916*e3723e1fSApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1917*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918*e3723e1fSApple OSS Distributions IOPhysicalRange * ranges,
1919*e3723e1fSApple OSS Distributions UInt32 count,
1920*e3723e1fSApple OSS Distributions IODirection direction,
1921*e3723e1fSApple OSS Distributions bool reference)
1922*e3723e1fSApple OSS Distributions {
1923*e3723e1fSApple OSS Distributions IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924*e3723e1fSApple OSS Distributions
1925*e3723e1fSApple OSS Distributions if (reference) {
1926*e3723e1fSApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1927*e3723e1fSApple OSS Distributions }
1928*e3723e1fSApple OSS Distributions
1929*e3723e1fSApple OSS Distributions return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930*e3723e1fSApple OSS Distributions }
1931*e3723e1fSApple OSS Distributions
1932*e3723e1fSApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1933*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1934*e3723e1fSApple OSS Distributions IOVirtualRange * ranges,
1935*e3723e1fSApple OSS Distributions UInt32 count,
1936*e3723e1fSApple OSS Distributions IODirection direction,
1937*e3723e1fSApple OSS Distributions task_t task,
1938*e3723e1fSApple OSS Distributions bool reference)
1939*e3723e1fSApple OSS Distributions {
1940*e3723e1fSApple OSS Distributions IOOptionBits mdOpts = direction;
1941*e3723e1fSApple OSS Distributions
1942*e3723e1fSApple OSS Distributions if (reference) {
1943*e3723e1fSApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1944*e3723e1fSApple OSS Distributions }
1945*e3723e1fSApple OSS Distributions
1946*e3723e1fSApple OSS Distributions if (task) {
1947*e3723e1fSApple OSS Distributions mdOpts |= kIOMemoryTypeVirtual;
1948*e3723e1fSApple OSS Distributions
1949*e3723e1fSApple OSS Distributions // Auto-prepare if this is a kernel memory descriptor as very few
1950*e3723e1fSApple OSS Distributions // clients bother to prepare() kernel memory.
1951*e3723e1fSApple OSS Distributions // But it was not enforced so what are you going to do?
1952*e3723e1fSApple OSS Distributions if (task == kernel_task) {
1953*e3723e1fSApple OSS Distributions mdOpts |= kIOMemoryAutoPrepare;
1954*e3723e1fSApple OSS Distributions }
1955*e3723e1fSApple OSS Distributions } else {
1956*e3723e1fSApple OSS Distributions mdOpts |= kIOMemoryTypePhysical;
1957*e3723e1fSApple OSS Distributions }
1958*e3723e1fSApple OSS Distributions
1959*e3723e1fSApple OSS Distributions return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960*e3723e1fSApple OSS Distributions }
1961*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
1962*e3723e1fSApple OSS Distributions
1963*e3723e1fSApple OSS Distributions /*
1964*e3723e1fSApple OSS Distributions * initWithOptions:
1965*e3723e1fSApple OSS Distributions *
1966*e3723e1fSApple OSS Distributions * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1967*e3723e1fSApple OSS Distributions * from a given task, several physical ranges, an UPL from the ubc
1968*e3723e1fSApple OSS Distributions * system or a uio (may be 64bit) from the BSD subsystem.
1969*e3723e1fSApple OSS Distributions *
1970*e3723e1fSApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1971*e3723e1fSApple OSS Distributions *
1972*e3723e1fSApple OSS Distributions * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973*e3723e1fSApple OSS Distributions * existing instance -- note this behavior is not commonly supported in other
1974*e3723e1fSApple OSS Distributions * I/O Kit classes, although it is supported here.
1975*e3723e1fSApple OSS Distributions */
1976*e3723e1fSApple OSS Distributions
bool
IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
    UInt32 count,
    UInt32 offset,
    task_t task,
    IOOptionBits options,
    IOMapper * mapper)
{
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// 32-bit virtual ranges cannot describe memory belonging to a 64-bit
	// task; such callers must use ::withAddressRange() (64-bit ranges).
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuation data to initialse the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		// Rewrite our arguments from the original descriptor's saved
		// configuration, then fall through to the normal init path below.
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Validate the task argument against the memory type: virtual types
	// require an owning task, physical/UPL types must not have one.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else. Clean up our present state.
		 */
		// Note: this 'type' intentionally shadows the outer one -- it is the
		// descriptor's PREVIOUS type, needed to tear down the old state.
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			// Balance any outstanding prepare() calls before retargeting.
			while (_wireCount) {
				complete();
			}
		}
		// Free any range storage we allocated; reference ranges
		// (kIOMemoryAsReference) are owned by the caller.
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// Carry the redirected state forward; only a non-redirected re-init
		// drops the memory reference and flushes cached mappings.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options &= ~(kIOMemoryPreparedReadOnly);
	_flags = options;
	_task = task;

#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-instance bookkeeping (this may be a re-initialization).
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Allocate or free the prepare lock to match the thread-safety option.
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// Wrap an externally created UPL: record it in _memoryEntries as a
		// single ioPLBlock.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		// Derive the DMA access mode from the transfer direction; a UPL
		// must have exactly one direction.
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		// _wireCount++; // UPLs start out life wired

		_length = count;
		_pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		// The caller's UPL must cover the requested window.
		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice convieniently equal to 1
		iopl.fFlags = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// A single 64-bit range that fits below 4GB is demoted to
				// the 32-bit type and stored inline in _singleRange,
				// avoiding a separate allocation.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (type == kIOMemoryTypeVirtual64) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				if (count == 1) {
					// Single range is stored inline in _singleRange.
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
		_rangesCount = count;

		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		// Walk every range, accumulating total byte length and page count
		// with overflow checks; any early 'break' leaves ind < count,
		// which is treated as failure after the loop.
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind, _task);
			if (_task) {
				// Task-backed range: ask the VM layer for its physical
				// footprint rather than computing pages from addresses.
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				// Non-remote memory must have page numbers representable
				// in 32 bits (ppnum_t).
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				// Track the highest physical page touched by any range.
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		// Fail if the loop broke early or the total doesn't fit IOByteCount.
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false; /* overflow */
		}
		_length = totalLength;
		_pages = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed

		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// Sanity: cannot describe more pages than exist in the system.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Tag user-task or pageable-buffer memory for VM accounting.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			// Persistent descriptors need a named memory reference up front.
			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2348*e3723e1fSApple OSS Distributions
2349*e3723e1fSApple OSS Distributions /*
2350*e3723e1fSApple OSS Distributions * free
2351*e3723e1fSApple OSS Distributions *
2352*e3723e1fSApple OSS Distributions * Free resources.
2353*e3723e1fSApple OSS Distributions */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Clear the device pager's back-reference to this descriptor (under lock).
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		ioGMDData * dataP;
		// Physical descriptors: tear down any DMA mapping recorded in the
		// data area instead of running the complete() path.
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance every outstanding prepare() (implied completion on free).
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free the range storage unless it is caller-owned (kIOMemoryAsReference).
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2417*e3723e1fSApple OSS Distributions
2418*e3723e1fSApple OSS Distributions #ifndef __LP64__
// Legacy (pre-LP64) entry point; retained for binary compatibility only.
// Any call is a programming error and panics.
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}
2424*e3723e1fSApple OSS Distributions
// Legacy (pre-LP64) entry point; retained for binary compatibility only.
// Any call is a programming error and panics.
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
2430*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
2431*e3723e1fSApple OSS Distributions
2432*e3723e1fSApple OSS Distributions /*
2433*e3723e1fSApple OSS Distributions * getDirection:
2434*e3723e1fSApple OSS Distributions *
2435*e3723e1fSApple OSS Distributions * Get the direction of the transfer.
2436*e3723e1fSApple OSS Distributions */
2437*e3723e1fSApple OSS Distributions IODirection
getDirection() const2438*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getDirection() const
2439*e3723e1fSApple OSS Distributions {
2440*e3723e1fSApple OSS Distributions #ifndef __LP64__
2441*e3723e1fSApple OSS Distributions if (_direction) {
2442*e3723e1fSApple OSS Distributions return _direction;
2443*e3723e1fSApple OSS Distributions }
2444*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
2445*e3723e1fSApple OSS Distributions return (IODirection) (_flags & kIOMemoryDirectionMask);
2446*e3723e1fSApple OSS Distributions }
2447*e3723e1fSApple OSS Distributions
2448*e3723e1fSApple OSS Distributions /*
2449*e3723e1fSApple OSS Distributions * getLength:
2450*e3723e1fSApple OSS Distributions *
2451*e3723e1fSApple OSS Distributions * Get the length of the transfer (over all ranges).
2452*e3723e1fSApple OSS Distributions */
2453*e3723e1fSApple OSS Distributions IOByteCount
getLength() const2454*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getLength() const
2455*e3723e1fSApple OSS Distributions {
2456*e3723e1fSApple OSS Distributions return _length;
2457*e3723e1fSApple OSS Distributions }
2458*e3723e1fSApple OSS Distributions
2459*e3723e1fSApple OSS Distributions void
setTag(IOOptionBits tag)2460*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setTag( IOOptionBits tag )
2461*e3723e1fSApple OSS Distributions {
2462*e3723e1fSApple OSS Distributions _tag = tag;
2463*e3723e1fSApple OSS Distributions }
2464*e3723e1fSApple OSS Distributions
2465*e3723e1fSApple OSS Distributions IOOptionBits
getTag(void)2466*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getTag( void )
2467*e3723e1fSApple OSS Distributions {
2468*e3723e1fSApple OSS Distributions return _tag;
2469*e3723e1fSApple OSS Distributions }
2470*e3723e1fSApple OSS Distributions
2471*e3723e1fSApple OSS Distributions uint64_t
getFlags(void)2472*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getFlags(void)
2473*e3723e1fSApple OSS Distributions {
2474*e3723e1fSApple OSS Distributions return _flags;
2475*e3723e1fSApple OSS Distributions }
2476*e3723e1fSApple OSS Distributions
2477*e3723e1fSApple OSS Distributions OSObject *
copyContext(void) const2478*e3723e1fSApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2479*e3723e1fSApple OSS Distributions {
2480*e3723e1fSApple OSS Distributions if (reserved) {
2481*e3723e1fSApple OSS Distributions OSObject * context = reserved->contextObject;
2482*e3723e1fSApple OSS Distributions if (context) {
2483*e3723e1fSApple OSS Distributions context->retain();
2484*e3723e1fSApple OSS Distributions }
2485*e3723e1fSApple OSS Distributions return context;
2486*e3723e1fSApple OSS Distributions } else {
2487*e3723e1fSApple OSS Distributions return NULL;
2488*e3723e1fSApple OSS Distributions }
2489*e3723e1fSApple OSS Distributions }
2490*e3723e1fSApple OSS Distributions
// Attach (or clear, when obj == NULL) an opaque client context object,
// retaining the new object and releasing any previous one.
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	// Fast path: clearing a context when the reserved area was never
	// allocated needs no work (and avoids allocating it just to store NULL).
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Lazily allocates the reserved area on first use; note this local
	// 'reserved' shadows the member of the same name.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		OSObject * oldObject = reserved->contextObject;
		// Detach the old context with a compare-and-swap before releasing
		// it; NOTE(review): presumably this guards against a racing
		// setContext() releasing the same object twice -- confirm the
		// intended concurrency contract.
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			// Take a reference on behalf of this descriptor.
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2511*e3723e1fSApple OSS Distributions
2512*e3723e1fSApple OSS Distributions #ifndef __LP64__
2513*e3723e1fSApple OSS Distributions #pragma clang diagnostic push
2514*e3723e1fSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2515*e3723e1fSApple OSS Distributions
2516*e3723e1fSApple OSS Distributions // @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
2517*e3723e1fSApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2518*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
2519*e3723e1fSApple OSS Distributions {
2520*e3723e1fSApple OSS Distributions addr64_t physAddr = 0;
2521*e3723e1fSApple OSS Distributions
2522*e3723e1fSApple OSS Distributions if (prepare() == kIOReturnSuccess) {
2523*e3723e1fSApple OSS Distributions physAddr = getPhysicalSegment64( offset, length );
2524*e3723e1fSApple OSS Distributions complete();
2525*e3723e1fSApple OSS Distributions }
2526*e3723e1fSApple OSS Distributions
2527*e3723e1fSApple OSS Distributions return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2528*e3723e1fSApple OSS Distributions }
2529*e3723e1fSApple OSS Distributions
2530*e3723e1fSApple OSS Distributions #pragma clang diagnostic pop
2531*e3723e1fSApple OSS Distributions
2532*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
2533*e3723e1fSApple OSS Distributions
2534*e3723e1fSApple OSS Distributions
2535*e3723e1fSApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)2536*e3723e1fSApple OSS Distributions IOMemoryDescriptor::readBytes
2537*e3723e1fSApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
2538*e3723e1fSApple OSS Distributions {
2539*e3723e1fSApple OSS Distributions addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2540*e3723e1fSApple OSS Distributions IOByteCount endoffset;
2541*e3723e1fSApple OSS Distributions IOByteCount remaining;
2542*e3723e1fSApple OSS Distributions
2543*e3723e1fSApple OSS Distributions // Check that this entire I/O is within the available range
2544*e3723e1fSApple OSS Distributions if ((offset > _length)
2545*e3723e1fSApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2546*e3723e1fSApple OSS Distributions || (endoffset > _length)) {
2547*e3723e1fSApple OSS Distributions assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2548*e3723e1fSApple OSS Distributions return 0;
2549*e3723e1fSApple OSS Distributions }
2550*e3723e1fSApple OSS Distributions if (offset >= _length) {
2551*e3723e1fSApple OSS Distributions return 0;
2552*e3723e1fSApple OSS Distributions }
2553*e3723e1fSApple OSS Distributions
2554*e3723e1fSApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2555*e3723e1fSApple OSS Distributions if (kIOMemoryRemote & _flags) {
2556*e3723e1fSApple OSS Distributions return 0;
2557*e3723e1fSApple OSS Distributions }
2558*e3723e1fSApple OSS Distributions
2559*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2560*e3723e1fSApple OSS Distributions LOCK;
2561*e3723e1fSApple OSS Distributions }
2562*e3723e1fSApple OSS Distributions
2563*e3723e1fSApple OSS Distributions remaining = length = min(length, _length - offset);
2564*e3723e1fSApple OSS Distributions while (remaining) { // (process another target segment?)
2565*e3723e1fSApple OSS Distributions addr64_t srcAddr64;
2566*e3723e1fSApple OSS Distributions IOByteCount srcLen;
2567*e3723e1fSApple OSS Distributions int options = cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap;
2568*e3723e1fSApple OSS Distributions
2569*e3723e1fSApple OSS Distributions IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
2570*e3723e1fSApple OSS Distributions srcAddr64 = getPhysicalSegment(offset, &srcLen, getPhysSegmentOptions);
2571*e3723e1fSApple OSS Distributions if (!srcAddr64) {
2572*e3723e1fSApple OSS Distributions break;
2573*e3723e1fSApple OSS Distributions }
2574*e3723e1fSApple OSS Distributions
2575*e3723e1fSApple OSS Distributions // Clip segment length to remaining
2576*e3723e1fSApple OSS Distributions if (srcLen > remaining) {
2577*e3723e1fSApple OSS Distributions srcLen = remaining;
2578*e3723e1fSApple OSS Distributions }
2579*e3723e1fSApple OSS Distributions
2580*e3723e1fSApple OSS Distributions if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2581*e3723e1fSApple OSS Distributions srcLen = (UINT_MAX - PAGE_SIZE + 1);
2582*e3723e1fSApple OSS Distributions }
2583*e3723e1fSApple OSS Distributions
2584*e3723e1fSApple OSS Distributions
2585*e3723e1fSApple OSS Distributions kern_return_t copy_ret = copypv(srcAddr64, dstAddr, (unsigned int) srcLen, options);
2586*e3723e1fSApple OSS Distributions #pragma unused(copy_ret)
2587*e3723e1fSApple OSS Distributions
2588*e3723e1fSApple OSS Distributions dstAddr += srcLen;
2589*e3723e1fSApple OSS Distributions offset += srcLen;
2590*e3723e1fSApple OSS Distributions remaining -= srcLen;
2591*e3723e1fSApple OSS Distributions }
2592*e3723e1fSApple OSS Distributions
2593*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2594*e3723e1fSApple OSS Distributions UNLOCK;
2595*e3723e1fSApple OSS Distributions }
2596*e3723e1fSApple OSS Distributions
2597*e3723e1fSApple OSS Distributions assert(!remaining);
2598*e3723e1fSApple OSS Distributions
2599*e3723e1fSApple OSS Distributions return length - remaining;
2600*e3723e1fSApple OSS Distributions }
2601*e3723e1fSApple OSS Distributions
2602*e3723e1fSApple OSS Distributions IOByteCount
writeBytes(IOByteCount inoffset,const void * bytes,IOByteCount length)2603*e3723e1fSApple OSS Distributions IOMemoryDescriptor::writeBytes
2604*e3723e1fSApple OSS Distributions (IOByteCount inoffset, const void *bytes, IOByteCount length)
2605*e3723e1fSApple OSS Distributions {
2606*e3723e1fSApple OSS Distributions addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2607*e3723e1fSApple OSS Distributions IOByteCount remaining;
2608*e3723e1fSApple OSS Distributions IOByteCount endoffset;
2609*e3723e1fSApple OSS Distributions IOByteCount offset = inoffset;
2610*e3723e1fSApple OSS Distributions
2611*e3723e1fSApple OSS Distributions assert( !(kIOMemoryPreparedReadOnly & _flags));
2612*e3723e1fSApple OSS Distributions
2613*e3723e1fSApple OSS Distributions // Check that this entire I/O is within the available range
2614*e3723e1fSApple OSS Distributions if ((offset > _length)
2615*e3723e1fSApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2616*e3723e1fSApple OSS Distributions || (endoffset > _length)) {
2617*e3723e1fSApple OSS Distributions assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2618*e3723e1fSApple OSS Distributions return 0;
2619*e3723e1fSApple OSS Distributions }
2620*e3723e1fSApple OSS Distributions if (kIOMemoryPreparedReadOnly & _flags) {
2621*e3723e1fSApple OSS Distributions return 0;
2622*e3723e1fSApple OSS Distributions }
2623*e3723e1fSApple OSS Distributions if (offset >= _length) {
2624*e3723e1fSApple OSS Distributions return 0;
2625*e3723e1fSApple OSS Distributions }
2626*e3723e1fSApple OSS Distributions
2627*e3723e1fSApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2628*e3723e1fSApple OSS Distributions if (kIOMemoryRemote & _flags) {
2629*e3723e1fSApple OSS Distributions return 0;
2630*e3723e1fSApple OSS Distributions }
2631*e3723e1fSApple OSS Distributions
2632*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2633*e3723e1fSApple OSS Distributions LOCK;
2634*e3723e1fSApple OSS Distributions }
2635*e3723e1fSApple OSS Distributions
2636*e3723e1fSApple OSS Distributions remaining = length = min(length, _length - offset);
2637*e3723e1fSApple OSS Distributions while (remaining) { // (process another target segment?)
2638*e3723e1fSApple OSS Distributions addr64_t dstAddr64;
2639*e3723e1fSApple OSS Distributions IOByteCount dstLen;
2640*e3723e1fSApple OSS Distributions int options = cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap;
2641*e3723e1fSApple OSS Distributions
2642*e3723e1fSApple OSS Distributions IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
2643*e3723e1fSApple OSS Distributions dstAddr64 = getPhysicalSegment(offset, &dstLen, getPhysSegmentOptions);
2644*e3723e1fSApple OSS Distributions if (!dstAddr64) {
2645*e3723e1fSApple OSS Distributions break;
2646*e3723e1fSApple OSS Distributions }
2647*e3723e1fSApple OSS Distributions
2648*e3723e1fSApple OSS Distributions // Clip segment length to remaining
2649*e3723e1fSApple OSS Distributions if (dstLen > remaining) {
2650*e3723e1fSApple OSS Distributions dstLen = remaining;
2651*e3723e1fSApple OSS Distributions }
2652*e3723e1fSApple OSS Distributions
2653*e3723e1fSApple OSS Distributions if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2654*e3723e1fSApple OSS Distributions dstLen = (UINT_MAX - PAGE_SIZE + 1);
2655*e3723e1fSApple OSS Distributions }
2656*e3723e1fSApple OSS Distributions
2657*e3723e1fSApple OSS Distributions
2658*e3723e1fSApple OSS Distributions if (!srcAddr) {
2659*e3723e1fSApple OSS Distributions bzero_phys(dstAddr64, (unsigned int) dstLen);
2660*e3723e1fSApple OSS Distributions } else {
2661*e3723e1fSApple OSS Distributions kern_return_t copy_ret = copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen, options);
2662*e3723e1fSApple OSS Distributions #pragma unused(copy_ret)
2663*e3723e1fSApple OSS Distributions srcAddr += dstLen;
2664*e3723e1fSApple OSS Distributions }
2665*e3723e1fSApple OSS Distributions offset += dstLen;
2666*e3723e1fSApple OSS Distributions remaining -= dstLen;
2667*e3723e1fSApple OSS Distributions }
2668*e3723e1fSApple OSS Distributions
2669*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2670*e3723e1fSApple OSS Distributions UNLOCK;
2671*e3723e1fSApple OSS Distributions }
2672*e3723e1fSApple OSS Distributions
2673*e3723e1fSApple OSS Distributions assert(!remaining);
2674*e3723e1fSApple OSS Distributions
2675*e3723e1fSApple OSS Distributions #if defined(__x86_64__)
2676*e3723e1fSApple OSS Distributions // copypv does not cppvFsnk on intel
2677*e3723e1fSApple OSS Distributions #else
2678*e3723e1fSApple OSS Distributions if (!srcAddr) {
2679*e3723e1fSApple OSS Distributions performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2680*e3723e1fSApple OSS Distributions }
2681*e3723e1fSApple OSS Distributions #endif
2682*e3723e1fSApple OSS Distributions
2683*e3723e1fSApple OSS Distributions return length - remaining;
2684*e3723e1fSApple OSS Distributions }
2685*e3723e1fSApple OSS Distributions
2686*e3723e1fSApple OSS Distributions #ifndef __LP64__
2687*e3723e1fSApple OSS Distributions void
setPosition(IOByteCount position)2688*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2689*e3723e1fSApple OSS Distributions {
2690*e3723e1fSApple OSS Distributions panic("IOGMD::setPosition deprecated");
2691*e3723e1fSApple OSS Distributions }
2692*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
2693*e3723e1fSApple OSS Distributions
2694*e3723e1fSApple OSS Distributions static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2695*e3723e1fSApple OSS Distributions static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2696*e3723e1fSApple OSS Distributions
2697*e3723e1fSApple OSS Distributions uint64_t
getPreparationID(void)2698*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::getPreparationID( void )
2699*e3723e1fSApple OSS Distributions {
2700*e3723e1fSApple OSS Distributions ioGMDData *dataP;
2701*e3723e1fSApple OSS Distributions
2702*e3723e1fSApple OSS Distributions if (!_wireCount) {
2703*e3723e1fSApple OSS Distributions return kIOPreparationIDUnprepared;
2704*e3723e1fSApple OSS Distributions }
2705*e3723e1fSApple OSS Distributions
2706*e3723e1fSApple OSS Distributions if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2707*e3723e1fSApple OSS Distributions || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2708*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setPreparationID();
2709*e3723e1fSApple OSS Distributions return IOMemoryDescriptor::getPreparationID();
2710*e3723e1fSApple OSS Distributions }
2711*e3723e1fSApple OSS Distributions
2712*e3723e1fSApple OSS Distributions if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2713*e3723e1fSApple OSS Distributions return kIOPreparationIDUnprepared;
2714*e3723e1fSApple OSS Distributions }
2715*e3723e1fSApple OSS Distributions
2716*e3723e1fSApple OSS Distributions if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2717*e3723e1fSApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2718*e3723e1fSApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2719*e3723e1fSApple OSS Distributions }
2720*e3723e1fSApple OSS Distributions return dataP->fPreparationID;
2721*e3723e1fSApple OSS Distributions }
2722*e3723e1fSApple OSS Distributions
2723*e3723e1fSApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2724*e3723e1fSApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2725*e3723e1fSApple OSS Distributions {
2726*e3723e1fSApple OSS Distributions if (reserved->creator) {
2727*e3723e1fSApple OSS Distributions task_deallocate(reserved->creator);
2728*e3723e1fSApple OSS Distributions reserved->creator = NULL;
2729*e3723e1fSApple OSS Distributions }
2730*e3723e1fSApple OSS Distributions
2731*e3723e1fSApple OSS Distributions if (reserved->contextObject) {
2732*e3723e1fSApple OSS Distributions reserved->contextObject->release();
2733*e3723e1fSApple OSS Distributions reserved->contextObject = NULL;
2734*e3723e1fSApple OSS Distributions }
2735*e3723e1fSApple OSS Distributions }
2736*e3723e1fSApple OSS Distributions
2737*e3723e1fSApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2738*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2739*e3723e1fSApple OSS Distributions {
2740*e3723e1fSApple OSS Distributions if (!reserved) {
2741*e3723e1fSApple OSS Distributions reserved = IOMallocType(IOMemoryDescriptorReserved);
2742*e3723e1fSApple OSS Distributions }
2743*e3723e1fSApple OSS Distributions return reserved;
2744*e3723e1fSApple OSS Distributions }
2745*e3723e1fSApple OSS Distributions
2746*e3723e1fSApple OSS Distributions void
setPreparationID(void)2747*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setPreparationID( void )
2748*e3723e1fSApple OSS Distributions {
2749*e3723e1fSApple OSS Distributions if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2750*e3723e1fSApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2751*e3723e1fSApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2752*e3723e1fSApple OSS Distributions }
2753*e3723e1fSApple OSS Distributions }
2754*e3723e1fSApple OSS Distributions
2755*e3723e1fSApple OSS Distributions uint64_t
getPreparationID(void)2756*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2757*e3723e1fSApple OSS Distributions {
2758*e3723e1fSApple OSS Distributions if (reserved) {
2759*e3723e1fSApple OSS Distributions return reserved->preparationID;
2760*e3723e1fSApple OSS Distributions } else {
2761*e3723e1fSApple OSS Distributions return kIOPreparationIDUnsupported;
2762*e3723e1fSApple OSS Distributions }
2763*e3723e1fSApple OSS Distributions }
2764*e3723e1fSApple OSS Distributions
2765*e3723e1fSApple OSS Distributions void
setDescriptorID(void)2766*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setDescriptorID( void )
2767*e3723e1fSApple OSS Distributions {
2768*e3723e1fSApple OSS Distributions if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2769*e3723e1fSApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2770*e3723e1fSApple OSS Distributions OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2771*e3723e1fSApple OSS Distributions }
2772*e3723e1fSApple OSS Distributions }
2773*e3723e1fSApple OSS Distributions
2774*e3723e1fSApple OSS Distributions uint64_t
getDescriptorID(void)2775*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2776*e3723e1fSApple OSS Distributions {
2777*e3723e1fSApple OSS Distributions setDescriptorID();
2778*e3723e1fSApple OSS Distributions
2779*e3723e1fSApple OSS Distributions if (reserved) {
2780*e3723e1fSApple OSS Distributions return reserved->descriptorID;
2781*e3723e1fSApple OSS Distributions } else {
2782*e3723e1fSApple OSS Distributions return kIODescriptorIDInvalid;
2783*e3723e1fSApple OSS Distributions }
2784*e3723e1fSApple OSS Distributions }
2785*e3723e1fSApple OSS Distributions
2786*e3723e1fSApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2787*e3723e1fSApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2788*e3723e1fSApple OSS Distributions {
2789*e3723e1fSApple OSS Distributions if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2790*e3723e1fSApple OSS Distributions return kIOReturnSuccess;
2791*e3723e1fSApple OSS Distributions }
2792*e3723e1fSApple OSS Distributions
2793*e3723e1fSApple OSS Distributions assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2794*e3723e1fSApple OSS Distributions if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2795*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
2796*e3723e1fSApple OSS Distributions }
2797*e3723e1fSApple OSS Distributions
2798*e3723e1fSApple OSS Distributions uint64_t descriptorID = getDescriptorID();
2799*e3723e1fSApple OSS Distributions assert(descriptorID != kIODescriptorIDInvalid);
2800*e3723e1fSApple OSS Distributions if (getDescriptorID() == kIODescriptorIDInvalid) {
2801*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
2802*e3723e1fSApple OSS Distributions }
2803*e3723e1fSApple OSS Distributions
2804*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2805*e3723e1fSApple OSS Distributions
2806*e3723e1fSApple OSS Distributions #if __LP64__
2807*e3723e1fSApple OSS Distributions static const uint8_t num_segments_page = 8;
2808*e3723e1fSApple OSS Distributions #else
2809*e3723e1fSApple OSS Distributions static const uint8_t num_segments_page = 4;
2810*e3723e1fSApple OSS Distributions #endif
2811*e3723e1fSApple OSS Distributions static const uint8_t num_segments_long = 2;
2812*e3723e1fSApple OSS Distributions
2813*e3723e1fSApple OSS Distributions IOPhysicalAddress segments_page[num_segments_page];
2814*e3723e1fSApple OSS Distributions IOPhysicalRange segments_long[num_segments_long];
2815*e3723e1fSApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2816*e3723e1fSApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2817*e3723e1fSApple OSS Distributions
2818*e3723e1fSApple OSS Distributions uint8_t segment_page_idx = 0;
2819*e3723e1fSApple OSS Distributions uint8_t segment_long_idx = 0;
2820*e3723e1fSApple OSS Distributions
2821*e3723e1fSApple OSS Distributions IOPhysicalRange physical_segment;
2822*e3723e1fSApple OSS Distributions for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2823*e3723e1fSApple OSS Distributions physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2824*e3723e1fSApple OSS Distributions
2825*e3723e1fSApple OSS Distributions if (physical_segment.length == 0) {
2826*e3723e1fSApple OSS Distributions break;
2827*e3723e1fSApple OSS Distributions }
2828*e3723e1fSApple OSS Distributions
2829*e3723e1fSApple OSS Distributions /**
2830*e3723e1fSApple OSS Distributions * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2831*e3723e1fSApple OSS Distributions * buffer memory, pack segment events according to the following.
2832*e3723e1fSApple OSS Distributions *
2833*e3723e1fSApple OSS Distributions * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
2834*e3723e1fSApple OSS Distributions * IOMDPA_MAPPED event emitted on by the current thread_id.
2835*e3723e1fSApple OSS Distributions *
2836*e3723e1fSApple OSS Distributions * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2837*e3723e1fSApple OSS Distributions * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2838*e3723e1fSApple OSS Distributions * - unmapped pages will have a ppn of MAX_INT_32
2839*e3723e1fSApple OSS Distributions * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
2840*e3723e1fSApple OSS Distributions * - address_0, length_0, address_0, length_1
2841*e3723e1fSApple OSS Distributions * - unmapped pages will have an address of 0
2842*e3723e1fSApple OSS Distributions *
2843*e3723e1fSApple OSS Distributions * During each iteration do the following depending on the length of the mapping:
2844*e3723e1fSApple OSS Distributions * 1. add the current segment to the appropriate queue of pending segments
2845*e3723e1fSApple OSS Distributions * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2846*e3723e1fSApple OSS Distributions * 1a. if FALSE emit and reset all events in the previous queue
2847*e3723e1fSApple OSS Distributions * 2. check if we have filled up the current queue of pending events
2848*e3723e1fSApple OSS Distributions * 2a. if TRUE emit and reset all events in the pending queue
2849*e3723e1fSApple OSS Distributions * 3. after completing all iterations emit events in the current queue
2850*e3723e1fSApple OSS Distributions */
2851*e3723e1fSApple OSS Distributions
2852*e3723e1fSApple OSS Distributions bool emit_page = false;
2853*e3723e1fSApple OSS Distributions bool emit_long = false;
2854*e3723e1fSApple OSS Distributions if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2855*e3723e1fSApple OSS Distributions segments_page[segment_page_idx] = physical_segment.address;
2856*e3723e1fSApple OSS Distributions segment_page_idx++;
2857*e3723e1fSApple OSS Distributions
2858*e3723e1fSApple OSS Distributions emit_long = segment_long_idx != 0;
2859*e3723e1fSApple OSS Distributions emit_page = segment_page_idx == num_segments_page;
2860*e3723e1fSApple OSS Distributions
2861*e3723e1fSApple OSS Distributions if (os_unlikely(emit_long)) {
2862*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2863*e3723e1fSApple OSS Distributions segments_long[0].address, segments_long[0].length,
2864*e3723e1fSApple OSS Distributions segments_long[1].address, segments_long[1].length);
2865*e3723e1fSApple OSS Distributions }
2866*e3723e1fSApple OSS Distributions
2867*e3723e1fSApple OSS Distributions if (os_unlikely(emit_page)) {
2868*e3723e1fSApple OSS Distributions #if __LP64__
2869*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2870*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2871*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2872*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2873*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2874*e3723e1fSApple OSS Distributions #else
2875*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2876*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2877*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2878*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2879*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2880*e3723e1fSApple OSS Distributions #endif
2881*e3723e1fSApple OSS Distributions }
2882*e3723e1fSApple OSS Distributions } else {
2883*e3723e1fSApple OSS Distributions segments_long[segment_long_idx] = physical_segment;
2884*e3723e1fSApple OSS Distributions segment_long_idx++;
2885*e3723e1fSApple OSS Distributions
2886*e3723e1fSApple OSS Distributions emit_page = segment_page_idx != 0;
2887*e3723e1fSApple OSS Distributions emit_long = segment_long_idx == num_segments_long;
2888*e3723e1fSApple OSS Distributions
2889*e3723e1fSApple OSS Distributions if (os_unlikely(emit_page)) {
2890*e3723e1fSApple OSS Distributions #if __LP64__
2891*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2892*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2893*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2894*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2895*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2896*e3723e1fSApple OSS Distributions #else
2897*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2898*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2899*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2900*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2901*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2902*e3723e1fSApple OSS Distributions #endif
2903*e3723e1fSApple OSS Distributions }
2904*e3723e1fSApple OSS Distributions
2905*e3723e1fSApple OSS Distributions if (emit_long) {
2906*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2907*e3723e1fSApple OSS Distributions segments_long[0].address, segments_long[0].length,
2908*e3723e1fSApple OSS Distributions segments_long[1].address, segments_long[1].length);
2909*e3723e1fSApple OSS Distributions }
2910*e3723e1fSApple OSS Distributions }
2911*e3723e1fSApple OSS Distributions
2912*e3723e1fSApple OSS Distributions if (os_unlikely(emit_page)) {
2913*e3723e1fSApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2914*e3723e1fSApple OSS Distributions segment_page_idx = 0;
2915*e3723e1fSApple OSS Distributions }
2916*e3723e1fSApple OSS Distributions
2917*e3723e1fSApple OSS Distributions if (os_unlikely(emit_long)) {
2918*e3723e1fSApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2919*e3723e1fSApple OSS Distributions segment_long_idx = 0;
2920*e3723e1fSApple OSS Distributions }
2921*e3723e1fSApple OSS Distributions }
2922*e3723e1fSApple OSS Distributions
2923*e3723e1fSApple OSS Distributions if (segment_page_idx != 0) {
2924*e3723e1fSApple OSS Distributions assert(segment_long_idx == 0);
2925*e3723e1fSApple OSS Distributions #if __LP64__
2926*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2927*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2928*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2929*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2930*e3723e1fSApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2931*e3723e1fSApple OSS Distributions #else
2932*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2933*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2934*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2935*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2936*e3723e1fSApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2937*e3723e1fSApple OSS Distributions #endif
2938*e3723e1fSApple OSS Distributions } else if (segment_long_idx != 0) {
2939*e3723e1fSApple OSS Distributions assert(segment_page_idx == 0);
2940*e3723e1fSApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2941*e3723e1fSApple OSS Distributions segments_long[0].address, segments_long[0].length,
2942*e3723e1fSApple OSS Distributions segments_long[1].address, segments_long[1].length);
2943*e3723e1fSApple OSS Distributions }
2944*e3723e1fSApple OSS Distributions
2945*e3723e1fSApple OSS Distributions return kIOReturnSuccess;
2946*e3723e1fSApple OSS Distributions }
2947*e3723e1fSApple OSS Distributions
2948*e3723e1fSApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2949*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2950*e3723e1fSApple OSS Distributions {
2951*e3723e1fSApple OSS Distributions _kernelTag = (vm_tag_t) kernelTag;
2952*e3723e1fSApple OSS Distributions _userTag = (vm_tag_t) userTag;
2953*e3723e1fSApple OSS Distributions }
2954*e3723e1fSApple OSS Distributions
2955*e3723e1fSApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2956*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2957*e3723e1fSApple OSS Distributions {
2958*e3723e1fSApple OSS Distributions if (vm_kernel_map_is_kernel(map)) {
2959*e3723e1fSApple OSS Distributions if (VM_KERN_MEMORY_NONE != _kernelTag) {
2960*e3723e1fSApple OSS Distributions return (uint32_t) _kernelTag;
2961*e3723e1fSApple OSS Distributions }
2962*e3723e1fSApple OSS Distributions } else {
2963*e3723e1fSApple OSS Distributions if (VM_KERN_MEMORY_NONE != _userTag) {
2964*e3723e1fSApple OSS Distributions return (uint32_t) _userTag;
2965*e3723e1fSApple OSS Distributions }
2966*e3723e1fSApple OSS Distributions }
2967*e3723e1fSApple OSS Distributions return IOMemoryTag(map);
2968*e3723e1fSApple OSS Distributions }
2969*e3723e1fSApple OSS Distributions
2970*e3723e1fSApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2971*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2972*e3723e1fSApple OSS Distributions {
2973*e3723e1fSApple OSS Distributions IOReturn err = kIOReturnSuccess;
2974*e3723e1fSApple OSS Distributions DMACommandOps params;
2975*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2976*e3723e1fSApple OSS Distributions ioGMDData *dataP;
2977*e3723e1fSApple OSS Distributions
2978*e3723e1fSApple OSS Distributions params = (op & ~kIOMDDMACommandOperationMask & op);
2979*e3723e1fSApple OSS Distributions op &= kIOMDDMACommandOperationMask;
2980*e3723e1fSApple OSS Distributions
2981*e3723e1fSApple OSS Distributions if (kIOMDDMAMap == op) {
2982*e3723e1fSApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
2983*e3723e1fSApple OSS Distributions return kIOReturnUnderrun;
2984*e3723e1fSApple OSS Distributions }
2985*e3723e1fSApple OSS Distributions
2986*e3723e1fSApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2987*e3723e1fSApple OSS Distributions
2988*e3723e1fSApple OSS Distributions if (!_memoryEntries
2989*e3723e1fSApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2990*e3723e1fSApple OSS Distributions return kIOReturnNoMemory;
2991*e3723e1fSApple OSS Distributions }
2992*e3723e1fSApple OSS Distributions
2993*e3723e1fSApple OSS Distributions if (_memoryEntries && data->fMapper) {
2994*e3723e1fSApple OSS Distributions bool remap, keepMap;
2995*e3723e1fSApple OSS Distributions dataP = getDataP(_memoryEntries);
2996*e3723e1fSApple OSS Distributions
2997*e3723e1fSApple OSS Distributions if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2998*e3723e1fSApple OSS Distributions dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2999*e3723e1fSApple OSS Distributions }
3000*e3723e1fSApple OSS Distributions if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
3001*e3723e1fSApple OSS Distributions dataP->fDMAMapAlignment = data->fMapSpec.alignment;
3002*e3723e1fSApple OSS Distributions }
3003*e3723e1fSApple OSS Distributions
3004*e3723e1fSApple OSS Distributions keepMap = (data->fMapper == gIOSystemMapper);
3005*e3723e1fSApple OSS Distributions keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
3006*e3723e1fSApple OSS Distributions
3007*e3723e1fSApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3008*e3723e1fSApple OSS Distributions IOLockLock(_prepareLock);
3009*e3723e1fSApple OSS Distributions }
3010*e3723e1fSApple OSS Distributions
3011*e3723e1fSApple OSS Distributions remap = (!keepMap);
3012*e3723e1fSApple OSS Distributions remap |= (dataP->fDMAMapNumAddressBits < 64)
3013*e3723e1fSApple OSS Distributions && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3014*e3723e1fSApple OSS Distributions remap |= (dataP->fDMAMapAlignment > page_size);
3015*e3723e1fSApple OSS Distributions
3016*e3723e1fSApple OSS Distributions if (remap || !dataP->fMappedBaseValid) {
3017*e3723e1fSApple OSS Distributions err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3018*e3723e1fSApple OSS Distributions if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3019*e3723e1fSApple OSS Distributions dataP->fMappedBase = data->fAlloc;
3020*e3723e1fSApple OSS Distributions dataP->fMappedBaseValid = true;
3021*e3723e1fSApple OSS Distributions dataP->fMappedLength = data->fAllocLength;
3022*e3723e1fSApple OSS Distributions data->fAllocLength = 0; // IOMD owns the alloc now
3023*e3723e1fSApple OSS Distributions }
3024*e3723e1fSApple OSS Distributions } else {
3025*e3723e1fSApple OSS Distributions data->fAlloc = dataP->fMappedBase;
3026*e3723e1fSApple OSS Distributions data->fAllocLength = 0; // give out IOMD map
3027*e3723e1fSApple OSS Distributions md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3028*e3723e1fSApple OSS Distributions }
3029*e3723e1fSApple OSS Distributions
3030*e3723e1fSApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3031*e3723e1fSApple OSS Distributions IOLockUnlock(_prepareLock);
3032*e3723e1fSApple OSS Distributions }
3033*e3723e1fSApple OSS Distributions }
3034*e3723e1fSApple OSS Distributions return err;
3035*e3723e1fSApple OSS Distributions }
3036*e3723e1fSApple OSS Distributions if (kIOMDDMAUnmap == op) {
3037*e3723e1fSApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3038*e3723e1fSApple OSS Distributions return kIOReturnUnderrun;
3039*e3723e1fSApple OSS Distributions }
3040*e3723e1fSApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3041*e3723e1fSApple OSS Distributions
3042*e3723e1fSApple OSS Distributions if (_pages) {
3043*e3723e1fSApple OSS Distributions err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3044*e3723e1fSApple OSS Distributions }
3045*e3723e1fSApple OSS Distributions
3046*e3723e1fSApple OSS Distributions return kIOReturnSuccess;
3047*e3723e1fSApple OSS Distributions }
3048*e3723e1fSApple OSS Distributions
3049*e3723e1fSApple OSS Distributions if (kIOMDAddDMAMapSpec == op) {
3050*e3723e1fSApple OSS Distributions if (dataSize < sizeof(IODMAMapSpecification)) {
3051*e3723e1fSApple OSS Distributions return kIOReturnUnderrun;
3052*e3723e1fSApple OSS Distributions }
3053*e3723e1fSApple OSS Distributions
3054*e3723e1fSApple OSS Distributions IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3055*e3723e1fSApple OSS Distributions
3056*e3723e1fSApple OSS Distributions if (!_memoryEntries
3057*e3723e1fSApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3058*e3723e1fSApple OSS Distributions return kIOReturnNoMemory;
3059*e3723e1fSApple OSS Distributions }
3060*e3723e1fSApple OSS Distributions
3061*e3723e1fSApple OSS Distributions if (_memoryEntries) {
3062*e3723e1fSApple OSS Distributions dataP = getDataP(_memoryEntries);
3063*e3723e1fSApple OSS Distributions if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3064*e3723e1fSApple OSS Distributions dataP->fDMAMapNumAddressBits = data->numAddressBits;
3065*e3723e1fSApple OSS Distributions }
3066*e3723e1fSApple OSS Distributions if (data->alignment > dataP->fDMAMapAlignment) {
3067*e3723e1fSApple OSS Distributions dataP->fDMAMapAlignment = data->alignment;
3068*e3723e1fSApple OSS Distributions }
3069*e3723e1fSApple OSS Distributions }
3070*e3723e1fSApple OSS Distributions return kIOReturnSuccess;
3071*e3723e1fSApple OSS Distributions }
3072*e3723e1fSApple OSS Distributions
3073*e3723e1fSApple OSS Distributions if (kIOMDGetCharacteristics == op) {
3074*e3723e1fSApple OSS Distributions if (dataSize < sizeof(IOMDDMACharacteristics)) {
3075*e3723e1fSApple OSS Distributions return kIOReturnUnderrun;
3076*e3723e1fSApple OSS Distributions }
3077*e3723e1fSApple OSS Distributions
3078*e3723e1fSApple OSS Distributions IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3079*e3723e1fSApple OSS Distributions data->fLength = _length;
3080*e3723e1fSApple OSS Distributions data->fSGCount = _rangesCount;
3081*e3723e1fSApple OSS Distributions data->fPages = _pages;
3082*e3723e1fSApple OSS Distributions data->fDirection = getDirection();
3083*e3723e1fSApple OSS Distributions if (!_wireCount) {
3084*e3723e1fSApple OSS Distributions data->fIsPrepared = false;
3085*e3723e1fSApple OSS Distributions } else {
3086*e3723e1fSApple OSS Distributions data->fIsPrepared = true;
3087*e3723e1fSApple OSS Distributions data->fHighestPage = _highestPage;
3088*e3723e1fSApple OSS Distributions if (_memoryEntries) {
3089*e3723e1fSApple OSS Distributions dataP = getDataP(_memoryEntries);
3090*e3723e1fSApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
3091*e3723e1fSApple OSS Distributions UInt count = getNumIOPL(_memoryEntries, dataP);
3092*e3723e1fSApple OSS Distributions if (count == 1) {
3093*e3723e1fSApple OSS Distributions data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3094*e3723e1fSApple OSS Distributions }
3095*e3723e1fSApple OSS Distributions }
3096*e3723e1fSApple OSS Distributions }
3097*e3723e1fSApple OSS Distributions
3098*e3723e1fSApple OSS Distributions return kIOReturnSuccess;
3099*e3723e1fSApple OSS Distributions } else if (kIOMDDMAActive == op) {
3100*e3723e1fSApple OSS Distributions if (params) {
3101*e3723e1fSApple OSS Distributions int16_t prior;
3102*e3723e1fSApple OSS Distributions prior = OSAddAtomic16(1, &md->_dmaReferences);
3103*e3723e1fSApple OSS Distributions if (!prior) {
3104*e3723e1fSApple OSS Distributions md->_mapName = NULL;
3105*e3723e1fSApple OSS Distributions }
3106*e3723e1fSApple OSS Distributions } else {
3107*e3723e1fSApple OSS Distributions if (md->_dmaReferences) {
3108*e3723e1fSApple OSS Distributions OSAddAtomic16(-1, &md->_dmaReferences);
3109*e3723e1fSApple OSS Distributions } else {
3110*e3723e1fSApple OSS Distributions panic("_dmaReferences underflow");
3111*e3723e1fSApple OSS Distributions }
3112*e3723e1fSApple OSS Distributions }
3113*e3723e1fSApple OSS Distributions } else if (kIOMDWalkSegments != op) {
3114*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
3115*e3723e1fSApple OSS Distributions }
3116*e3723e1fSApple OSS Distributions
3117*e3723e1fSApple OSS Distributions // Get the next segment
3118*e3723e1fSApple OSS Distributions struct InternalState {
3119*e3723e1fSApple OSS Distributions IOMDDMAWalkSegmentArgs fIO;
3120*e3723e1fSApple OSS Distributions mach_vm_size_t fOffset2Index;
3121*e3723e1fSApple OSS Distributions mach_vm_size_t fNextOffset;
3122*e3723e1fSApple OSS Distributions UInt fIndex;
3123*e3723e1fSApple OSS Distributions } *isP;
3124*e3723e1fSApple OSS Distributions
3125*e3723e1fSApple OSS Distributions // Find the next segment
3126*e3723e1fSApple OSS Distributions if (dataSize < sizeof(*isP)) {
3127*e3723e1fSApple OSS Distributions return kIOReturnUnderrun;
3128*e3723e1fSApple OSS Distributions }
3129*e3723e1fSApple OSS Distributions
3130*e3723e1fSApple OSS Distributions isP = (InternalState *) vData;
3131*e3723e1fSApple OSS Distributions uint64_t offset = isP->fIO.fOffset;
3132*e3723e1fSApple OSS Distributions uint8_t mapped = isP->fIO.fMapped;
3133*e3723e1fSApple OSS Distributions uint64_t mappedBase;
3134*e3723e1fSApple OSS Distributions
3135*e3723e1fSApple OSS Distributions if (mapped && (kIOMemoryRemote & _flags)) {
3136*e3723e1fSApple OSS Distributions return kIOReturnNotAttached;
3137*e3723e1fSApple OSS Distributions }
3138*e3723e1fSApple OSS Distributions
3139*e3723e1fSApple OSS Distributions if (IOMapper::gSystem && mapped
3140*e3723e1fSApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3141*e3723e1fSApple OSS Distributions && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3142*e3723e1fSApple OSS Distributions // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3143*e3723e1fSApple OSS Distributions if (!_memoryEntries
3144*e3723e1fSApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3145*e3723e1fSApple OSS Distributions return kIOReturnNoMemory;
3146*e3723e1fSApple OSS Distributions }
3147*e3723e1fSApple OSS Distributions
3148*e3723e1fSApple OSS Distributions dataP = getDataP(_memoryEntries);
3149*e3723e1fSApple OSS Distributions if (dataP->fMapper) {
3150*e3723e1fSApple OSS Distributions IODMAMapSpecification mapSpec;
3151*e3723e1fSApple OSS Distributions bzero(&mapSpec, sizeof(mapSpec));
3152*e3723e1fSApple OSS Distributions mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3153*e3723e1fSApple OSS Distributions mapSpec.alignment = dataP->fDMAMapAlignment;
3154*e3723e1fSApple OSS Distributions err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3155*e3723e1fSApple OSS Distributions if (kIOReturnSuccess != err) {
3156*e3723e1fSApple OSS Distributions return err;
3157*e3723e1fSApple OSS Distributions }
3158*e3723e1fSApple OSS Distributions dataP->fMappedBaseValid = true;
3159*e3723e1fSApple OSS Distributions }
3160*e3723e1fSApple OSS Distributions }
3161*e3723e1fSApple OSS Distributions
3162*e3723e1fSApple OSS Distributions if (mapped) {
3163*e3723e1fSApple OSS Distributions if (IOMapper::gSystem
3164*e3723e1fSApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3165*e3723e1fSApple OSS Distributions && _memoryEntries
3166*e3723e1fSApple OSS Distributions && (dataP = getDataP(_memoryEntries))
3167*e3723e1fSApple OSS Distributions && dataP->fMappedBaseValid) {
3168*e3723e1fSApple OSS Distributions mappedBase = dataP->fMappedBase;
3169*e3723e1fSApple OSS Distributions } else {
3170*e3723e1fSApple OSS Distributions mapped = 0;
3171*e3723e1fSApple OSS Distributions }
3172*e3723e1fSApple OSS Distributions }
3173*e3723e1fSApple OSS Distributions
3174*e3723e1fSApple OSS Distributions if (offset >= _length) {
3175*e3723e1fSApple OSS Distributions return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3176*e3723e1fSApple OSS Distributions }
3177*e3723e1fSApple OSS Distributions
3178*e3723e1fSApple OSS Distributions // Validate the previous offset
3179*e3723e1fSApple OSS Distributions UInt ind;
3180*e3723e1fSApple OSS Distributions mach_vm_size_t off2Ind = isP->fOffset2Index;
3181*e3723e1fSApple OSS Distributions if (!params
3182*e3723e1fSApple OSS Distributions && offset
3183*e3723e1fSApple OSS Distributions && (offset == isP->fNextOffset || off2Ind <= offset)) {
3184*e3723e1fSApple OSS Distributions ind = isP->fIndex;
3185*e3723e1fSApple OSS Distributions } else {
3186*e3723e1fSApple OSS Distributions ind = off2Ind = 0; // Start from beginning
3187*e3723e1fSApple OSS Distributions }
3188*e3723e1fSApple OSS Distributions mach_vm_size_t length;
3189*e3723e1fSApple OSS Distributions UInt64 address;
3190*e3723e1fSApple OSS Distributions
3191*e3723e1fSApple OSS Distributions if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3192*e3723e1fSApple OSS Distributions // Physical address based memory descriptor
3193*e3723e1fSApple OSS Distributions const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3194*e3723e1fSApple OSS Distributions
3195*e3723e1fSApple OSS Distributions // Find the range after the one that contains the offset
3196*e3723e1fSApple OSS Distributions mach_vm_size_t len;
3197*e3723e1fSApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3198*e3723e1fSApple OSS Distributions len = physP[ind].length;
3199*e3723e1fSApple OSS Distributions off2Ind += len;
3200*e3723e1fSApple OSS Distributions }
3201*e3723e1fSApple OSS Distributions
3202*e3723e1fSApple OSS Distributions // Calculate length within range and starting address
3203*e3723e1fSApple OSS Distributions length = off2Ind - offset;
3204*e3723e1fSApple OSS Distributions address = physP[ind - 1].address + len - length;
3205*e3723e1fSApple OSS Distributions
3206*e3723e1fSApple OSS Distributions if (true && mapped) {
3207*e3723e1fSApple OSS Distributions address = mappedBase + offset;
3208*e3723e1fSApple OSS Distributions } else {
3209*e3723e1fSApple OSS Distributions // see how far we can coalesce ranges
3210*e3723e1fSApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3211*e3723e1fSApple OSS Distributions len = physP[ind].length;
3212*e3723e1fSApple OSS Distributions length += len;
3213*e3723e1fSApple OSS Distributions off2Ind += len;
3214*e3723e1fSApple OSS Distributions ind++;
3215*e3723e1fSApple OSS Distributions }
3216*e3723e1fSApple OSS Distributions }
3217*e3723e1fSApple OSS Distributions
3218*e3723e1fSApple OSS Distributions // correct contiguous check overshoot
3219*e3723e1fSApple OSS Distributions ind--;
3220*e3723e1fSApple OSS Distributions off2Ind -= len;
3221*e3723e1fSApple OSS Distributions }
3222*e3723e1fSApple OSS Distributions #ifndef __LP64__
3223*e3723e1fSApple OSS Distributions else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3224*e3723e1fSApple OSS Distributions // Physical address based memory descriptor
3225*e3723e1fSApple OSS Distributions const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3226*e3723e1fSApple OSS Distributions
3227*e3723e1fSApple OSS Distributions // Find the range after the one that contains the offset
3228*e3723e1fSApple OSS Distributions mach_vm_size_t len;
3229*e3723e1fSApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3230*e3723e1fSApple OSS Distributions len = physP[ind].length;
3231*e3723e1fSApple OSS Distributions off2Ind += len;
3232*e3723e1fSApple OSS Distributions }
3233*e3723e1fSApple OSS Distributions
3234*e3723e1fSApple OSS Distributions // Calculate length within range and starting address
3235*e3723e1fSApple OSS Distributions length = off2Ind - offset;
3236*e3723e1fSApple OSS Distributions address = physP[ind - 1].address + len - length;
3237*e3723e1fSApple OSS Distributions
3238*e3723e1fSApple OSS Distributions if (true && mapped) {
3239*e3723e1fSApple OSS Distributions address = mappedBase + offset;
3240*e3723e1fSApple OSS Distributions } else {
3241*e3723e1fSApple OSS Distributions // see how far we can coalesce ranges
3242*e3723e1fSApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3243*e3723e1fSApple OSS Distributions len = physP[ind].length;
3244*e3723e1fSApple OSS Distributions length += len;
3245*e3723e1fSApple OSS Distributions off2Ind += len;
3246*e3723e1fSApple OSS Distributions ind++;
3247*e3723e1fSApple OSS Distributions }
3248*e3723e1fSApple OSS Distributions }
3249*e3723e1fSApple OSS Distributions // correct contiguous check overshoot
3250*e3723e1fSApple OSS Distributions ind--;
3251*e3723e1fSApple OSS Distributions off2Ind -= len;
3252*e3723e1fSApple OSS Distributions }
3253*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
3254*e3723e1fSApple OSS Distributions else {
3255*e3723e1fSApple OSS Distributions do {
3256*e3723e1fSApple OSS Distributions if (!_wireCount) {
3257*e3723e1fSApple OSS Distributions panic("IOGMD: not wired for the IODMACommand");
3258*e3723e1fSApple OSS Distributions }
3259*e3723e1fSApple OSS Distributions
3260*e3723e1fSApple OSS Distributions assert(_memoryEntries);
3261*e3723e1fSApple OSS Distributions
3262*e3723e1fSApple OSS Distributions dataP = getDataP(_memoryEntries);
3263*e3723e1fSApple OSS Distributions const ioPLBlock *ioplList = getIOPLList(dataP);
3264*e3723e1fSApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3265*e3723e1fSApple OSS Distributions upl_page_info_t *pageList = getPageList(dataP);
3266*e3723e1fSApple OSS Distributions
3267*e3723e1fSApple OSS Distributions assert(numIOPLs > 0);
3268*e3723e1fSApple OSS Distributions
3269*e3723e1fSApple OSS Distributions // Scan through iopl info blocks looking for block containing offset
3270*e3723e1fSApple OSS Distributions while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3271*e3723e1fSApple OSS Distributions ind++;
3272*e3723e1fSApple OSS Distributions }
3273*e3723e1fSApple OSS Distributions
3274*e3723e1fSApple OSS Distributions // Go back to actual range as search goes past it
3275*e3723e1fSApple OSS Distributions ioPLBlock ioplInfo = ioplList[ind - 1];
3276*e3723e1fSApple OSS Distributions off2Ind = ioplInfo.fIOMDOffset;
3277*e3723e1fSApple OSS Distributions
3278*e3723e1fSApple OSS Distributions if (ind < numIOPLs) {
3279*e3723e1fSApple OSS Distributions length = ioplList[ind].fIOMDOffset;
3280*e3723e1fSApple OSS Distributions } else {
3281*e3723e1fSApple OSS Distributions length = _length;
3282*e3723e1fSApple OSS Distributions }
3283*e3723e1fSApple OSS Distributions length -= offset; // Remainder within iopl
3284*e3723e1fSApple OSS Distributions
3285*e3723e1fSApple OSS Distributions // Subtract offset till this iopl in total list
3286*e3723e1fSApple OSS Distributions offset -= off2Ind;
3287*e3723e1fSApple OSS Distributions
3288*e3723e1fSApple OSS Distributions // If a mapped address is requested and this is a pre-mapped IOPL
3289*e3723e1fSApple OSS Distributions // then just need to compute an offset relative to the mapped base.
3290*e3723e1fSApple OSS Distributions if (mapped) {
3291*e3723e1fSApple OSS Distributions offset += (ioplInfo.fPageOffset & PAGE_MASK);
3292*e3723e1fSApple OSS Distributions address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3293*e3723e1fSApple OSS Distributions continue; // Done leave do/while(false) now
3294*e3723e1fSApple OSS Distributions }
3295*e3723e1fSApple OSS Distributions
3296*e3723e1fSApple OSS Distributions // The offset is rebased into the current iopl.
3297*e3723e1fSApple OSS Distributions // Now add the iopl 1st page offset.
3298*e3723e1fSApple OSS Distributions offset += ioplInfo.fPageOffset;
3299*e3723e1fSApple OSS Distributions
3300*e3723e1fSApple OSS Distributions // For external UPLs the fPageInfo field points directly to
3301*e3723e1fSApple OSS Distributions // the upl's upl_page_info_t array.
3302*e3723e1fSApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
3303*e3723e1fSApple OSS Distributions pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3304*e3723e1fSApple OSS Distributions } else {
3305*e3723e1fSApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
3306*e3723e1fSApple OSS Distributions }
3307*e3723e1fSApple OSS Distributions
3308*e3723e1fSApple OSS Distributions // Check for direct device non-paged memory
3309*e3723e1fSApple OSS Distributions if (ioplInfo.fFlags & kIOPLOnDevice) {
3310*e3723e1fSApple OSS Distributions address = ptoa_64(pageList->phys_addr) + offset;
3311*e3723e1fSApple OSS Distributions continue; // Done leave do/while(false) now
3312*e3723e1fSApple OSS Distributions }
3313*e3723e1fSApple OSS Distributions
3314*e3723e1fSApple OSS Distributions // Now we need compute the index into the pageList
3315*e3723e1fSApple OSS Distributions UInt pageInd = atop_32(offset);
3316*e3723e1fSApple OSS Distributions offset &= PAGE_MASK;
3317*e3723e1fSApple OSS Distributions
3318*e3723e1fSApple OSS Distributions // Compute the starting address of this segment
3319*e3723e1fSApple OSS Distributions IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3320*e3723e1fSApple OSS Distributions if (!pageAddr) {
3321*e3723e1fSApple OSS Distributions panic("!pageList phys_addr");
3322*e3723e1fSApple OSS Distributions }
3323*e3723e1fSApple OSS Distributions
3324*e3723e1fSApple OSS Distributions address = ptoa_64(pageAddr) + offset;
3325*e3723e1fSApple OSS Distributions
3326*e3723e1fSApple OSS Distributions // length is currently set to the length of the remainider of the iopl.
3327*e3723e1fSApple OSS Distributions // We need to check that the remainder of the iopl is contiguous.
3328*e3723e1fSApple OSS Distributions // This is indicated by pageList[ind].phys_addr being sequential.
3329*e3723e1fSApple OSS Distributions IOByteCount contigLength = PAGE_SIZE - offset;
3330*e3723e1fSApple OSS Distributions while (contigLength < length
3331*e3723e1fSApple OSS Distributions && ++pageAddr == pageList[++pageInd].phys_addr) {
3332*e3723e1fSApple OSS Distributions contigLength += PAGE_SIZE;
3333*e3723e1fSApple OSS Distributions }
3334*e3723e1fSApple OSS Distributions
3335*e3723e1fSApple OSS Distributions if (contigLength < length) {
3336*e3723e1fSApple OSS Distributions length = contigLength;
3337*e3723e1fSApple OSS Distributions }
3338*e3723e1fSApple OSS Distributions
3339*e3723e1fSApple OSS Distributions assert(address);
3340*e3723e1fSApple OSS Distributions assert(length);
3341*e3723e1fSApple OSS Distributions } while (false);
3342*e3723e1fSApple OSS Distributions }
3343*e3723e1fSApple OSS Distributions
3344*e3723e1fSApple OSS Distributions // Update return values and state
3345*e3723e1fSApple OSS Distributions isP->fIO.fIOVMAddr = address;
3346*e3723e1fSApple OSS Distributions isP->fIO.fLength = length;
3347*e3723e1fSApple OSS Distributions isP->fIndex = ind;
3348*e3723e1fSApple OSS Distributions isP->fOffset2Index = off2Ind;
3349*e3723e1fSApple OSS Distributions isP->fNextOffset = isP->fIO.fOffset + length;
3350*e3723e1fSApple OSS Distributions
3351*e3723e1fSApple OSS Distributions return kIOReturnSuccess;
3352*e3723e1fSApple OSS Distributions }
3353*e3723e1fSApple OSS Distributions
// Return the physical (or DMA-mapped) address of the segment containing
// `offset`, and optionally the length of contiguous memory starting there.
// `options` selects the flavor of address:
//   _kIOMemorySourceSegment - raw source range address (no segment walk)
//   kIOMemoryMapperNone     - true physical address, bypassing any mapper
//   0                       - mapper-visible (DMA) address when mapped
// Returns 0 when `offset` is at or past the end of the descriptor.
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t length = 0;
	IOMapper * mapper = gIOSystemMapper;
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Report a zero-length segment up front; overwritten on success below.
	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	// Out-of-range offset: no segment, address 0 / length 0.
	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		// Source-segment path: walk the descriptor's own range vector
		// directly, without requiring the memory to be wired.
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr += offset;
		length -= offset;

		// Coalesce following ranges while they are physically adjacent.
		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Segment-walk path: reuse the kIOMDFirstSegment operation of
		// dmaCommandOperation() to locate the segment at `offset`.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		// Ask for a mapped (IOVM) address only when the caller did not
		// request mapper bypass and the memory is not host-only/remote.
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		// kIOReturnOverrun is expected at end-of-descriptor; anything else
		// unexpected is logged.
		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				// Caller wants the true physical address but the walk
				// produced a mapper (IOVM) address: translate back page
				// by page, extending the length while the translated
				// pages remain physically contiguous.
				addr64_t origAddr = address;
				IOByteCount origLen = length;

				address = mapper->mapToPhysicalAddress(origAddr);
				// First page may be partial; start with the remainder of it.
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				// Clamp: the contiguity scan can overshoot the original length.
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// No address resolved implies no segment; force a zero length.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3460*e3723e1fSApple OSS Distributions
3461*e3723e1fSApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)3462*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::readBytes
3463*e3723e1fSApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
3464*e3723e1fSApple OSS Distributions {
3465*e3723e1fSApple OSS Distributions IOByteCount count = super::readBytes(offset, bytes, length);
3466*e3723e1fSApple OSS Distributions return count;
3467*e3723e1fSApple OSS Distributions }
3468*e3723e1fSApple OSS Distributions
3469*e3723e1fSApple OSS Distributions IOByteCount
writeBytes(IOByteCount offset,const void * bytes,IOByteCount withLength)3470*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::writeBytes
3471*e3723e1fSApple OSS Distributions (IOByteCount offset, const void* bytes, IOByteCount withLength)
3472*e3723e1fSApple OSS Distributions {
3473*e3723e1fSApple OSS Distributions IOByteCount count = super::writeBytes(offset, bytes, withLength);
3474*e3723e1fSApple OSS Distributions return count;
3475*e3723e1fSApple OSS Distributions }
3476*e3723e1fSApple OSS Distributions
3477*e3723e1fSApple OSS Distributions #ifndef __LP64__
3478*e3723e1fSApple OSS Distributions #pragma clang diagnostic push
3479*e3723e1fSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3480*e3723e1fSApple OSS Distributions
3481*e3723e1fSApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3482*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3483*e3723e1fSApple OSS Distributions {
3484*e3723e1fSApple OSS Distributions addr64_t address = 0;
3485*e3723e1fSApple OSS Distributions
3486*e3723e1fSApple OSS Distributions if (options & _kIOMemorySourceSegment) {
3487*e3723e1fSApple OSS Distributions address = getSourceSegment(offset, lengthOfSegment);
3488*e3723e1fSApple OSS Distributions } else if (options & kIOMemoryMapperNone) {
3489*e3723e1fSApple OSS Distributions address = getPhysicalSegment64(offset, lengthOfSegment);
3490*e3723e1fSApple OSS Distributions } else {
3491*e3723e1fSApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment);
3492*e3723e1fSApple OSS Distributions }
3493*e3723e1fSApple OSS Distributions
3494*e3723e1fSApple OSS Distributions return address;
3495*e3723e1fSApple OSS Distributions }
3496*e3723e1fSApple OSS Distributions #pragma clang diagnostic pop
3497*e3723e1fSApple OSS Distributions
3498*e3723e1fSApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3499*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3500*e3723e1fSApple OSS Distributions {
3501*e3723e1fSApple OSS Distributions return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3502*e3723e1fSApple OSS Distributions }
3503*e3723e1fSApple OSS Distributions
3504*e3723e1fSApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3505*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3506*e3723e1fSApple OSS Distributions {
3507*e3723e1fSApple OSS Distributions addr64_t address = 0;
3508*e3723e1fSApple OSS Distributions IOByteCount length = 0;
3509*e3723e1fSApple OSS Distributions
3510*e3723e1fSApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment, 0);
3511*e3723e1fSApple OSS Distributions
3512*e3723e1fSApple OSS Distributions if (lengthOfSegment) {
3513*e3723e1fSApple OSS Distributions length = *lengthOfSegment;
3514*e3723e1fSApple OSS Distributions }
3515*e3723e1fSApple OSS Distributions
3516*e3723e1fSApple OSS Distributions if ((address + length) > 0x100000000ULL) {
3517*e3723e1fSApple OSS Distributions panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3518*e3723e1fSApple OSS Distributions address, (long) length, (getMetaClass())->getClassName());
3519*e3723e1fSApple OSS Distributions }
3520*e3723e1fSApple OSS Distributions
3521*e3723e1fSApple OSS Distributions return (IOPhysicalAddress) address;
3522*e3723e1fSApple OSS Distributions }
3523*e3723e1fSApple OSS Distributions
3524*e3723e1fSApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3525*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3526*e3723e1fSApple OSS Distributions {
3527*e3723e1fSApple OSS Distributions IOPhysicalAddress phys32;
3528*e3723e1fSApple OSS Distributions IOByteCount length;
3529*e3723e1fSApple OSS Distributions addr64_t phys64;
3530*e3723e1fSApple OSS Distributions IOMapper * mapper = NULL;
3531*e3723e1fSApple OSS Distributions
3532*e3723e1fSApple OSS Distributions phys32 = getPhysicalSegment(offset, lengthOfSegment);
3533*e3723e1fSApple OSS Distributions if (!phys32) {
3534*e3723e1fSApple OSS Distributions return 0;
3535*e3723e1fSApple OSS Distributions }
3536*e3723e1fSApple OSS Distributions
3537*e3723e1fSApple OSS Distributions if (gIOSystemMapper) {
3538*e3723e1fSApple OSS Distributions mapper = gIOSystemMapper;
3539*e3723e1fSApple OSS Distributions }
3540*e3723e1fSApple OSS Distributions
3541*e3723e1fSApple OSS Distributions if (mapper) {
3542*e3723e1fSApple OSS Distributions IOByteCount origLen;
3543*e3723e1fSApple OSS Distributions
3544*e3723e1fSApple OSS Distributions phys64 = mapper->mapToPhysicalAddress(phys32);
3545*e3723e1fSApple OSS Distributions origLen = *lengthOfSegment;
3546*e3723e1fSApple OSS Distributions length = page_size - (phys64 & (page_size - 1));
3547*e3723e1fSApple OSS Distributions while ((length < origLen)
3548*e3723e1fSApple OSS Distributions && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3549*e3723e1fSApple OSS Distributions length += page_size;
3550*e3723e1fSApple OSS Distributions }
3551*e3723e1fSApple OSS Distributions if (length > origLen) {
3552*e3723e1fSApple OSS Distributions length = origLen;
3553*e3723e1fSApple OSS Distributions }
3554*e3723e1fSApple OSS Distributions
3555*e3723e1fSApple OSS Distributions *lengthOfSegment = length;
3556*e3723e1fSApple OSS Distributions } else {
3557*e3723e1fSApple OSS Distributions phys64 = (addr64_t) phys32;
3558*e3723e1fSApple OSS Distributions }
3559*e3723e1fSApple OSS Distributions
3560*e3723e1fSApple OSS Distributions return phys64;
3561*e3723e1fSApple OSS Distributions }
3562*e3723e1fSApple OSS Distributions
3563*e3723e1fSApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3564*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3565*e3723e1fSApple OSS Distributions {
3566*e3723e1fSApple OSS Distributions return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3567*e3723e1fSApple OSS Distributions }
3568*e3723e1fSApple OSS Distributions
3569*e3723e1fSApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3570*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3571*e3723e1fSApple OSS Distributions {
3572*e3723e1fSApple OSS Distributions return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3573*e3723e1fSApple OSS Distributions }
3574*e3723e1fSApple OSS Distributions
3575*e3723e1fSApple OSS Distributions #pragma clang diagnostic push
3576*e3723e1fSApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3577*e3723e1fSApple OSS Distributions
3578*e3723e1fSApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3579*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3580*e3723e1fSApple OSS Distributions IOByteCount * lengthOfSegment)
3581*e3723e1fSApple OSS Distributions {
3582*e3723e1fSApple OSS Distributions if (_task == kernel_task) {
3583*e3723e1fSApple OSS Distributions return (void *) getSourceSegment(offset, lengthOfSegment);
3584*e3723e1fSApple OSS Distributions } else {
3585*e3723e1fSApple OSS Distributions panic("IOGMD::getVirtualSegment deprecated");
3586*e3723e1fSApple OSS Distributions }
3587*e3723e1fSApple OSS Distributions
3588*e3723e1fSApple OSS Distributions return NULL;
3589*e3723e1fSApple OSS Distributions }
3590*e3723e1fSApple OSS Distributions #pragma clang diagnostic pop
3591*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
3592*e3723e1fSApple OSS Distributions
/*
 * Back end for IODMACommand operations against a generic memory descriptor.
 *
 * 'op' packs an operation selector (isolated with
 * kIOMDDMACommandOperationMask) together with op-specific option bits.
 * 'vData' points to an op-specific argument structure of 'dataSize' bytes;
 * each branch validates the size before touching it.
 *
 * Returns kIOReturnSuccess, kIOReturnUnderrun (argument block too small),
 * kIOReturnUnsupported, or kIOReturnBadArgument (unknown op).
 */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// The segment/map calls below are non-const; shed constness once here.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Separate the option bits from the operation selector.
	// NOTE(review): 'params' is computed but never consumed in this
	// implementation.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0; // segment count unknown for a generic descriptor
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t addr, nextAddr;

		// This generic walk only produces unmapped physical addresses.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Coalesce physically contiguous segments into one result.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): the dmaUnmap result ('err') is discarded and
		// success is always reported to the caller here.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3664*e3723e1fSApple OSS Distributions
/*
 * Set the purgeable state of the memory backing this general descriptor.
 *
 * With a memory reference (_memRef) the work is delegated to the
 * superclass.  Otherwise the change is applied directly to the owning
 * task's vm_map via vm_map_purgable_control(); only the first range is
 * used in that case (see "can only do one range" below).
 *
 * newState: requested kIOMemoryPurgeable* state.
 * oldState: optional out-parameter receiving the previous state.
 */
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn err = kIOReturnSuccess;

	vm_purgable_t control;
	int state;

	// Remote (non-local) memory cannot be controlled from this kernel.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		// do/while(false) gives 'break' as a structured early-exit that
		// still reaches the UNLOCK below.
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable kernel buffers must be handled via _memRef.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				// No task: physical descriptors have no vm_map to adjust.
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate the IOKit purgeable constant to VM control/state.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Translate the VM's previous state back to IOKit form.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3728*e3723e1fSApple OSS Distributions
3729*e3723e1fSApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3730*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3731*e3723e1fSApple OSS Distributions IOOptionBits * oldState )
3732*e3723e1fSApple OSS Distributions {
3733*e3723e1fSApple OSS Distributions IOReturn err = kIOReturnNotReady;
3734*e3723e1fSApple OSS Distributions
3735*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3736*e3723e1fSApple OSS Distributions LOCK;
3737*e3723e1fSApple OSS Distributions }
3738*e3723e1fSApple OSS Distributions if (_memRef) {
3739*e3723e1fSApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3740*e3723e1fSApple OSS Distributions }
3741*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3742*e3723e1fSApple OSS Distributions UNLOCK;
3743*e3723e1fSApple OSS Distributions }
3744*e3723e1fSApple OSS Distributions
3745*e3723e1fSApple OSS Distributions return err;
3746*e3723e1fSApple OSS Distributions }
3747*e3723e1fSApple OSS Distributions
3748*e3723e1fSApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3749*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3750*e3723e1fSApple OSS Distributions int newLedgerTag,
3751*e3723e1fSApple OSS Distributions IOOptionBits newLedgerOptions )
3752*e3723e1fSApple OSS Distributions {
3753*e3723e1fSApple OSS Distributions IOReturn err = kIOReturnSuccess;
3754*e3723e1fSApple OSS Distributions
3755*e3723e1fSApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3756*e3723e1fSApple OSS Distributions if (kIOMemoryRemote & _flags) {
3757*e3723e1fSApple OSS Distributions return kIOReturnNotAttached;
3758*e3723e1fSApple OSS Distributions }
3759*e3723e1fSApple OSS Distributions
3760*e3723e1fSApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3761*e3723e1fSApple OSS Distributions return kIOReturnUnsupported;
3762*e3723e1fSApple OSS Distributions }
3763*e3723e1fSApple OSS Distributions
3764*e3723e1fSApple OSS Distributions if (_memRef) {
3765*e3723e1fSApple OSS Distributions err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3766*e3723e1fSApple OSS Distributions } else {
3767*e3723e1fSApple OSS Distributions err = kIOReturnUnsupported;
3768*e3723e1fSApple OSS Distributions }
3769*e3723e1fSApple OSS Distributions
3770*e3723e1fSApple OSS Distributions return err;
3771*e3723e1fSApple OSS Distributions }
3772*e3723e1fSApple OSS Distributions
3773*e3723e1fSApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3774*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setOwnership( task_t newOwner,
3775*e3723e1fSApple OSS Distributions int newLedgerTag,
3776*e3723e1fSApple OSS Distributions IOOptionBits newLedgerOptions )
3777*e3723e1fSApple OSS Distributions {
3778*e3723e1fSApple OSS Distributions IOReturn err = kIOReturnNotReady;
3779*e3723e1fSApple OSS Distributions
3780*e3723e1fSApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3781*e3723e1fSApple OSS Distributions if (kIOMemoryRemote & _flags) {
3782*e3723e1fSApple OSS Distributions return kIOReturnNotAttached;
3783*e3723e1fSApple OSS Distributions }
3784*e3723e1fSApple OSS Distributions
3785*e3723e1fSApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3786*e3723e1fSApple OSS Distributions return kIOReturnUnsupported;
3787*e3723e1fSApple OSS Distributions }
3788*e3723e1fSApple OSS Distributions
3789*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3790*e3723e1fSApple OSS Distributions LOCK;
3791*e3723e1fSApple OSS Distributions }
3792*e3723e1fSApple OSS Distributions if (_memRef) {
3793*e3723e1fSApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3794*e3723e1fSApple OSS Distributions } else {
3795*e3723e1fSApple OSS Distributions IOMultiMemoryDescriptor * mmd;
3796*e3723e1fSApple OSS Distributions IOSubMemoryDescriptor * smd;
3797*e3723e1fSApple OSS Distributions if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3798*e3723e1fSApple OSS Distributions err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3799*e3723e1fSApple OSS Distributions } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3800*e3723e1fSApple OSS Distributions err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3801*e3723e1fSApple OSS Distributions }
3802*e3723e1fSApple OSS Distributions }
3803*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3804*e3723e1fSApple OSS Distributions UNLOCK;
3805*e3723e1fSApple OSS Distributions }
3806*e3723e1fSApple OSS Distributions
3807*e3723e1fSApple OSS Distributions return err;
3808*e3723e1fSApple OSS Distributions }
3809*e3723e1fSApple OSS Distributions
3810*e3723e1fSApple OSS Distributions
3811*e3723e1fSApple OSS Distributions uint64_t
getDMAMapLength(uint64_t * offset)3812*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3813*e3723e1fSApple OSS Distributions {
3814*e3723e1fSApple OSS Distributions uint64_t length;
3815*e3723e1fSApple OSS Distributions
3816*e3723e1fSApple OSS Distributions if (_memRef) {
3817*e3723e1fSApple OSS Distributions length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3818*e3723e1fSApple OSS Distributions } else {
3819*e3723e1fSApple OSS Distributions IOByteCount iterate, segLen;
3820*e3723e1fSApple OSS Distributions IOPhysicalAddress sourceAddr, sourceAlign;
3821*e3723e1fSApple OSS Distributions
3822*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3823*e3723e1fSApple OSS Distributions LOCK;
3824*e3723e1fSApple OSS Distributions }
3825*e3723e1fSApple OSS Distributions length = 0;
3826*e3723e1fSApple OSS Distributions iterate = 0;
3827*e3723e1fSApple OSS Distributions while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3828*e3723e1fSApple OSS Distributions sourceAlign = (sourceAddr & page_mask);
3829*e3723e1fSApple OSS Distributions if (offset && !iterate) {
3830*e3723e1fSApple OSS Distributions *offset = sourceAlign;
3831*e3723e1fSApple OSS Distributions }
3832*e3723e1fSApple OSS Distributions length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3833*e3723e1fSApple OSS Distributions iterate += segLen;
3834*e3723e1fSApple OSS Distributions }
3835*e3723e1fSApple OSS Distributions if (!iterate) {
3836*e3723e1fSApple OSS Distributions length = getLength();
3837*e3723e1fSApple OSS Distributions if (offset) {
3838*e3723e1fSApple OSS Distributions *offset = 0;
3839*e3723e1fSApple OSS Distributions }
3840*e3723e1fSApple OSS Distributions }
3841*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3842*e3723e1fSApple OSS Distributions UNLOCK;
3843*e3723e1fSApple OSS Distributions }
3844*e3723e1fSApple OSS Distributions }
3845*e3723e1fSApple OSS Distributions
3846*e3723e1fSApple OSS Distributions return length;
3847*e3723e1fSApple OSS Distributions }
3848*e3723e1fSApple OSS Distributions
3849*e3723e1fSApple OSS Distributions
3850*e3723e1fSApple OSS Distributions IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3851*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3852*e3723e1fSApple OSS Distributions IOByteCount * dirtyPageCount )
3853*e3723e1fSApple OSS Distributions {
3854*e3723e1fSApple OSS Distributions IOReturn err = kIOReturnNotReady;
3855*e3723e1fSApple OSS Distributions
3856*e3723e1fSApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3857*e3723e1fSApple OSS Distributions if (kIOMemoryRemote & _flags) {
3858*e3723e1fSApple OSS Distributions return kIOReturnNotAttached;
3859*e3723e1fSApple OSS Distributions }
3860*e3723e1fSApple OSS Distributions
3861*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3862*e3723e1fSApple OSS Distributions LOCK;
3863*e3723e1fSApple OSS Distributions }
3864*e3723e1fSApple OSS Distributions if (_memRef) {
3865*e3723e1fSApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3866*e3723e1fSApple OSS Distributions } else {
3867*e3723e1fSApple OSS Distributions IOMultiMemoryDescriptor * mmd;
3868*e3723e1fSApple OSS Distributions IOSubMemoryDescriptor * smd;
3869*e3723e1fSApple OSS Distributions if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3870*e3723e1fSApple OSS Distributions err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3871*e3723e1fSApple OSS Distributions } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3872*e3723e1fSApple OSS Distributions err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3873*e3723e1fSApple OSS Distributions }
3874*e3723e1fSApple OSS Distributions }
3875*e3723e1fSApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3876*e3723e1fSApple OSS Distributions UNLOCK;
3877*e3723e1fSApple OSS Distributions }
3878*e3723e1fSApple OSS Distributions
3879*e3723e1fSApple OSS Distributions return err;
3880*e3723e1fSApple OSS Distributions }
3881*e3723e1fSApple OSS Distributions
3882*e3723e1fSApple OSS Distributions
3883*e3723e1fSApple OSS Distributions #if defined(__arm64__)
3884*e3723e1fSApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3885*e3723e1fSApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3886*e3723e1fSApple OSS Distributions #else /* defined(__arm64__) */
3887*e3723e1fSApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3888*e3723e1fSApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3889*e3723e1fSApple OSS Distributions #endif /* defined(__arm64__) */
3890*e3723e1fSApple OSS Distributions
3891*e3723e1fSApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3892*e3723e1fSApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3893*e3723e1fSApple OSS Distributions {
3894*e3723e1fSApple OSS Distributions ppnum_t page, end;
3895*e3723e1fSApple OSS Distributions
3896*e3723e1fSApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3897*e3723e1fSApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3898*e3723e1fSApple OSS Distributions for (; page < end; page++) {
3899*e3723e1fSApple OSS Distributions pmap_clear_noencrypt(page);
3900*e3723e1fSApple OSS Distributions }
3901*e3723e1fSApple OSS Distributions }
3902*e3723e1fSApple OSS Distributions
3903*e3723e1fSApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3904*e3723e1fSApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3905*e3723e1fSApple OSS Distributions {
3906*e3723e1fSApple OSS Distributions ppnum_t page, end;
3907*e3723e1fSApple OSS Distributions
3908*e3723e1fSApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3909*e3723e1fSApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3910*e3723e1fSApple OSS Distributions for (; page < end; page++) {
3911*e3723e1fSApple OSS Distributions pmap_set_noencrypt(page);
3912*e3723e1fSApple OSS Distributions }
3913*e3723e1fSApple OSS Distributions }
3914*e3723e1fSApple OSS Distributions
/*
 * Apply a cache or encryption maintenance operation across [offset,
 * offset + length) of this descriptor's physical pages.
 *
 * options selects the operation:
 *   kIOMemoryIncoherentIOFlush / kIOMemoryIncoherentIOStore - data-cache
 *     maintenance for incoherent DMA.  On arm64 with __ARM_COHERENT_IO__
 *     a single zero-length call is issued and the walk is skipped.
 *   kIOMemorySetEncrypted / kIOMemoryClearEncrypted - adjust per-page
 *     pmap no-encrypt attributes.
 *
 * Returns kIOReturnSuccess, kIOReturnUnsupported for unknown options,
 * kIOReturnNotAttached for remote memory, or kIOReturnUnderrun when the
 * segment walk ends before covering the requested length.
 */
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	// Simple per-segment operation: (physical address, byte count).
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	// arm64 variant also receives the remaining byte count and may abort
	// the walk by setting *result non-zero.
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	// Remote (non-local) memory cannot be operated on from this kernel.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Coherent I/O: one zero-length call performs the needed barrier.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// Coherent I/O: one zero-length call performs the needed barrier.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	// No handler selected: the option is not supported on this build.
#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the request to the descriptor's bounds, then walk segments.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// The handlers take 'unsigned int' counts; clamp to a
		// page-aligned value that fits.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// A non-zero result means the handler completed the whole
			// operation (or aborted); stop walking and report success.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// Anything left over means the segment walk ended early.
	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4029*e3723e1fSApple OSS Distributions
/*
 * Bounds of the statically-mapped, permanently-wired kernel region.
 * Addresses in [io_kernel_static_start, io_kernel_static_end) never need a
 * real UPL: wireVirtual() routes them to io_get_kernel_static_upl(), which
 * reads translations straight out of the kernel pmap.
 */

#if defined(__i386__) || defined(__x86_64__)

extern vm_offset_t kc_highest_nonlinkedit_vmaddr;

/* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
 * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
 * kernel non-text data -- should we just add another range instead?
 */
#define io_kernel_static_start vm_kernel_stext
#define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)

#elif defined(__arm64__)

extern vm_offset_t static_memory_end;

/* NOTE(review): this inner #if is always true inside the enclosing
 * "#elif defined(__arm64__)" branch, so the #else arm below is dead code --
 * presumably left over from a removed architecture; confirm before cleaning up. */
#if defined(__arm64__)
#define io_kernel_static_start vm_kext_base
#else /* defined(__arm64__) */
#define io_kernel_static_start vm_kernel_stext
#endif /* defined(__arm64__) */

#define io_kernel_static_end static_memory_end

#else
#error io_kernel_static_end is undefined for this architecture
#endif
4060*e3723e1fSApple OSS Distributions
4061*e3723e1fSApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4062*e3723e1fSApple OSS Distributions io_get_kernel_static_upl(
4063*e3723e1fSApple OSS Distributions vm_map_t /* map */,
4064*e3723e1fSApple OSS Distributions uintptr_t offset,
4065*e3723e1fSApple OSS Distributions upl_size_t *upl_size,
4066*e3723e1fSApple OSS Distributions unsigned int *page_offset,
4067*e3723e1fSApple OSS Distributions upl_t *upl,
4068*e3723e1fSApple OSS Distributions upl_page_info_array_t page_list,
4069*e3723e1fSApple OSS Distributions unsigned int *count,
4070*e3723e1fSApple OSS Distributions ppnum_t *highest_page)
4071*e3723e1fSApple OSS Distributions {
4072*e3723e1fSApple OSS Distributions unsigned int pageCount, page;
4073*e3723e1fSApple OSS Distributions ppnum_t phys;
4074*e3723e1fSApple OSS Distributions ppnum_t highestPage = 0;
4075*e3723e1fSApple OSS Distributions
4076*e3723e1fSApple OSS Distributions pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4077*e3723e1fSApple OSS Distributions if (pageCount > *count) {
4078*e3723e1fSApple OSS Distributions pageCount = *count;
4079*e3723e1fSApple OSS Distributions }
4080*e3723e1fSApple OSS Distributions *upl_size = (upl_size_t) ptoa_64(pageCount);
4081*e3723e1fSApple OSS Distributions
4082*e3723e1fSApple OSS Distributions *upl = NULL;
4083*e3723e1fSApple OSS Distributions *page_offset = ((unsigned int) page_mask & offset);
4084*e3723e1fSApple OSS Distributions
4085*e3723e1fSApple OSS Distributions for (page = 0; page < pageCount; page++) {
4086*e3723e1fSApple OSS Distributions phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4087*e3723e1fSApple OSS Distributions if (!phys) {
4088*e3723e1fSApple OSS Distributions break;
4089*e3723e1fSApple OSS Distributions }
4090*e3723e1fSApple OSS Distributions page_list[page].phys_addr = phys;
4091*e3723e1fSApple OSS Distributions page_list[page].free_when_done = 0;
4092*e3723e1fSApple OSS Distributions page_list[page].absent = 0;
4093*e3723e1fSApple OSS Distributions page_list[page].dirty = 0;
4094*e3723e1fSApple OSS Distributions page_list[page].precious = 0;
4095*e3723e1fSApple OSS Distributions page_list[page].device = 0;
4096*e3723e1fSApple OSS Distributions if (phys > highestPage) {
4097*e3723e1fSApple OSS Distributions highestPage = phys;
4098*e3723e1fSApple OSS Distributions }
4099*e3723e1fSApple OSS Distributions }
4100*e3723e1fSApple OSS Distributions
4101*e3723e1fSApple OSS Distributions *highest_page = highestPage;
4102*e3723e1fSApple OSS Distributions
4103*e3723e1fSApple OSS Distributions return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4104*e3723e1fSApple OSS Distributions }
4105*e3723e1fSApple OSS Distributions
/*
 * Wire down the pages backing a virtual-range descriptor (type Virtual,
 * Virtual64 or UIO) for the given I/O direction, building one or more UPLs
 * per range and appending an ioPLBlock for each into _memoryEntries.
 *
 * forDirection: direction bits to wire for; if no In/Out bit is set the
 *               descriptor's own direction is used.  May also carry
 *               kIODirectionPrepare* option bits (Phys32, NoFault,
 *               NoZeroFill, NonCoherent).
 * Returns kIOReturnSuccess, kIOReturnNotWritable if already prepared
 * read-only and write access is requested, kIOReturnNoMemory, or a
 * translated Mach error; on failure all partially-created UPLs are aborted
 * and _memoryEntries is reset (see abortExit).
 */
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOReturn error = kIOReturnSuccess;
	ioGMDData *dataP;
	upl_page_info_array_t pageInfo;
	ppnum_t mapBase;
	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	mach_vm_size_t numBytesWired = 0;

	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

	// No explicit direction requested: fall back to the descriptor's own.
	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
		forDirection = (IODirection) (forDirection | getDirection());
	}

	dataP = getDataP(_memoryEntries);
	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
	switch (kIODirectionOutIn & forDirection) {
	case kIODirectionOut:
		// Pages do not need to be marked as dirty on commit
		uplFlags = UPL_COPYOUT_FROM;
		dataP->fDMAAccess = kIODMAMapReadAccess;
		break;

	case kIODirectionIn:
		dataP->fDMAAccess = kIODMAMapWriteAccess;
		uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
		break;

	default:
		// Bidirectional (or both bits set): device may read and write.
		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
		uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
		break;
	}

	if (_wireCount) {
		// Already wired: only validate that a read-only preparation is not
		// being reused for a direction that needs write access.
		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
			    (size_t)VM_KERNEL_ADDRPERM(this));
			error = kIOReturnNotWritable;
		}
	} else {
		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
		IOMapper *mapper;

		mapper = dataP->fMapper;
		dataP->fMappedBaseValid = dataP->fMappedBase = 0;

		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
		tag = _kernelTag;
		if (VM_KERN_MEMORY_NONE == tag) {
			tag = IOMemoryTag(kernel_map);
		}

		// Caller wants physical addresses below 4GB: without a mapper the
		// UPL itself must produce 32-bit pages; with one, clamp the DMA
		// mapping width instead.
		if (kIODirectionPrepareToPhys32 & forDirection) {
			if (!mapper) {
				uplFlags |= UPL_NEED_32BIT_ADDR;
			}
			if (dataP->fDMAMapNumAddressBits > 32) {
				dataP->fDMAMapNumAddressBits = 32;
			}
		}
		if (kIODirectionPrepareNoFault & forDirection) {
			uplFlags |= UPL_REQUEST_NO_FAULT;
		}
		if (kIODirectionPrepareNoZeroFill & forDirection) {
			uplFlags |= UPL_NOZEROFILLIO;
		}
		if (kIODirectionPrepareNonCoherent & forDirection) {
			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
		}

		mapBase = 0;

		// Note that appendBytes(NULL) zeros the data up to the desired length
		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
		// Reject a page-list size that would truncate when narrowed to
		// unsigned int (self-comparison detects the truncation).
		if (uplPageSize > ((unsigned int)uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		// appendBytes may reallocate; dataP is stale from here on and is
		// re-fetched inside the loop.
		dataP = NULL;

		// Find the appropriate vm_map for the given task
		vm_map_t curMap;
		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
			curMap = NULL;
		} else {
			curMap = get_task_map(_task);
		}

		// Iterate over the vector of virtual ranges
		Ranges vec = _ranges;
		unsigned int pageIndex = 0;
		IOByteCount mdOffset = 0;
		ppnum_t highestPage = 0;
		bool byteAlignUPL;

		IOMemoryEntry * memRefEntry = NULL;
		if (_memRef) {
			memRefEntry = &_memRef->entries[0];
			// Entries created with MAP_MEM_USE_DATA_ADDR carry byte-accurate
			// (not page-truncated) offsets in their UPLs.
			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
		} else {
			byteAlignUPL = true;
		}

		for (UInt range = 0; mdOffset < _length; range++) {
			ioPLBlock iopl;
			mach_vm_address_t startPage, startPageOffset;
			mach_vm_size_t numBytes;
			ppnum_t highPage = 0;

			if (_memRef) {
				if (range >= _memRef->count) {
					panic("memRefEntry");
				}
				memRefEntry = &_memRef->entries[range];
				numBytes = memRefEntry->size;
				// Sentinel: no virtual address is known for memory-entry-backed
				// ranges; checked against -1 before any address arithmetic below.
				startPage = -1ULL;
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = (memRefEntry->start & PAGE_MASK);
				}
			} else {
				// Get the startPage address and length of vec[range]
				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = startPage & PAGE_MASK;
					startPage = trunc_page_64(startPage);
				}
			}
			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
			numBytes += startPageOffset;

			if (mapper) {
				iopl.fMappedPage = mapBase + pageIndex;
			} else {
				iopl.fMappedPage = 0;
			}

			// Iterate over the current range, creating UPLs
			while (numBytes) {
				vm_address_t kernelStart = (vm_address_t) startPage;
				vm_map_t theMap;
				if (curMap) {
					theMap = curMap;
				} else if (_memRef) {
					theMap = NULL;
				} else {
					assert(_task == kernel_task);
					theMap = IOPageableMapForAddress(kernelStart);
				}

				// ioplFlags is an in/out parameter
				upl_control_flags_t ioplFlags = uplFlags;
				dataP = getDataP(_memoryEntries);
				pageInfo = getPageList(dataP);
				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

				mach_vm_size_t ioplPhysSize;
				upl_size_t ioplSize;
				unsigned int numPageInfo;

				// Ask the VM how much physically-contiguous bookkeeping this
				// span needs before sizing the UPL request.
				if (_memRef) {
					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
				} else {
					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
				}
				if (error != KERN_SUCCESS) {
					if (_memRef) {
						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
					} else {
						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
					}
					printf("entry size error %d\n", error);
					goto abortExit;
				}
				// A single UPL is bounded; oversized ranges loop again below.
				ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
				numPageInfo = atop_32(ioplPhysSize);
				if (byteAlignUPL) {
					if (numBytes > ioplPhysSize) {
						ioplSize = ((typeof(ioplSize))ioplPhysSize);
					} else {
						ioplSize = ((typeof(ioplSize))numBytes);
					}
				} else {
					ioplSize = ((typeof(ioplSize))ioplPhysSize);
				}

				if (_memRef) {
					memory_object_offset_t entryOffset;

					entryOffset = mdOffset;
					if (byteAlignUPL) {
						entryOffset = (entryOffset - memRefEntry->offset);
					} else {
						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
					}
					// Clip the request to what remains of this memory entry.
					if (ioplSize > (memRefEntry->size - entryOffset)) {
						ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
					}
					error = memory_object_iopl_request(memRefEntry->entry,
					    entryOffset,
					    &ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				} else if ((theMap == kernel_map)
				    && (kernelStart >= io_kernel_static_start)
				    && (kernelStart < io_kernel_static_end)) {
					// Statically-wired kernel memory: fill the page list straight
					// from the pmap, no UPL object needed.
					error = io_get_kernel_static_upl(theMap,
					    kernelStart,
					    &ioplSize,
					    &iopl.fPageOffset,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &highPage);
				} else {
					assert(theMap);
					error = vm_map_create_upl(theMap,
					    startPage,
					    (upl_size_t*)&ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				}

				if (error != KERN_SUCCESS) {
					traceInterval.setEndArg2(error);
					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
					goto abortExit;
				}

				assert(ioplSize);

				// Track the highest physical page seen across all UPLs;
				// stored in _highestPage after the loop.
				if (iopl.fIOPL) {
					highPage = upl_get_highest_page(iopl.fIOPL);
				}
				if (highPage > highestPage) {
					highestPage = highPage;
				}

				if (baseInfo->device) {
					// Device memory: a single pseudo page-info entry suffices.
					numPageInfo = 1;
					iopl.fFlags = kIOPLOnDevice;
				} else {
					iopl.fFlags = 0;
				}

				if (byteAlignUPL) {
					if (iopl.fIOPL) {
						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
						// The UPL reports the true byte offset of the data; adopt it.
						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
					}
					if (startPage != (mach_vm_address_t)-1) {
						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
						startPage -= iopl.fPageOffset;
					}
					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
					numBytes += iopl.fPageOffset;
				}

				iopl.fIOMDOffset = mdOffset;
				iopl.fPageInfo = pageIndex;

				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
					// Clean up partial created and unsaved iopl
					if (iopl.fIOPL) {
						upl_abort(iopl.fIOPL, 0);
						upl_deallocate(iopl.fIOPL);
					}
					error = kIOReturnNoMemory;
					traceInterval.setEndArg2(error);
					goto abortExit;
				}
				dataP = NULL;

				// Check for a multiple iopl's in one virtual range
				pageIndex += numPageInfo;
				// Back out the page offset so the mdOffset += ioplSize below
				// advances by the bytes this UPL actually covered.
				mdOffset -= iopl.fPageOffset;
				numBytesWired += ioplSize;
				if (ioplSize < numBytes) {
					// Range not fully covered: loop again for the remainder.
					numBytes -= ioplSize;
					if (startPage != (mach_vm_address_t)-1) {
						startPage += ioplSize;
					}
					mdOffset += ioplSize;
					iopl.fPageOffset = 0;
					if (mapper) {
						iopl.fMappedPage = mapBase + pageIndex;
					}
				} else {
					mdOffset += numBytes;
					break;
				}
			}
		}

		_highestPage = highestPage;
		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);

		// Remember read-only preparation so a later write-direction wire
		// attempt is rejected (see the _wireCount branch above).
		if (UPL_COPYOUT_FROM & uplFlags) {
			_flags |= kIOMemoryPreparedReadOnly;
		}
		traceInterval.setEndCodes(numBytesWired, error);
	}

#if IOTRACKING
	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
		dataP = getDataP(_memoryEntries);
		if (!dataP->fWireTracking.link.next) {
			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
		}
	}
#endif /* IOTRACKING */

	return error;

abortExit:
	// Failure path: abort and free every UPL recorded so far, then reset
	// _memoryEntries to its empty (header-only) length.
	{
		dataP = getDataP(_memoryEntries);
		UInt done = getNumIOPL(_memoryEntries, dataP);
		ioPLBlock *ioplList = getIOPLList(dataP);

		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
			if (ioplList[ioplIdx].fIOPL) {
				upl_abort(ioplList[ioplIdx].fIOPL, 0);
				upl_deallocate(ioplList[ioplIdx].fIOPL);
			}
		}
		_memoryEntries->setLength(computeDataSize(0, 0));
	}

	// Translate raw Mach errors into IOKit return codes for callers.
	if (error == KERN_FAILURE) {
		error = kIOReturnCannotWire;
	} else if (error == KERN_MEMORY_ERROR) {
		error = kIOReturnNoResources;
	}

	return error;
}
4464*e3723e1fSApple OSS Distributions
4465*e3723e1fSApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4466*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4467*e3723e1fSApple OSS Distributions {
4468*e3723e1fSApple OSS Distributions ioGMDData * dataP;
4469*e3723e1fSApple OSS Distributions
4470*e3723e1fSApple OSS Distributions if (size > UINT_MAX) {
4471*e3723e1fSApple OSS Distributions return false;
4472*e3723e1fSApple OSS Distributions }
4473*e3723e1fSApple OSS Distributions if (!_memoryEntries) {
4474*e3723e1fSApple OSS Distributions _memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4475*e3723e1fSApple OSS Distributions if (!_memoryEntries) {
4476*e3723e1fSApple OSS Distributions return false;
4477*e3723e1fSApple OSS Distributions }
4478*e3723e1fSApple OSS Distributions } else if (!_memoryEntries->initWithCapacity(size)) {
4479*e3723e1fSApple OSS Distributions return false;
4480*e3723e1fSApple OSS Distributions }
4481*e3723e1fSApple OSS Distributions
4482*e3723e1fSApple OSS Distributions _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4483*e3723e1fSApple OSS Distributions dataP = getDataP(_memoryEntries);
4484*e3723e1fSApple OSS Distributions
4485*e3723e1fSApple OSS Distributions if (mapper == kIOMapperWaitSystem) {
4486*e3723e1fSApple OSS Distributions IOMapper::checkForSystemMapper();
4487*e3723e1fSApple OSS Distributions mapper = IOMapper::gSystem;
4488*e3723e1fSApple OSS Distributions }
4489*e3723e1fSApple OSS Distributions dataP->fMapper = mapper;
4490*e3723e1fSApple OSS Distributions dataP->fPageCnt = 0;
4491*e3723e1fSApple OSS Distributions dataP->fMappedBase = 0;
4492*e3723e1fSApple OSS Distributions dataP->fDMAMapNumAddressBits = 64;
4493*e3723e1fSApple OSS Distributions dataP->fDMAMapAlignment = 0;
4494*e3723e1fSApple OSS Distributions dataP->fPreparationID = kIOPreparationIDUnprepared;
4495*e3723e1fSApple OSS Distributions dataP->fCompletionError = false;
4496*e3723e1fSApple OSS Distributions dataP->fMappedBaseValid = false;
4497*e3723e1fSApple OSS Distributions
4498*e3723e1fSApple OSS Distributions return true;
4499*e3723e1fSApple OSS Distributions }
4500*e3723e1fSApple OSS Distributions
4501*e3723e1fSApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4502*e3723e1fSApple OSS Distributions IOMemoryDescriptor::dmaMap(
4503*e3723e1fSApple OSS Distributions IOMapper * mapper,
4504*e3723e1fSApple OSS Distributions IOMemoryDescriptor * memory,
4505*e3723e1fSApple OSS Distributions IODMACommand * command,
4506*e3723e1fSApple OSS Distributions const IODMAMapSpecification * mapSpec,
4507*e3723e1fSApple OSS Distributions uint64_t offset,
4508*e3723e1fSApple OSS Distributions uint64_t length,
4509*e3723e1fSApple OSS Distributions uint64_t * mapAddress,
4510*e3723e1fSApple OSS Distributions uint64_t * mapLength)
4511*e3723e1fSApple OSS Distributions {
4512*e3723e1fSApple OSS Distributions IOReturn err;
4513*e3723e1fSApple OSS Distributions uint32_t mapOptions;
4514*e3723e1fSApple OSS Distributions
4515*e3723e1fSApple OSS Distributions mapOptions = 0;
4516*e3723e1fSApple OSS Distributions mapOptions |= kIODMAMapReadAccess;
4517*e3723e1fSApple OSS Distributions if (!(kIOMemoryPreparedReadOnly & _flags)) {
4518*e3723e1fSApple OSS Distributions mapOptions |= kIODMAMapWriteAccess;
4519*e3723e1fSApple OSS Distributions }
4520*e3723e1fSApple OSS Distributions
4521*e3723e1fSApple OSS Distributions err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4522*e3723e1fSApple OSS Distributions mapSpec, command, NULL, mapAddress, mapLength);
4523*e3723e1fSApple OSS Distributions
4524*e3723e1fSApple OSS Distributions if (kIOReturnSuccess == err) {
4525*e3723e1fSApple OSS Distributions dmaMapRecord(mapper, command, *mapLength);
4526*e3723e1fSApple OSS Distributions }
4527*e3723e1fSApple OSS Distributions
4528*e3723e1fSApple OSS Distributions return err;
4529*e3723e1fSApple OSS Distributions }
4530*e3723e1fSApple OSS Distributions
4531*e3723e1fSApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4532*e3723e1fSApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4533*e3723e1fSApple OSS Distributions IOMapper * mapper,
4534*e3723e1fSApple OSS Distributions IODMACommand * command,
4535*e3723e1fSApple OSS Distributions uint64_t mapLength)
4536*e3723e1fSApple OSS Distributions {
4537*e3723e1fSApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4538*e3723e1fSApple OSS Distributions kern_allocation_name_t alloc;
4539*e3723e1fSApple OSS Distributions int16_t prior;
4540*e3723e1fSApple OSS Distributions
4541*e3723e1fSApple OSS Distributions if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4542*e3723e1fSApple OSS Distributions kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4543*e3723e1fSApple OSS Distributions }
4544*e3723e1fSApple OSS Distributions
4545*e3723e1fSApple OSS Distributions if (!command) {
4546*e3723e1fSApple OSS Distributions return;
4547*e3723e1fSApple OSS Distributions }
4548*e3723e1fSApple OSS Distributions prior = OSAddAtomic16(1, &_dmaReferences);
4549*e3723e1fSApple OSS Distributions if (!prior) {
4550*e3723e1fSApple OSS Distributions if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4551*e3723e1fSApple OSS Distributions _mapName = alloc;
4552*e3723e1fSApple OSS Distributions mapLength = _length;
4553*e3723e1fSApple OSS Distributions kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4554*e3723e1fSApple OSS Distributions } else {
4555*e3723e1fSApple OSS Distributions _mapName = NULL;
4556*e3723e1fSApple OSS Distributions }
4557*e3723e1fSApple OSS Distributions }
4558*e3723e1fSApple OSS Distributions }
4559*e3723e1fSApple OSS Distributions
// Tear down a DMA mapping previously established via dmaMap(), reversing the
// reference count and allocation accounting performed in dmaMapRecord().
IOReturn
IOMemoryDescriptor::dmaUnmap(
	IOMapper * mapper,
	IODMACommand * command,
	uint64_t offset,
	uint64_t mapAddress,
	uint64_t mapLength)
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
	IOReturn ret;
	kern_allocation_name_t alloc;
	kern_allocation_name_t mapName;
	int16_t prior;

	// Drop this command's DMA reference. Capture the prior count so the
	// kernel-tag subtotal is only credited back when the last reference
	// (prior == 1) is released below.
	mapName = NULL;
	prior = 0;
	if (command) {
		mapName = _mapName;
		if (_dmaReferences) {
			prior = OSAddAtomic16(-1, &_dmaReferences);
		} else {
			// More completes than prepares: a driver bug, fail loudly.
			panic("_dmaReferences underflow");
		}
	}

	// Zero-length mapping: nothing was mapped, nothing to undo.
	if (!mapLength) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

	// Reverse the accounting from dmaMapRecord(): subtract the mapped bytes,
	// and on the final reference subtract the descriptor length from the
	// per-tag subtotal.
	if ((alloc = mapper->fAllocName)) {
		kern_allocation_update_size(alloc, -mapLength, NULL);
		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
			mapLength = _length;
			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
		}
	}

	traceInterval.setEndArg1(ret);
	return ret;
}
4603*e3723e1fSApple OSS Distributions
// General-descriptor DMA map: when the memory is already wired and described
// by an ioGMDData page list, hand the mapper the page list directly; otherwise
// fall back to the base-class path.
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn err = kIOReturnSuccess;
	ioGMDData * dataP;
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	// Host-only memory is never exposed to devices; report success with no
	// mapping (mapAddress stays 0).
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	// Remote descriptors cannot be DMA-mapped from this side.
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Physical descriptors, or partial-range requests, take the generic
	// super-class path.
	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t mapOptions = 0;

		// Build the map specification from the constraints recorded at
		// wire time.
		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// Whole pages, page-aligned start: the page list fully covers the
		// mapped range.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		// Record the mapping for reference counting / accounting.
		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4677*e3723e1fSApple OSS Distributions
4678*e3723e1fSApple OSS Distributions /*
4679*e3723e1fSApple OSS Distributions * prepare
4680*e3723e1fSApple OSS Distributions *
4681*e3723e1fSApple OSS Distributions * Prepare the memory for an I/O transfer. This involves paging in
4682*e3723e1fSApple OSS Distributions * the memory, if necessary, and wiring it down for the duration of
4683*e3723e1fSApple OSS Distributions * the transfer. The complete() method completes the processing of
4684*e3723e1fSApple OSS Distributions * the memory after the I/O transfer finishes. This method needn't
 * be called for non-pageable memory.
4686*e3723e1fSApple OSS Distributions */
4687*e3723e1fSApple OSS Distributions
IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
	IOReturn error = kIOReturnSuccess;
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical descriptors are always "prepared"; nothing to wire.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	// Remote memory cannot be prepared from this side.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	// Serialize against complete() and concurrent prepare() calls.
	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}

	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
		// Optionally refuse to wire while the task is being hard-throttled.
		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
			error = kIOReturnNotReady;
			goto finish;
		}
		error = wireVirtual(forDirection);
	}

	if (kIOReturnSuccess == error) {
		// First successful prepare: perform one-time post-wire work.
		if (1 == ++_wireCount) {
			if (kIOMemoryClearEncrypt & _flags) {
				performOperation(kIOMemoryClearEncrypted, 0, _length);
			}

			ktraceEmitPhysicalSegments();
		}
	}

finish:

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}
	traceInterval.setEndArg1(error);

	return error;
}
4737*e3723e1fSApple OSS Distributions
4738*e3723e1fSApple OSS Distributions /*
4739*e3723e1fSApple OSS Distributions * complete
4740*e3723e1fSApple OSS Distributions *
4741*e3723e1fSApple OSS Distributions * Complete processing of the memory after an I/O transfer finishes.
4742*e3723e1fSApple OSS Distributions * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
4745*e3723e1fSApple OSS Distributions */
4746*e3723e1fSApple OSS Distributions
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical descriptors are never wired; nothing to complete.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	// Serialize against prepare() and other complete() calls.
	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// complete() without a matching prepare() is a caller bug; bail
		// gracefully on release kernels.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		// Remember that the transfer failed so the pages are aborted, not
		// committed, on the final unwire below.
		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		// Leaving the last prepare: restore the encrypted state cleared in
		// prepare().
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Final unwire: tear everything down.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				// Release any system-mapper DMA mapping still in place.
				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Truncate the data back to its header; the IOPL entries
				// are gone.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4851*e3723e1fSApple OSS Distributions
4852*e3723e1fSApple OSS Distributions IOOptionBits
memoryReferenceCreateOptions(IOOptionBits options,IOMemoryMap * mapping)4853*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4854*e3723e1fSApple OSS Distributions {
4855*e3723e1fSApple OSS Distributions IOOptionBits createOptions = 0;
4856*e3723e1fSApple OSS Distributions
4857*e3723e1fSApple OSS Distributions if (!(kIOMap64Bit & options)) {
4858*e3723e1fSApple OSS Distributions panic("IOMemoryDescriptor::makeMapping !64bit");
4859*e3723e1fSApple OSS Distributions }
4860*e3723e1fSApple OSS Distributions if (!(kIOMapReadOnly & options)) {
4861*e3723e1fSApple OSS Distributions createOptions |= kIOMemoryReferenceWrite;
4862*e3723e1fSApple OSS Distributions #if DEVELOPMENT || DEBUG
4863*e3723e1fSApple OSS Distributions if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4864*e3723e1fSApple OSS Distributions && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4865*e3723e1fSApple OSS Distributions OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4866*e3723e1fSApple OSS Distributions }
4867*e3723e1fSApple OSS Distributions #endif
4868*e3723e1fSApple OSS Distributions }
4869*e3723e1fSApple OSS Distributions return createOptions;
4870*e3723e1fSApple OSS Distributions }
4871*e3723e1fSApple OSS Distributions
4872*e3723e1fSApple OSS Distributions /*
4873*e3723e1fSApple OSS Distributions * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
4874*e3723e1fSApple OSS Distributions * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
4875*e3723e1fSApple OSS Distributions * creation.
4876*e3723e1fSApple OSS Distributions */
4877*e3723e1fSApple OSS Distributions
IOMemoryMap *
IOGeneralMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOReturn err = kIOReturnSuccess;
	IOMemoryMap * mapping;

	// Pre-create the copy-on-write named entry outside the global memory
	// lock (see comment above); creation may allocate real pages.
	if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
		struct IOMemoryReference * newRef;
		// In the 64-bit path __address carries the IOMemoryMap object.
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
		if (kIOReturnSuccess == err) {
			// Another thread may have raced us; keep theirs, free ours.
			if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
				memoryReferenceFree(newRef);
			}
		}
	}
	if (kIOReturnSuccess != err) {
		return NULL;
	}
	mapping = IOMemoryDescriptor::makeMapping(
		owner, __intoTask, __address, options, __offset, __length);

#if IOTRACKING
	if ((mapping == (IOMemoryMap *) __address)
	    && (0 == (kIOMapStatic & mapping->fOptions))
	    && (NULL == mapping->fSuperMap)
	    && ((kIOTracking & gIOKitDebug) || _task)) {
		// only dram maps in the default on development case
		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
	}
#endif /* IOTRACKING */

	return mapping;
}
4917*e3723e1fSApple OSS Distributions
4918*e3723e1fSApple OSS Distributions IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4919*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::doMap(
4920*e3723e1fSApple OSS Distributions vm_map_t __addressMap,
4921*e3723e1fSApple OSS Distributions IOVirtualAddress * __address,
4922*e3723e1fSApple OSS Distributions IOOptionBits options,
4923*e3723e1fSApple OSS Distributions IOByteCount __offset,
4924*e3723e1fSApple OSS Distributions IOByteCount __length )
4925*e3723e1fSApple OSS Distributions {
4926*e3723e1fSApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4927*e3723e1fSApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4928*e3723e1fSApple OSS Distributions #ifndef __LP64__
4929*e3723e1fSApple OSS Distributions if (!(kIOMap64Bit & options)) {
4930*e3723e1fSApple OSS Distributions panic("IOGeneralMemoryDescriptor::doMap !64bit");
4931*e3723e1fSApple OSS Distributions }
4932*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
4933*e3723e1fSApple OSS Distributions
4934*e3723e1fSApple OSS Distributions kern_return_t err;
4935*e3723e1fSApple OSS Distributions
4936*e3723e1fSApple OSS Distributions IOMemoryMap * mapping = (IOMemoryMap *) *__address;
4937*e3723e1fSApple OSS Distributions mach_vm_size_t offset = mapping->fOffset + __offset;
4938*e3723e1fSApple OSS Distributions mach_vm_size_t length = mapping->fLength;
4939*e3723e1fSApple OSS Distributions
4940*e3723e1fSApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4941*e3723e1fSApple OSS Distributions Ranges vec = _ranges;
4942*e3723e1fSApple OSS Distributions
4943*e3723e1fSApple OSS Distributions mach_vm_address_t range0Addr = 0;
4944*e3723e1fSApple OSS Distributions mach_vm_size_t range0Len = 0;
4945*e3723e1fSApple OSS Distributions
4946*e3723e1fSApple OSS Distributions if ((offset >= _length) || ((offset + length) > _length)) {
4947*e3723e1fSApple OSS Distributions traceInterval.setEndArg1(kIOReturnBadArgument);
4948*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4949*e3723e1fSApple OSS Distributions // assert(offset == 0 && _length == 0 && length == 0);
4950*e3723e1fSApple OSS Distributions return kIOReturnBadArgument;
4951*e3723e1fSApple OSS Distributions }
4952*e3723e1fSApple OSS Distributions
4953*e3723e1fSApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4954*e3723e1fSApple OSS Distributions if (kIOMemoryRemote & _flags) {
4955*e3723e1fSApple OSS Distributions return 0;
4956*e3723e1fSApple OSS Distributions }
4957*e3723e1fSApple OSS Distributions
4958*e3723e1fSApple OSS Distributions if (vec.v) {
4959*e3723e1fSApple OSS Distributions getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
4960*e3723e1fSApple OSS Distributions }
4961*e3723e1fSApple OSS Distributions
4962*e3723e1fSApple OSS Distributions // mapping source == dest? (could be much better)
4963*e3723e1fSApple OSS Distributions if (_task
4964*e3723e1fSApple OSS Distributions && (mapping->fAddressTask == _task)
4965*e3723e1fSApple OSS Distributions && (mapping->fAddressMap == get_task_map(_task))
4966*e3723e1fSApple OSS Distributions && (options & kIOMapAnywhere)
4967*e3723e1fSApple OSS Distributions && (!(kIOMapUnique & options))
4968*e3723e1fSApple OSS Distributions && (!(kIOMapGuardedMask & options))
4969*e3723e1fSApple OSS Distributions && (1 == _rangesCount)
4970*e3723e1fSApple OSS Distributions && (0 == offset)
4971*e3723e1fSApple OSS Distributions && range0Addr
4972*e3723e1fSApple OSS Distributions && (length <= range0Len)) {
4973*e3723e1fSApple OSS Distributions mapping->fAddress = range0Addr;
4974*e3723e1fSApple OSS Distributions mapping->fOptions |= kIOMapStatic;
4975*e3723e1fSApple OSS Distributions
4976*e3723e1fSApple OSS Distributions return kIOReturnSuccess;
4977*e3723e1fSApple OSS Distributions }
4978*e3723e1fSApple OSS Distributions
4979*e3723e1fSApple OSS Distributions if (!_memRef) {
4980*e3723e1fSApple OSS Distributions err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
4981*e3723e1fSApple OSS Distributions if (kIOReturnSuccess != err) {
4982*e3723e1fSApple OSS Distributions traceInterval.setEndArg1(err);
4983*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4984*e3723e1fSApple OSS Distributions return err;
4985*e3723e1fSApple OSS Distributions }
4986*e3723e1fSApple OSS Distributions }
4987*e3723e1fSApple OSS Distributions
4988*e3723e1fSApple OSS Distributions
4989*e3723e1fSApple OSS Distributions memory_object_t pager;
4990*e3723e1fSApple OSS Distributions pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4991*e3723e1fSApple OSS Distributions
4992*e3723e1fSApple OSS Distributions // <upl_transpose //
4993*e3723e1fSApple OSS Distributions if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4994*e3723e1fSApple OSS Distributions do{
4995*e3723e1fSApple OSS Distributions upl_t redirUPL2;
4996*e3723e1fSApple OSS Distributions upl_size_t size;
4997*e3723e1fSApple OSS Distributions upl_control_flags_t flags;
4998*e3723e1fSApple OSS Distributions unsigned int lock_count;
4999*e3723e1fSApple OSS Distributions
5000*e3723e1fSApple OSS Distributions if (!_memRef || (1 != _memRef->count)) {
5001*e3723e1fSApple OSS Distributions err = kIOReturnNotReadable;
5002*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5003*e3723e1fSApple OSS Distributions break;
5004*e3723e1fSApple OSS Distributions }
5005*e3723e1fSApple OSS Distributions
5006*e3723e1fSApple OSS Distributions size = (upl_size_t) round_page(mapping->fLength);
5007*e3723e1fSApple OSS Distributions flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5008*e3723e1fSApple OSS Distributions | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5009*e3723e1fSApple OSS Distributions
5010*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
5011*e3723e1fSApple OSS Distributions NULL, NULL,
5012*e3723e1fSApple OSS Distributions &flags, (vm_tag_t) getVMTag(kernel_map))) {
5013*e3723e1fSApple OSS Distributions redirUPL2 = NULL;
5014*e3723e1fSApple OSS Distributions }
5015*e3723e1fSApple OSS Distributions
5016*e3723e1fSApple OSS Distributions for (lock_count = 0;
5017*e3723e1fSApple OSS Distributions IORecursiveLockHaveLock(gIOMemoryLock);
5018*e3723e1fSApple OSS Distributions lock_count++) {
5019*e3723e1fSApple OSS Distributions UNLOCK;
5020*e3723e1fSApple OSS Distributions }
5021*e3723e1fSApple OSS Distributions err = upl_transpose(redirUPL2, mapping->fRedirUPL);
5022*e3723e1fSApple OSS Distributions for (;
5023*e3723e1fSApple OSS Distributions lock_count;
5024*e3723e1fSApple OSS Distributions lock_count--) {
5025*e3723e1fSApple OSS Distributions LOCK;
5026*e3723e1fSApple OSS Distributions }
5027*e3723e1fSApple OSS Distributions
5028*e3723e1fSApple OSS Distributions if (kIOReturnSuccess != err) {
5029*e3723e1fSApple OSS Distributions IOLog("upl_transpose(%x)\n", err);
5030*e3723e1fSApple OSS Distributions err = kIOReturnSuccess;
5031*e3723e1fSApple OSS Distributions }
5032*e3723e1fSApple OSS Distributions
5033*e3723e1fSApple OSS Distributions if (redirUPL2) {
5034*e3723e1fSApple OSS Distributions upl_commit(redirUPL2, NULL, 0);
5035*e3723e1fSApple OSS Distributions upl_deallocate(redirUPL2);
5036*e3723e1fSApple OSS Distributions redirUPL2 = NULL;
5037*e3723e1fSApple OSS Distributions }
5038*e3723e1fSApple OSS Distributions {
5039*e3723e1fSApple OSS Distributions // swap the memEntries since they now refer to different vm_objects
5040*e3723e1fSApple OSS Distributions IOMemoryReference * me = _memRef;
5041*e3723e1fSApple OSS Distributions _memRef = mapping->fMemory->_memRef;
5042*e3723e1fSApple OSS Distributions mapping->fMemory->_memRef = me;
5043*e3723e1fSApple OSS Distributions }
5044*e3723e1fSApple OSS Distributions if (pager) {
5045*e3723e1fSApple OSS Distributions err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
5046*e3723e1fSApple OSS Distributions }
5047*e3723e1fSApple OSS Distributions }while (false);
5048*e3723e1fSApple OSS Distributions }
5049*e3723e1fSApple OSS Distributions // upl_transpose> //
5050*e3723e1fSApple OSS Distributions else {
5051*e3723e1fSApple OSS Distributions err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
5052*e3723e1fSApple OSS Distributions if (err) {
5053*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
5054*e3723e1fSApple OSS Distributions }
5055*e3723e1fSApple OSS Distributions if ((err == KERN_SUCCESS) && pager) {
5056*e3723e1fSApple OSS Distributions err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
5057*e3723e1fSApple OSS Distributions
5058*e3723e1fSApple OSS Distributions if (err != KERN_SUCCESS) {
5059*e3723e1fSApple OSS Distributions doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
5060*e3723e1fSApple OSS Distributions } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
5061*e3723e1fSApple OSS Distributions mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
5062*e3723e1fSApple OSS Distributions }
5063*e3723e1fSApple OSS Distributions }
5064*e3723e1fSApple OSS Distributions }
5065*e3723e1fSApple OSS Distributions
5066*e3723e1fSApple OSS Distributions traceInterval.setEndArg1(err);
5067*e3723e1fSApple OSS Distributions if (err) {
5068*e3723e1fSApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5069*e3723e1fSApple OSS Distributions }
5070*e3723e1fSApple OSS Distributions return err;
5071*e3723e1fSApple OSS Distributions }
5072*e3723e1fSApple OSS Distributions
#if IOTRACKING
// Report the owning task, mapped address and length of the IOMemoryMap
// that embeds the given IOTrackingUser, for IOTracking diagnostics.
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
// Hand-rolled offsetof: the standard macro is only defined for
// standard-layout types, and IOMemoryMap (an OSObject subclass) would
// trip -Winvalid-offsetof.
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	// Recover the enclosing IOMemoryMap from its embedded fTracking member.
	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	// Only report mappings whose address map is still the live map of the
	// tracked task; otherwise the recorded address/length would be stale.
	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task = map->fAddressTask;
	*address = map->fAddress;
	*size = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5093*e3723e1fSApple OSS Distributions
5094*e3723e1fSApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5095*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5096*e3723e1fSApple OSS Distributions vm_map_t addressMap,
5097*e3723e1fSApple OSS Distributions IOVirtualAddress __address,
5098*e3723e1fSApple OSS Distributions IOByteCount __length )
5099*e3723e1fSApple OSS Distributions {
5100*e3723e1fSApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5101*e3723e1fSApple OSS Distributions IOReturn ret;
5102*e3723e1fSApple OSS Distributions ret = super::doUnmap(addressMap, __address, __length);
5103*e3723e1fSApple OSS Distributions traceInterval.setEndArg1(ret);
5104*e3723e1fSApple OSS Distributions return ret;
5105*e3723e1fSApple OSS Distributions }
5106*e3723e1fSApple OSS Distributions
5107*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5108*e3723e1fSApple OSS Distributions
5109*e3723e1fSApple OSS Distributions #undef super
5110*e3723e1fSApple OSS Distributions #define super OSObject
5111*e3723e1fSApple OSS Distributions
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Reserved vtable pad slots: keep IOMemoryMap's binary layout stable so
// methods can be added later without breaking kext compatibility.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5122*e3723e1fSApple OSS Distributions
5123*e3723e1fSApple OSS Distributions /* ex-inline function implementation */
IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
{
	// Physical address backing the first byte of this mapping.
	return getPhysicalSegment( 0, NULL );
}
5129*e3723e1fSApple OSS Distributions
5130*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5131*e3723e1fSApple OSS Distributions
5132*e3723e1fSApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5133*e3723e1fSApple OSS Distributions IOMemoryMap::init(
5134*e3723e1fSApple OSS Distributions task_t intoTask,
5135*e3723e1fSApple OSS Distributions mach_vm_address_t toAddress,
5136*e3723e1fSApple OSS Distributions IOOptionBits _options,
5137*e3723e1fSApple OSS Distributions mach_vm_size_t _offset,
5138*e3723e1fSApple OSS Distributions mach_vm_size_t _length )
5139*e3723e1fSApple OSS Distributions {
5140*e3723e1fSApple OSS Distributions if (!intoTask) {
5141*e3723e1fSApple OSS Distributions return false;
5142*e3723e1fSApple OSS Distributions }
5143*e3723e1fSApple OSS Distributions
5144*e3723e1fSApple OSS Distributions if (!super::init()) {
5145*e3723e1fSApple OSS Distributions return false;
5146*e3723e1fSApple OSS Distributions }
5147*e3723e1fSApple OSS Distributions
5148*e3723e1fSApple OSS Distributions fAddressMap = get_task_map(intoTask);
5149*e3723e1fSApple OSS Distributions if (!fAddressMap) {
5150*e3723e1fSApple OSS Distributions return false;
5151*e3723e1fSApple OSS Distributions }
5152*e3723e1fSApple OSS Distributions vm_map_reference(fAddressMap);
5153*e3723e1fSApple OSS Distributions
5154*e3723e1fSApple OSS Distributions fAddressTask = intoTask;
5155*e3723e1fSApple OSS Distributions fOptions = _options;
5156*e3723e1fSApple OSS Distributions fLength = _length;
5157*e3723e1fSApple OSS Distributions fOffset = _offset;
5158*e3723e1fSApple OSS Distributions fAddress = toAddress;
5159*e3723e1fSApple OSS Distributions
5160*e3723e1fSApple OSS Distributions return true;
5161*e3723e1fSApple OSS Distributions }
5162*e3723e1fSApple OSS Distributions
// Attach (or replace) the memory descriptor backing this mapping.
// Returns false when _memory is NULL or when a top-level mapping's
// offset+length would overrun the descriptor.
bool
IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
	if (!_memory) {
		return false;
	}

	// A sub-mapping keeps its parent's offset; only a top-level mapping
	// validates and records the offset against the descriptor's length.
	if (!fSuperMap) {
		if ((_offset + fLength) > _memory->getLength()) {
			return false;
		}
		fOffset = _offset;
	}


	// Retain the incoming descriptor before releasing the old one, so the
	// swap is safe even when _memory == fMemory.
	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
	if (fMemory) {
		if (fMemory != _memory) {
			fMemory->removeMapping(this);
		}
	}
	fMemory = os::move(tempval);

	return true;
}
5188*e3723e1fSApple OSS Distributions
// Base-class stub: IOMemoryDescriptor itself cannot create mappings;
// subclasses (e.g. IOGeneralMemoryDescriptor) override doMap to do the
// real work.
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t __addressMap,
	IOVirtualAddress * __address,
	IOOptionBits options,
	IOByteCount __offset,
	IOByteCount __length )
{
	return kIOReturnUnsupported;
}
5199*e3723e1fSApple OSS Distributions
// Fault handler hook: while the descriptor is redirected, block the
// faulting thread until redirection is lifted (redirect() issues the
// WAKEUP when it clears kIOMemoryRedirected).
IOReturn
IOMemoryDescriptor::handleFault(
	void * _pager,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Re-check after every wakeup: the flag may be set again before
		// this thread runs.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5216*e3723e1fSApple OSS Distributions
// Populate the device pager with the physical pages backing
// [sourceOffset, sourceOffset+length) of this descriptor, walking the
// descriptor's physical segments. For kernel mappings, also pre-fault
// the pmap entries so later access works from interrupt level.
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void * _pager,
	vm_map_t addressMap,
	mach_vm_address_t address,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length,
	IOOptionBits options )
{
	IOReturn err = kIOReturnSuccess;
	memory_object_t pager = (memory_object_t) _pager;
	mach_vm_size_t size;
	mach_vm_size_t bytes;
	mach_vm_size_t page;
	mach_vm_size_t pageOffset;
	mach_vm_size_t pagerOffset;
	IOPhysicalLength segLen, chunk;
	addr64_t physAddr;
	IOOptionBits type;

	type = _flags & kIOMemoryTypeMask;

	// A physically contiguous pager is always populated from its start.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the first segment down to a page boundary; the sub-page
	// remainder (pageOffset) applies to the first iteration only.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Device-backed ranges should never overlap kernel-managed pages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Feed the pager page-by-page, or one rounded whole-segment chunk
		// when the pager is known to be contiguous.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next physical segment; only the first segment
		// carried a non-zero pageOffset.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Leftover bytes mean the descriptor ran out of segments before the
	// requested range was covered.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5319*e3723e1fSApple OSS Distributions
// Tear down a mapping's VM allocation. Calling convention: __address
// actually carries the IOMemoryMap pointer and __length must be zero
// (the legacy address/length form panics).
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t addressMap,
	IOVirtualAddress __address,
	IOByteCount __length )
{
	IOReturn err;
	IOMemoryMap * mapping;
	mach_vm_address_t address;
	mach_vm_size_t length;

	if (__length) {
		panic("doUnmap");
	}

	// The real map/address/length come from the mapping object, not the
	// caller's arguments.
	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address = mapping->fAddress;
	length = mapping->fLength;

	// kIOMapOverwrite mappings were placed over an existing allocation,
	// so there is nothing to deallocate here.
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffer mappings live in one of the IOKit pageable
		// submaps rather than directly in kernel_map.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		// Extra logging for sub-16K-page (4K) maps.
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5364*e3723e1fSApple OSS Distributions
// Redirect (or un-redirect) all mappings of this descriptor. While
// redirected, faults on the memory block in handleFault(); clearing the
// redirect wakes those threads.
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;
	IOMemoryMap * mapping = NULL;
	OSSharedPtr<OSIterator> iter;

	LOCK;

	// Publish the redirect state first so new faults observe it.
	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			// Redirect every mapping. When lifting a global redirect
			// (!doRedirect && !safeTask), eagerly repopulate kernel
			// mappings, which cannot fault in lazily later.
			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	// Wake threads blocked in handleFault() now that the redirect is gone.
	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	// NOTE(review): this path overwrites any err produced by the loop
	// above — long-standing behavior, kept for compatibility.
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5419*e3723e1fSApple OSS Distributions
// Redirect a single mapping: pull its pages from the pmap so future
// accesses fault (and block while redirected), except for the "safe"
// task's own mapping, which at most has its cache mode demoted.
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// Sub-mappings are covered by their parent's redirect.
		// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing to do for a mapping that was never established or
			// has already been torn down.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Any mapping not belonging to the safe task (and not static)
			// has its pages removed so the next access faults.
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// The safe task keeps its mapping, but write-combining is
				// switched to inhibit-cache while redirected and restored
				// afterwards.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// For physical-range descriptors, propagate the redirect to the
	// descriptor itself when a safe task is given and the redirect state
	// actually changes.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5463*e3723e1fSApple OSS Distributions
// Tear down this mapping's VM range (if it owns one) and drop the
// vm_map reference taken in init(). Safe to call more than once:
// fAddress/fAddressMap are cleared on the first call.
IOReturn
IOMemoryMap::unmap( void )
{
	IOReturn err;

	LOCK;

	// Only a live, top-level, non-static mapping with a backing
	// descriptor owns a VM allocation that needs tearing down.
	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
	    && (0 == (kIOMapStatic & fOptions))) {
		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
	} else {
		err = kIOReturnSuccess;
	}

	// Release the reference on the target task's map.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}

	fAddress = 0;

	UNLOCK;

	return err;
}
5489*e3723e1fSApple OSS Distributions
// Called when the task owning this mapping dies. Unmaps if the user
// client requested automatic teardown (userClientUnmap()), otherwise
// just stops tracking; either way the dead task's map reference and the
// address state are dropped.
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		// unmap() removes tracking itself (via doUnmap); do it here only
		// on the non-unmap path.
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress = 0;
	UNLOCK;
}
5511*e3723e1fSApple OSS Distributions
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	// Request automatic unmap when the owning task dies (consumed by
	// taskDied()).
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5518*e3723e1fSApple OSS Distributions
5519*e3723e1fSApple OSS Distributions // Overload the release mechanism. All mappings must be a member
5520*e3723e1fSApple OSS Distributions // of a memory descriptors _mappings set. This means that we
5521*e3723e1fSApple OSS Distributions // always have 2 references on a mapping. When either of these mappings
5522*e3723e1fSApple OSS Distributions // are released we need to free ourselves.
5523*e3723e1fSApple OSS Distributions void
taggedRelease(const void * tag) const5524*e3723e1fSApple OSS Distributions IOMemoryMap::taggedRelease(const void *tag) const
5525*e3723e1fSApple OSS Distributions {
5526*e3723e1fSApple OSS Distributions LOCK;
5527*e3723e1fSApple OSS Distributions super::taggedRelease(tag, 2);
5528*e3723e1fSApple OSS Distributions UNLOCK;
5529*e3723e1fSApple OSS Distributions }
5530*e3723e1fSApple OSS Distributions
5531*e3723e1fSApple OSS Distributions void
free()5532*e3723e1fSApple OSS Distributions IOMemoryMap::free()
5533*e3723e1fSApple OSS Distributions {
5534*e3723e1fSApple OSS Distributions unmap();
5535*e3723e1fSApple OSS Distributions
5536*e3723e1fSApple OSS Distributions if (fMemory) {
5537*e3723e1fSApple OSS Distributions LOCK;
5538*e3723e1fSApple OSS Distributions fMemory->removeMapping(this);
5539*e3723e1fSApple OSS Distributions UNLOCK;
5540*e3723e1fSApple OSS Distributions fMemory.reset();
5541*e3723e1fSApple OSS Distributions }
5542*e3723e1fSApple OSS Distributions
5543*e3723e1fSApple OSS Distributions if (fSuperMap) {
5544*e3723e1fSApple OSS Distributions fSuperMap.reset();
5545*e3723e1fSApple OSS Distributions }
5546*e3723e1fSApple OSS Distributions
5547*e3723e1fSApple OSS Distributions if (fRedirUPL) {
5548*e3723e1fSApple OSS Distributions upl_commit(fRedirUPL, NULL, 0);
5549*e3723e1fSApple OSS Distributions upl_deallocate(fRedirUPL);
5550*e3723e1fSApple OSS Distributions }
5551*e3723e1fSApple OSS Distributions
5552*e3723e1fSApple OSS Distributions super::free();
5553*e3723e1fSApple OSS Distributions }
5554*e3723e1fSApple OSS Distributions
5555*e3723e1fSApple OSS Distributions IOByteCount
getLength()5556*e3723e1fSApple OSS Distributions IOMemoryMap::getLength()
5557*e3723e1fSApple OSS Distributions {
5558*e3723e1fSApple OSS Distributions return fLength;
5559*e3723e1fSApple OSS Distributions }
5560*e3723e1fSApple OSS Distributions
5561*e3723e1fSApple OSS Distributions IOVirtualAddress
getVirtualAddress()5562*e3723e1fSApple OSS Distributions IOMemoryMap::getVirtualAddress()
5563*e3723e1fSApple OSS Distributions {
5564*e3723e1fSApple OSS Distributions #ifndef __LP64__
5565*e3723e1fSApple OSS Distributions if (fSuperMap) {
5566*e3723e1fSApple OSS Distributions fSuperMap->getVirtualAddress();
5567*e3723e1fSApple OSS Distributions } else if (fAddressMap
5568*e3723e1fSApple OSS Distributions && vm_map_is_64bit(fAddressMap)
5569*e3723e1fSApple OSS Distributions && (sizeof(IOVirtualAddress) < 8)) {
5570*e3723e1fSApple OSS Distributions OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
5571*e3723e1fSApple OSS Distributions }
5572*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
5573*e3723e1fSApple OSS Distributions
5574*e3723e1fSApple OSS Distributions return fAddress;
5575*e3723e1fSApple OSS Distributions }
5576*e3723e1fSApple OSS Distributions
5577*e3723e1fSApple OSS Distributions #ifndef __LP64__
5578*e3723e1fSApple OSS Distributions mach_vm_address_t
getAddress()5579*e3723e1fSApple OSS Distributions IOMemoryMap::getAddress()
5580*e3723e1fSApple OSS Distributions {
5581*e3723e1fSApple OSS Distributions return fAddress;
5582*e3723e1fSApple OSS Distributions }
5583*e3723e1fSApple OSS Distributions
5584*e3723e1fSApple OSS Distributions mach_vm_size_t
getSize()5585*e3723e1fSApple OSS Distributions IOMemoryMap::getSize()
5586*e3723e1fSApple OSS Distributions {
5587*e3723e1fSApple OSS Distributions return fLength;
5588*e3723e1fSApple OSS Distributions }
5589*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
5590*e3723e1fSApple OSS Distributions
5591*e3723e1fSApple OSS Distributions
5592*e3723e1fSApple OSS Distributions task_t
getAddressTask()5593*e3723e1fSApple OSS Distributions IOMemoryMap::getAddressTask()
5594*e3723e1fSApple OSS Distributions {
5595*e3723e1fSApple OSS Distributions if (fSuperMap) {
5596*e3723e1fSApple OSS Distributions return fSuperMap->getAddressTask();
5597*e3723e1fSApple OSS Distributions } else {
5598*e3723e1fSApple OSS Distributions return fAddressTask;
5599*e3723e1fSApple OSS Distributions }
5600*e3723e1fSApple OSS Distributions }
5601*e3723e1fSApple OSS Distributions
5602*e3723e1fSApple OSS Distributions IOOptionBits
getMapOptions()5603*e3723e1fSApple OSS Distributions IOMemoryMap::getMapOptions()
5604*e3723e1fSApple OSS Distributions {
5605*e3723e1fSApple OSS Distributions return fOptions;
5606*e3723e1fSApple OSS Distributions }
5607*e3723e1fSApple OSS Distributions
5608*e3723e1fSApple OSS Distributions IOMemoryDescriptor *
getMemoryDescriptor()5609*e3723e1fSApple OSS Distributions IOMemoryMap::getMemoryDescriptor()
5610*e3723e1fSApple OSS Distributions {
5611*e3723e1fSApple OSS Distributions return fMemory.get();
5612*e3723e1fSApple OSS Distributions }
5613*e3723e1fSApple OSS Distributions
5614*e3723e1fSApple OSS Distributions IOMemoryMap *
copyCompatible(IOMemoryMap * newMapping)5615*e3723e1fSApple OSS Distributions IOMemoryMap::copyCompatible(
5616*e3723e1fSApple OSS Distributions IOMemoryMap * newMapping )
5617*e3723e1fSApple OSS Distributions {
5618*e3723e1fSApple OSS Distributions task_t task = newMapping->getAddressTask();
5619*e3723e1fSApple OSS Distributions mach_vm_address_t toAddress = newMapping->fAddress;
5620*e3723e1fSApple OSS Distributions IOOptionBits _options = newMapping->fOptions;
5621*e3723e1fSApple OSS Distributions mach_vm_size_t _offset = newMapping->fOffset;
5622*e3723e1fSApple OSS Distributions mach_vm_size_t _length = newMapping->fLength;
5623*e3723e1fSApple OSS Distributions
5624*e3723e1fSApple OSS Distributions if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5625*e3723e1fSApple OSS Distributions return NULL;
5626*e3723e1fSApple OSS Distributions }
5627*e3723e1fSApple OSS Distributions if ((fOptions ^ _options) & kIOMapReadOnly) {
5628*e3723e1fSApple OSS Distributions return NULL;
5629*e3723e1fSApple OSS Distributions }
5630*e3723e1fSApple OSS Distributions if ((fOptions ^ _options) & kIOMapGuardedMask) {
5631*e3723e1fSApple OSS Distributions return NULL;
5632*e3723e1fSApple OSS Distributions }
5633*e3723e1fSApple OSS Distributions if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5634*e3723e1fSApple OSS Distributions && ((fOptions ^ _options) & kIOMapCacheMask)) {
5635*e3723e1fSApple OSS Distributions return NULL;
5636*e3723e1fSApple OSS Distributions }
5637*e3723e1fSApple OSS Distributions
5638*e3723e1fSApple OSS Distributions if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5639*e3723e1fSApple OSS Distributions return NULL;
5640*e3723e1fSApple OSS Distributions }
5641*e3723e1fSApple OSS Distributions
5642*e3723e1fSApple OSS Distributions if (_offset < fOffset) {
5643*e3723e1fSApple OSS Distributions return NULL;
5644*e3723e1fSApple OSS Distributions }
5645*e3723e1fSApple OSS Distributions
5646*e3723e1fSApple OSS Distributions _offset -= fOffset;
5647*e3723e1fSApple OSS Distributions
5648*e3723e1fSApple OSS Distributions if ((_offset + _length) > fLength) {
5649*e3723e1fSApple OSS Distributions return NULL;
5650*e3723e1fSApple OSS Distributions }
5651*e3723e1fSApple OSS Distributions
5652*e3723e1fSApple OSS Distributions if ((fLength == _length) && (!_offset)) {
5653*e3723e1fSApple OSS Distributions retain();
5654*e3723e1fSApple OSS Distributions newMapping = this;
5655*e3723e1fSApple OSS Distributions } else {
5656*e3723e1fSApple OSS Distributions newMapping->fSuperMap.reset(this, OSRetain);
5657*e3723e1fSApple OSS Distributions newMapping->fOffset = fOffset + _offset;
5658*e3723e1fSApple OSS Distributions newMapping->fAddress = fAddress + _offset;
5659*e3723e1fSApple OSS Distributions }
5660*e3723e1fSApple OSS Distributions
5661*e3723e1fSApple OSS Distributions return newMapping;
5662*e3723e1fSApple OSS Distributions }
5663*e3723e1fSApple OSS Distributions
5664*e3723e1fSApple OSS Distributions IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5665*e3723e1fSApple OSS Distributions IOMemoryMap::wireRange(
5666*e3723e1fSApple OSS Distributions uint32_t options,
5667*e3723e1fSApple OSS Distributions mach_vm_size_t offset,
5668*e3723e1fSApple OSS Distributions mach_vm_size_t length)
5669*e3723e1fSApple OSS Distributions {
5670*e3723e1fSApple OSS Distributions IOReturn kr;
5671*e3723e1fSApple OSS Distributions mach_vm_address_t start = trunc_page_64(fAddress + offset);
5672*e3723e1fSApple OSS Distributions mach_vm_address_t end = round_page_64(fAddress + offset + length);
5673*e3723e1fSApple OSS Distributions vm_prot_t prot;
5674*e3723e1fSApple OSS Distributions
5675*e3723e1fSApple OSS Distributions prot = (kIODirectionOutIn & options);
5676*e3723e1fSApple OSS Distributions if (prot) {
5677*e3723e1fSApple OSS Distributions kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5678*e3723e1fSApple OSS Distributions } else {
5679*e3723e1fSApple OSS Distributions kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5680*e3723e1fSApple OSS Distributions }
5681*e3723e1fSApple OSS Distributions
5682*e3723e1fSApple OSS Distributions return kr;
5683*e3723e1fSApple OSS Distributions }
5684*e3723e1fSApple OSS Distributions
5685*e3723e1fSApple OSS Distributions
5686*e3723e1fSApple OSS Distributions IOPhysicalAddress
5687*e3723e1fSApple OSS Distributions #ifdef __LP64__
getPhysicalSegment(IOByteCount _offset,IOPhysicalLength * _length,IOOptionBits _options)5688*e3723e1fSApple OSS Distributions IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
5689*e3723e1fSApple OSS Distributions #else /* !__LP64__ */
5690*e3723e1fSApple OSS Distributions IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
5691*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
5692*e3723e1fSApple OSS Distributions {
5693*e3723e1fSApple OSS Distributions IOPhysicalAddress address;
5694*e3723e1fSApple OSS Distributions
5695*e3723e1fSApple OSS Distributions LOCK;
5696*e3723e1fSApple OSS Distributions #ifdef __LP64__
5697*e3723e1fSApple OSS Distributions address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
5698*e3723e1fSApple OSS Distributions #else /* !__LP64__ */
5699*e3723e1fSApple OSS Distributions address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
5700*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
5701*e3723e1fSApple OSS Distributions UNLOCK;
5702*e3723e1fSApple OSS Distributions
5703*e3723e1fSApple OSS Distributions return address;
5704*e3723e1fSApple OSS Distributions }
5705*e3723e1fSApple OSS Distributions
5706*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5707*e3723e1fSApple OSS Distributions
5708*e3723e1fSApple OSS Distributions #undef super
5709*e3723e1fSApple OSS Distributions #define super OSObject
5710*e3723e1fSApple OSS Distributions
5711*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5712*e3723e1fSApple OSS Distributions
5713*e3723e1fSApple OSS Distributions void
initialize(void)5714*e3723e1fSApple OSS Distributions IOMemoryDescriptor::initialize( void )
5715*e3723e1fSApple OSS Distributions {
5716*e3723e1fSApple OSS Distributions if (NULL == gIOMemoryLock) {
5717*e3723e1fSApple OSS Distributions gIOMemoryLock = IORecursiveLockAlloc();
5718*e3723e1fSApple OSS Distributions }
5719*e3723e1fSApple OSS Distributions
5720*e3723e1fSApple OSS Distributions gIOLastPage = IOGetLastPageNumber();
5721*e3723e1fSApple OSS Distributions }
5722*e3723e1fSApple OSS Distributions
5723*e3723e1fSApple OSS Distributions void
free(void)5724*e3723e1fSApple OSS Distributions IOMemoryDescriptor::free( void )
5725*e3723e1fSApple OSS Distributions {
5726*e3723e1fSApple OSS Distributions if (_mappings) {
5727*e3723e1fSApple OSS Distributions _mappings.reset();
5728*e3723e1fSApple OSS Distributions }
5729*e3723e1fSApple OSS Distributions
5730*e3723e1fSApple OSS Distributions if (reserved) {
5731*e3723e1fSApple OSS Distributions cleanKernelReserved(reserved);
5732*e3723e1fSApple OSS Distributions IOFreeType(reserved, IOMemoryDescriptorReserved);
5733*e3723e1fSApple OSS Distributions reserved = NULL;
5734*e3723e1fSApple OSS Distributions }
5735*e3723e1fSApple OSS Distributions super::free();
5736*e3723e1fSApple OSS Distributions }
5737*e3723e1fSApple OSS Distributions
5738*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5739*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setMapping(
5740*e3723e1fSApple OSS Distributions task_t intoTask,
5741*e3723e1fSApple OSS Distributions IOVirtualAddress mapAddress,
5742*e3723e1fSApple OSS Distributions IOOptionBits options )
5743*e3723e1fSApple OSS Distributions {
5744*e3723e1fSApple OSS Distributions return createMappingInTask( intoTask, mapAddress,
5745*e3723e1fSApple OSS Distributions options | kIOMapStatic,
5746*e3723e1fSApple OSS Distributions 0, getLength());
5747*e3723e1fSApple OSS Distributions }
5748*e3723e1fSApple OSS Distributions
5749*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5750*e3723e1fSApple OSS Distributions IOMemoryDescriptor::map(
5751*e3723e1fSApple OSS Distributions IOOptionBits options )
5752*e3723e1fSApple OSS Distributions {
5753*e3723e1fSApple OSS Distributions return createMappingInTask( kernel_task, 0,
5754*e3723e1fSApple OSS Distributions options | kIOMapAnywhere,
5755*e3723e1fSApple OSS Distributions 0, getLength());
5756*e3723e1fSApple OSS Distributions }
5757*e3723e1fSApple OSS Distributions
5758*e3723e1fSApple OSS Distributions #ifndef __LP64__
5759*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryMap>
map(task_t intoTask,IOVirtualAddress atAddress,IOOptionBits options,IOByteCount offset,IOByteCount length)5760*e3723e1fSApple OSS Distributions IOMemoryDescriptor::map(
5761*e3723e1fSApple OSS Distributions task_t intoTask,
5762*e3723e1fSApple OSS Distributions IOVirtualAddress atAddress,
5763*e3723e1fSApple OSS Distributions IOOptionBits options,
5764*e3723e1fSApple OSS Distributions IOByteCount offset,
5765*e3723e1fSApple OSS Distributions IOByteCount length )
5766*e3723e1fSApple OSS Distributions {
5767*e3723e1fSApple OSS Distributions if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
5768*e3723e1fSApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
5769*e3723e1fSApple OSS Distributions return NULL;
5770*e3723e1fSApple OSS Distributions }
5771*e3723e1fSApple OSS Distributions
5772*e3723e1fSApple OSS Distributions return createMappingInTask(intoTask, atAddress,
5773*e3723e1fSApple OSS Distributions options, offset, length);
5774*e3723e1fSApple OSS Distributions }
5775*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
5776*e3723e1fSApple OSS Distributions
5777*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5778*e3723e1fSApple OSS Distributions IOMemoryDescriptor::createMappingInTask(
5779*e3723e1fSApple OSS Distributions task_t intoTask,
5780*e3723e1fSApple OSS Distributions mach_vm_address_t atAddress,
5781*e3723e1fSApple OSS Distributions IOOptionBits options,
5782*e3723e1fSApple OSS Distributions mach_vm_size_t offset,
5783*e3723e1fSApple OSS Distributions mach_vm_size_t length)
5784*e3723e1fSApple OSS Distributions {
5785*e3723e1fSApple OSS Distributions IOMemoryMap * result;
5786*e3723e1fSApple OSS Distributions IOMemoryMap * mapping;
5787*e3723e1fSApple OSS Distributions
5788*e3723e1fSApple OSS Distributions if (0 == length) {
5789*e3723e1fSApple OSS Distributions length = getLength();
5790*e3723e1fSApple OSS Distributions }
5791*e3723e1fSApple OSS Distributions
5792*e3723e1fSApple OSS Distributions mapping = new IOMemoryMap;
5793*e3723e1fSApple OSS Distributions
5794*e3723e1fSApple OSS Distributions #if 136275805
5795*e3723e1fSApple OSS Distributions /*
5796*e3723e1fSApple OSS Distributions * XXX: Redundantly check the mapping size here so that failure stack traces
5797*e3723e1fSApple OSS Distributions * are more useful. This has no functional value but is helpful because
5798*e3723e1fSApple OSS Distributions * telemetry traps can currently only capture the last five calls and
5799*e3723e1fSApple OSS Distributions * so we want to trap as shallow as possible in a select few cases
5800*e3723e1fSApple OSS Distributions * where we anticipate issues.
5801*e3723e1fSApple OSS Distributions *
5802*e3723e1fSApple OSS Distributions * When telemetry collection is complete, this will be removed.
5803*e3723e1fSApple OSS Distributions */
5804*e3723e1fSApple OSS Distributions if (__improbable(mapping && !vm_map_is_map_size_valid(
5805*e3723e1fSApple OSS Distributions get_task_map(intoTask), length, /* no_soft_limit */ false))) {
5806*e3723e1fSApple OSS Distributions mapping->release();
5807*e3723e1fSApple OSS Distributions mapping = NULL;
5808*e3723e1fSApple OSS Distributions }
5809*e3723e1fSApple OSS Distributions #endif /* 136275805 */
5810*e3723e1fSApple OSS Distributions
5811*e3723e1fSApple OSS Distributions if (mapping
5812*e3723e1fSApple OSS Distributions && !mapping->init( intoTask, atAddress,
5813*e3723e1fSApple OSS Distributions options, offset, length )) {
5814*e3723e1fSApple OSS Distributions mapping->release();
5815*e3723e1fSApple OSS Distributions mapping = NULL;
5816*e3723e1fSApple OSS Distributions }
5817*e3723e1fSApple OSS Distributions
5818*e3723e1fSApple OSS Distributions if (mapping) {
5819*e3723e1fSApple OSS Distributions result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5820*e3723e1fSApple OSS Distributions } else {
5821*e3723e1fSApple OSS Distributions result = nullptr;
5822*e3723e1fSApple OSS Distributions }
5823*e3723e1fSApple OSS Distributions
5824*e3723e1fSApple OSS Distributions #if DEBUG
5825*e3723e1fSApple OSS Distributions if (!result) {
5826*e3723e1fSApple OSS Distributions IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5827*e3723e1fSApple OSS Distributions this, atAddress, (uint32_t) options, offset, length);
5828*e3723e1fSApple OSS Distributions }
5829*e3723e1fSApple OSS Distributions #endif
5830*e3723e1fSApple OSS Distributions
5831*e3723e1fSApple OSS Distributions // already retained through makeMapping
5832*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5833*e3723e1fSApple OSS Distributions
5834*e3723e1fSApple OSS Distributions return retval;
5835*e3723e1fSApple OSS Distributions }
5836*e3723e1fSApple OSS Distributions
5837*e3723e1fSApple OSS Distributions #ifndef __LP64__ // there is only a 64 bit version for LP64
5838*e3723e1fSApple OSS Distributions IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,IOByteCount offset)5839*e3723e1fSApple OSS Distributions IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5840*e3723e1fSApple OSS Distributions IOOptionBits options,
5841*e3723e1fSApple OSS Distributions IOByteCount offset)
5842*e3723e1fSApple OSS Distributions {
5843*e3723e1fSApple OSS Distributions return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
5844*e3723e1fSApple OSS Distributions }
5845*e3723e1fSApple OSS Distributions #endif
5846*e3723e1fSApple OSS Distributions
5847*e3723e1fSApple OSS Distributions IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,mach_vm_size_t offset)5848*e3723e1fSApple OSS Distributions IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5849*e3723e1fSApple OSS Distributions IOOptionBits options,
5850*e3723e1fSApple OSS Distributions mach_vm_size_t offset)
5851*e3723e1fSApple OSS Distributions {
5852*e3723e1fSApple OSS Distributions IOReturn err = kIOReturnSuccess;
5853*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> physMem;
5854*e3723e1fSApple OSS Distributions
5855*e3723e1fSApple OSS Distributions LOCK;
5856*e3723e1fSApple OSS Distributions
5857*e3723e1fSApple OSS Distributions if (fAddress && fAddressMap) {
5858*e3723e1fSApple OSS Distributions do{
5859*e3723e1fSApple OSS Distributions if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5860*e3723e1fSApple OSS Distributions || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5861*e3723e1fSApple OSS Distributions physMem = fMemory;
5862*e3723e1fSApple OSS Distributions }
5863*e3723e1fSApple OSS Distributions
5864*e3723e1fSApple OSS Distributions if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
5865*e3723e1fSApple OSS Distributions upl_size_t size = (typeof(size))round_page(fLength);
5866*e3723e1fSApple OSS Distributions upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5867*e3723e1fSApple OSS Distributions | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5868*e3723e1fSApple OSS Distributions if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
5869*e3723e1fSApple OSS Distributions NULL, NULL,
5870*e3723e1fSApple OSS Distributions &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
5871*e3723e1fSApple OSS Distributions fRedirUPL = NULL;
5872*e3723e1fSApple OSS Distributions }
5873*e3723e1fSApple OSS Distributions
5874*e3723e1fSApple OSS Distributions if (physMem) {
5875*e3723e1fSApple OSS Distributions IOUnmapPages( fAddressMap, fAddress, fLength );
5876*e3723e1fSApple OSS Distributions if ((false)) {
5877*e3723e1fSApple OSS Distributions physMem->redirect(NULL, true);
5878*e3723e1fSApple OSS Distributions }
5879*e3723e1fSApple OSS Distributions }
5880*e3723e1fSApple OSS Distributions }
5881*e3723e1fSApple OSS Distributions
5882*e3723e1fSApple OSS Distributions if (newBackingMemory) {
5883*e3723e1fSApple OSS Distributions if (newBackingMemory != fMemory) {
5884*e3723e1fSApple OSS Distributions fOffset = 0;
5885*e3723e1fSApple OSS Distributions if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
5886*e3723e1fSApple OSS Distributions options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
5887*e3723e1fSApple OSS Distributions offset, fLength)) {
5888*e3723e1fSApple OSS Distributions err = kIOReturnError;
5889*e3723e1fSApple OSS Distributions }
5890*e3723e1fSApple OSS Distributions }
5891*e3723e1fSApple OSS Distributions if (fRedirUPL) {
5892*e3723e1fSApple OSS Distributions upl_commit(fRedirUPL, NULL, 0);
5893*e3723e1fSApple OSS Distributions upl_deallocate(fRedirUPL);
5894*e3723e1fSApple OSS Distributions fRedirUPL = NULL;
5895*e3723e1fSApple OSS Distributions }
5896*e3723e1fSApple OSS Distributions if ((false) && physMem) {
5897*e3723e1fSApple OSS Distributions physMem->redirect(NULL, false);
5898*e3723e1fSApple OSS Distributions }
5899*e3723e1fSApple OSS Distributions }
5900*e3723e1fSApple OSS Distributions }while (false);
5901*e3723e1fSApple OSS Distributions }
5902*e3723e1fSApple OSS Distributions
5903*e3723e1fSApple OSS Distributions UNLOCK;
5904*e3723e1fSApple OSS Distributions
5905*e3723e1fSApple OSS Distributions return err;
5906*e3723e1fSApple OSS Distributions }
5907*e3723e1fSApple OSS Distributions
5908*e3723e1fSApple OSS Distributions IOMemoryMap *
makeMapping(IOMemoryDescriptor * owner,task_t __intoTask,IOVirtualAddress __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5909*e3723e1fSApple OSS Distributions IOMemoryDescriptor::makeMapping(
5910*e3723e1fSApple OSS Distributions IOMemoryDescriptor * owner,
5911*e3723e1fSApple OSS Distributions task_t __intoTask,
5912*e3723e1fSApple OSS Distributions IOVirtualAddress __address,
5913*e3723e1fSApple OSS Distributions IOOptionBits options,
5914*e3723e1fSApple OSS Distributions IOByteCount __offset,
5915*e3723e1fSApple OSS Distributions IOByteCount __length )
5916*e3723e1fSApple OSS Distributions {
5917*e3723e1fSApple OSS Distributions #ifndef __LP64__
5918*e3723e1fSApple OSS Distributions if (!(kIOMap64Bit & options)) {
5919*e3723e1fSApple OSS Distributions panic("IOMemoryDescriptor::makeMapping !64bit");
5920*e3723e1fSApple OSS Distributions }
5921*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
5922*e3723e1fSApple OSS Distributions
5923*e3723e1fSApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> mapDesc;
5924*e3723e1fSApple OSS Distributions __block IOMemoryMap * result = NULL;
5925*e3723e1fSApple OSS Distributions
5926*e3723e1fSApple OSS Distributions IOMemoryMap * mapping = (IOMemoryMap *) __address;
5927*e3723e1fSApple OSS Distributions mach_vm_size_t offset = mapping->fOffset + __offset;
5928*e3723e1fSApple OSS Distributions mach_vm_size_t length = mapping->fLength;
5929*e3723e1fSApple OSS Distributions
5930*e3723e1fSApple OSS Distributions mapping->fOffset = offset;
5931*e3723e1fSApple OSS Distributions
5932*e3723e1fSApple OSS Distributions LOCK;
5933*e3723e1fSApple OSS Distributions
5934*e3723e1fSApple OSS Distributions do{
5935*e3723e1fSApple OSS Distributions if (kIOMapStatic & options) {
5936*e3723e1fSApple OSS Distributions result = mapping;
5937*e3723e1fSApple OSS Distributions addMapping(mapping);
5938*e3723e1fSApple OSS Distributions mapping->setMemoryDescriptor(this, 0);
5939*e3723e1fSApple OSS Distributions continue;
5940*e3723e1fSApple OSS Distributions }
5941*e3723e1fSApple OSS Distributions
5942*e3723e1fSApple OSS Distributions if (kIOMapUnique & options) {
5943*e3723e1fSApple OSS Distributions addr64_t phys;
5944*e3723e1fSApple OSS Distributions IOByteCount physLen;
5945*e3723e1fSApple OSS Distributions
5946*e3723e1fSApple OSS Distributions // if (owner != this) continue;
5947*e3723e1fSApple OSS Distributions
5948*e3723e1fSApple OSS Distributions if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5949*e3723e1fSApple OSS Distributions || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5950*e3723e1fSApple OSS Distributions phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
5951*e3723e1fSApple OSS Distributions if (!phys || (physLen < length)) {
5952*e3723e1fSApple OSS Distributions continue;
5953*e3723e1fSApple OSS Distributions }
5954*e3723e1fSApple OSS Distributions
5955*e3723e1fSApple OSS Distributions mapDesc = IOMemoryDescriptor::withAddressRange(
5956*e3723e1fSApple OSS Distributions phys, length, getDirection() | kIOMemoryMapperNone, NULL);
5957*e3723e1fSApple OSS Distributions if (!mapDesc) {
5958*e3723e1fSApple OSS Distributions continue;
5959*e3723e1fSApple OSS Distributions }
5960*e3723e1fSApple OSS Distributions offset = 0;
5961*e3723e1fSApple OSS Distributions mapping->fOffset = offset;
5962*e3723e1fSApple OSS Distributions }
5963*e3723e1fSApple OSS Distributions } else {
5964*e3723e1fSApple OSS Distributions // look for a compatible existing mapping
5965*e3723e1fSApple OSS Distributions if (_mappings) {
5966*e3723e1fSApple OSS Distributions _mappings->iterateObjects(^(OSObject * object)
5967*e3723e1fSApple OSS Distributions {
5968*e3723e1fSApple OSS Distributions IOMemoryMap * lookMapping = (IOMemoryMap *) object;
5969*e3723e1fSApple OSS Distributions if ((result = lookMapping->copyCompatible(mapping))) {
5970*e3723e1fSApple OSS Distributions addMapping(result);
5971*e3723e1fSApple OSS Distributions result->setMemoryDescriptor(this, offset);
5972*e3723e1fSApple OSS Distributions return true;
5973*e3723e1fSApple OSS Distributions }
5974*e3723e1fSApple OSS Distributions return false;
5975*e3723e1fSApple OSS Distributions });
5976*e3723e1fSApple OSS Distributions }
5977*e3723e1fSApple OSS Distributions if (result || (options & kIOMapReference)) {
5978*e3723e1fSApple OSS Distributions if (result != mapping) {
5979*e3723e1fSApple OSS Distributions mapping->release();
5980*e3723e1fSApple OSS Distributions mapping = NULL;
5981*e3723e1fSApple OSS Distributions }
5982*e3723e1fSApple OSS Distributions continue;
5983*e3723e1fSApple OSS Distributions }
5984*e3723e1fSApple OSS Distributions }
5985*e3723e1fSApple OSS Distributions
5986*e3723e1fSApple OSS Distributions if (!mapDesc) {
5987*e3723e1fSApple OSS Distributions mapDesc.reset(this, OSRetain);
5988*e3723e1fSApple OSS Distributions }
5989*e3723e1fSApple OSS Distributions IOReturn
5990*e3723e1fSApple OSS Distributions kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
5991*e3723e1fSApple OSS Distributions if (kIOReturnSuccess == kr) {
5992*e3723e1fSApple OSS Distributions result = mapping;
5993*e3723e1fSApple OSS Distributions mapDesc->addMapping(result);
5994*e3723e1fSApple OSS Distributions result->setMemoryDescriptor(mapDesc.get(), offset);
5995*e3723e1fSApple OSS Distributions } else {
5996*e3723e1fSApple OSS Distributions mapping->release();
5997*e3723e1fSApple OSS Distributions mapping = NULL;
5998*e3723e1fSApple OSS Distributions }
5999*e3723e1fSApple OSS Distributions }while (false);
6000*e3723e1fSApple OSS Distributions
6001*e3723e1fSApple OSS Distributions UNLOCK;
6002*e3723e1fSApple OSS Distributions
6003*e3723e1fSApple OSS Distributions return result;
6004*e3723e1fSApple OSS Distributions }
6005*e3723e1fSApple OSS Distributions
6006*e3723e1fSApple OSS Distributions void
addMapping(IOMemoryMap * mapping)6007*e3723e1fSApple OSS Distributions IOMemoryDescriptor::addMapping(
6008*e3723e1fSApple OSS Distributions IOMemoryMap * mapping )
6009*e3723e1fSApple OSS Distributions {
6010*e3723e1fSApple OSS Distributions if (mapping) {
6011*e3723e1fSApple OSS Distributions if (NULL == _mappings) {
6012*e3723e1fSApple OSS Distributions _mappings = OSSet::withCapacity(1);
6013*e3723e1fSApple OSS Distributions }
6014*e3723e1fSApple OSS Distributions if (_mappings) {
6015*e3723e1fSApple OSS Distributions _mappings->setObject( mapping );
6016*e3723e1fSApple OSS Distributions }
6017*e3723e1fSApple OSS Distributions }
6018*e3723e1fSApple OSS Distributions }
6019*e3723e1fSApple OSS Distributions
6020*e3723e1fSApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)6021*e3723e1fSApple OSS Distributions IOMemoryDescriptor::removeMapping(
6022*e3723e1fSApple OSS Distributions IOMemoryMap * mapping )
6023*e3723e1fSApple OSS Distributions {
6024*e3723e1fSApple OSS Distributions if (_mappings) {
6025*e3723e1fSApple OSS Distributions _mappings->removeObject( mapping);
6026*e3723e1fSApple OSS Distributions }
6027*e3723e1fSApple OSS Distributions }
6028*e3723e1fSApple OSS Distributions
6029*e3723e1fSApple OSS Distributions void
setMapperOptions(uint16_t options)6030*e3723e1fSApple OSS Distributions IOMemoryDescriptor::setMapperOptions( uint16_t options)
6031*e3723e1fSApple OSS Distributions {
6032*e3723e1fSApple OSS Distributions _iomapperOptions = options;
6033*e3723e1fSApple OSS Distributions }
6034*e3723e1fSApple OSS Distributions
6035*e3723e1fSApple OSS Distributions uint16_t
getMapperOptions(void)6036*e3723e1fSApple OSS Distributions IOMemoryDescriptor::getMapperOptions( void )
6037*e3723e1fSApple OSS Distributions {
6038*e3723e1fSApple OSS Distributions return _iomapperOptions;
6039*e3723e1fSApple OSS Distributions }
6040*e3723e1fSApple OSS Distributions
6041*e3723e1fSApple OSS Distributions #ifndef __LP64__
6042*e3723e1fSApple OSS Distributions // obsolete initializers
6043*e3723e1fSApple OSS Distributions // - initWithOptions is the designated initializer
6044*e3723e1fSApple OSS Distributions bool
initWithAddress(void * address,IOByteCount length,IODirection direction)6045*e3723e1fSApple OSS Distributions IOMemoryDescriptor::initWithAddress(void * address,
6046*e3723e1fSApple OSS Distributions IOByteCount length,
6047*e3723e1fSApple OSS Distributions IODirection direction)
6048*e3723e1fSApple OSS Distributions {
6049*e3723e1fSApple OSS Distributions return false;
6050*e3723e1fSApple OSS Distributions }
6051*e3723e1fSApple OSS Distributions
// Obsolete pre-LP64 initializer (task variant) kept only for binary
// compatibility; always fails — use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t task)
{
	return false;
}
6060*e3723e1fSApple OSS Distributions
// Obsolete pre-LP64 initializer kept only for binary compatibility;
// always fails — use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount length,
	IODirection direction )
{
	return false;
}
6069*e3723e1fSApple OSS Distributions
// Obsolete pre-LP64 initializer kept only for binary compatibility;
// always fails — use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32 withCount,
	IODirection direction,
	task_t task,
	bool asReference)
{
	return false;
}
6080*e3723e1fSApple OSS Distributions
// Obsolete pre-LP64 initializer kept only for binary compatibility;
// always fails — use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
    UInt32 withCount,
    IODirection direction,
    bool asReference)
{
	return false;
}
6089*e3723e1fSApple OSS Distributions
// Obsolete pre-LP64 accessor kept only for binary compatibility; always
// returns NULL (no virtual segment information available this way).
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
6096*e3723e1fSApple OSS Distributions #endif /* !__LP64__ */
6097*e3723e1fSApple OSS Distributions
6098*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6099*e3723e1fSApple OSS Distributions
6100*e3723e1fSApple OSS Distributions bool
serialize(OSSerialize * s) const6101*e3723e1fSApple OSS Distributions IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
6102*e3723e1fSApple OSS Distributions {
6103*e3723e1fSApple OSS Distributions OSSharedPtr<OSSymbol const> keys[2] = {NULL};
6104*e3723e1fSApple OSS Distributions OSSharedPtr<OSObject> values[2] = {NULL};
6105*e3723e1fSApple OSS Distributions OSSharedPtr<OSArray> array;
6106*e3723e1fSApple OSS Distributions
6107*e3723e1fSApple OSS Distributions struct SerData {
6108*e3723e1fSApple OSS Distributions user_addr_t address;
6109*e3723e1fSApple OSS Distributions user_size_t length;
6110*e3723e1fSApple OSS Distributions };
6111*e3723e1fSApple OSS Distributions
6112*e3723e1fSApple OSS Distributions unsigned int index;
6113*e3723e1fSApple OSS Distributions
6114*e3723e1fSApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
6115*e3723e1fSApple OSS Distributions
6116*e3723e1fSApple OSS Distributions if (s == NULL) {
6117*e3723e1fSApple OSS Distributions return false;
6118*e3723e1fSApple OSS Distributions }
6119*e3723e1fSApple OSS Distributions
6120*e3723e1fSApple OSS Distributions array = OSArray::withCapacity(4);
6121*e3723e1fSApple OSS Distributions if (!array) {
6122*e3723e1fSApple OSS Distributions return false;
6123*e3723e1fSApple OSS Distributions }
6124*e3723e1fSApple OSS Distributions
6125*e3723e1fSApple OSS Distributions OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6126*e3723e1fSApple OSS Distributions if (!vcopy) {
6127*e3723e1fSApple OSS Distributions return false;
6128*e3723e1fSApple OSS Distributions }
6129*e3723e1fSApple OSS Distributions
6130*e3723e1fSApple OSS Distributions keys[0] = OSSymbol::withCString("address");
6131*e3723e1fSApple OSS Distributions keys[1] = OSSymbol::withCString("length");
6132*e3723e1fSApple OSS Distributions
6133*e3723e1fSApple OSS Distributions // Copy the volatile data so we don't have to allocate memory
6134*e3723e1fSApple OSS Distributions // while the lock is held.
6135*e3723e1fSApple OSS Distributions LOCK;
6136*e3723e1fSApple OSS Distributions if (vcopy.size() == _rangesCount) {
6137*e3723e1fSApple OSS Distributions Ranges vec = _ranges;
6138*e3723e1fSApple OSS Distributions for (index = 0; index < vcopy.size(); index++) {
6139*e3723e1fSApple OSS Distributions mach_vm_address_t addr; mach_vm_size_t len;
6140*e3723e1fSApple OSS Distributions getAddrLenForInd(addr, len, type, vec, index, _task);
6141*e3723e1fSApple OSS Distributions vcopy[index].address = addr;
6142*e3723e1fSApple OSS Distributions vcopy[index].length = len;
6143*e3723e1fSApple OSS Distributions }
6144*e3723e1fSApple OSS Distributions } else {
6145*e3723e1fSApple OSS Distributions // The descriptor changed out from under us. Give up.
6146*e3723e1fSApple OSS Distributions UNLOCK;
6147*e3723e1fSApple OSS Distributions return false;
6148*e3723e1fSApple OSS Distributions }
6149*e3723e1fSApple OSS Distributions UNLOCK;
6150*e3723e1fSApple OSS Distributions
6151*e3723e1fSApple OSS Distributions for (index = 0; index < vcopy.size(); index++) {
6152*e3723e1fSApple OSS Distributions user_addr_t addr = vcopy[index].address;
6153*e3723e1fSApple OSS Distributions IOByteCount len = (IOByteCount) vcopy[index].length;
6154*e3723e1fSApple OSS Distributions values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6155*e3723e1fSApple OSS Distributions if (values[0] == NULL) {
6156*e3723e1fSApple OSS Distributions return false;
6157*e3723e1fSApple OSS Distributions }
6158*e3723e1fSApple OSS Distributions values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6159*e3723e1fSApple OSS Distributions if (values[1] == NULL) {
6160*e3723e1fSApple OSS Distributions return false;
6161*e3723e1fSApple OSS Distributions }
6162*e3723e1fSApple OSS Distributions OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6163*e3723e1fSApple OSS Distributions if (dict == NULL) {
6164*e3723e1fSApple OSS Distributions return false;
6165*e3723e1fSApple OSS Distributions }
6166*e3723e1fSApple OSS Distributions array->setObject(dict.get());
6167*e3723e1fSApple OSS Distributions dict.reset();
6168*e3723e1fSApple OSS Distributions values[0].reset();
6169*e3723e1fSApple OSS Distributions values[1].reset();
6170*e3723e1fSApple OSS Distributions }
6171*e3723e1fSApple OSS Distributions
6172*e3723e1fSApple OSS Distributions return array->serialize(s);
6173*e3723e1fSApple OSS Distributions }
6174*e3723e1fSApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6175*e3723e1fSApple OSS Distributions
// Reserved vtable padding for IOMemoryDescriptor.  Slot 0 is consumed on
// all configurations; on LP64 the remaining legacy slots 1-7 stay unused,
// while on ILP32 they back the obsolete pre-LP64 entry points defined
// above.  Slots 8-15 are spare on every configuration.
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6202*e3723e1fSApple OSS Distributions
/* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
// Variable-size kalloc type for the mixed ioGMDData allocation: a fixed
// header (struct ioGMDData) followed by trailing variable-count data
// whose element type for sizing purposes is struct ioPLBlock.
KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
    struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6206*e3723e1fSApple OSS Distributions
/* ex-inline function implementation */
// Convenience accessor: the physical address backing offset 0 of the
// descriptor, i.e. getPhysicalSegment(0, NULL) with the segment length
// discarded.
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
6213*e3723e1fSApple OSS Distributions
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData,OSObject)6214*e3723e1fSApple OSS Distributions OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6215*e3723e1fSApple OSS Distributions
6216*e3723e1fSApple OSS Distributions OSPtr<_IOMemoryDescriptorMixedData>
6217*e3723e1fSApple OSS Distributions _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6218*e3723e1fSApple OSS Distributions {
6219*e3723e1fSApple OSS Distributions OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6220*e3723e1fSApple OSS Distributions if (me && !me->initWithCapacity(capacity)) {
6221*e3723e1fSApple OSS Distributions return nullptr;
6222*e3723e1fSApple OSS Distributions }
6223*e3723e1fSApple OSS Distributions return me;
6224*e3723e1fSApple OSS Distributions }
6225*e3723e1fSApple OSS Distributions
// (Re)initialize with at least `capacity` bytes of backing storage.
// May be called on an already-initialized object: existing storage is
// released first if it is too small (or if capacity is 0), otherwise it
// is reused.  Length is reset to 0 in every case.  Returns false on
// OSObject::init() or allocation failure.
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	// Re-init path: drop storage that cannot satisfy the new request.
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	// Allocate zero-filled storage only if none survived the check above.
	if (!_data && capacity) {
		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6250*e3723e1fSApple OSS Distributions
// OSObject teardown: release the backing storage, then chain to the
// superclass destructor.
void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}
6257*e3723e1fSApple OSS Distributions
// Release the backing buffer and reset all bookkeeping.  The kfree must
// see the original _capacity (the allocation size) before it is zeroed.
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6265*e3723e1fSApple OSS Distributions
6266*e3723e1fSApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6267*e3723e1fSApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6268*e3723e1fSApple OSS Distributions {
6269*e3723e1fSApple OSS Distributions const auto oldLength = getLength();
6270*e3723e1fSApple OSS Distributions size_t newLength;
6271*e3723e1fSApple OSS Distributions if (os_add_overflow(oldLength, length, &newLength)) {
6272*e3723e1fSApple OSS Distributions return false;
6273*e3723e1fSApple OSS Distributions }
6274*e3723e1fSApple OSS Distributions
6275*e3723e1fSApple OSS Distributions if (!setLength(newLength)) {
6276*e3723e1fSApple OSS Distributions return false;
6277*e3723e1fSApple OSS Distributions }
6278*e3723e1fSApple OSS Distributions
6279*e3723e1fSApple OSS Distributions unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6280*e3723e1fSApple OSS Distributions if (bytes) {
6281*e3723e1fSApple OSS Distributions bcopy(bytes, dest, length);
6282*e3723e1fSApple OSS Distributions }
6283*e3723e1fSApple OSS Distributions
6284*e3723e1fSApple OSS Distributions return true;
6285*e3723e1fSApple OSS Distributions }
6286*e3723e1fSApple OSS Distributions
// Set the logical length, reallocating the backing buffer when it is
// absent or too small.  On failure the previous buffer and length are
// left intact (krealloc does not free the original on failure here).
// Shrinking never releases storage; _capacity only grows.
bool
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void *newData;

		// Grows in place or copies; new tail bytes arrive zeroed
		// (Z_WAITOK_ZERO).
		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
		    NULL);
		if (!newData) {
			return false;
		}

		_data = newData;
		_capacity = length;
	}

	_length = length;
	return true;
}
6307*e3723e1fSApple OSS Distributions
6308*e3723e1fSApple OSS Distributions const void *
getBytes() const6309*e3723e1fSApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6310*e3723e1fSApple OSS Distributions {
6311*e3723e1fSApple OSS Distributions return _length ? _data : nullptr;
6312*e3723e1fSApple OSS Distributions }
6313*e3723e1fSApple OSS Distributions
6314*e3723e1fSApple OSS Distributions size_t
getLength() const6315*e3723e1fSApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6316*e3723e1fSApple OSS Distributions {
6317*e3723e1fSApple OSS Distributions return _data ? _length : 0;
6318*e3723e1fSApple OSS Distributions }
6319