1*43a90889SApple OSS Distributions /*
2*43a90889SApple OSS Distributions * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*43a90889SApple OSS Distributions *
4*43a90889SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*43a90889SApple OSS Distributions *
6*43a90889SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*43a90889SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*43a90889SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*43a90889SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*43a90889SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*43a90889SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*43a90889SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*43a90889SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*43a90889SApple OSS Distributions *
15*43a90889SApple OSS Distributions * Please obtain a copy of the License at
16*43a90889SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*43a90889SApple OSS Distributions *
18*43a90889SApple OSS Distributions * The Original Code and all software distributed under the License are
19*43a90889SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*43a90889SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*43a90889SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*43a90889SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*43a90889SApple OSS Distributions * Please see the License for the specific language governing rights and
24*43a90889SApple OSS Distributions * limitations under the License.
25*43a90889SApple OSS Distributions *
26*43a90889SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*43a90889SApple OSS Distributions */
28*43a90889SApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*43a90889SApple OSS Distributions
30*43a90889SApple OSS Distributions #include <sys/cdefs.h>
31*43a90889SApple OSS Distributions
32*43a90889SApple OSS Distributions #include <IOKit/assert.h>
33*43a90889SApple OSS Distributions #include <IOKit/system.h>
34*43a90889SApple OSS Distributions #include <IOKit/IOLib.h>
35*43a90889SApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*43a90889SApple OSS Distributions #include <IOKit/IOMapper.h>
37*43a90889SApple OSS Distributions #include <IOKit/IODMACommand.h>
38*43a90889SApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*43a90889SApple OSS Distributions
40*43a90889SApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*43a90889SApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*43a90889SApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*43a90889SApple OSS Distributions
44*43a90889SApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*43a90889SApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*43a90889SApple OSS Distributions #include <libkern/OSDebug.h>
47*43a90889SApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*43a90889SApple OSS Distributions
49*43a90889SApple OSS Distributions #include "IOKitKernelInternal.h"
50*43a90889SApple OSS Distributions
51*43a90889SApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*43a90889SApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*43a90889SApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*43a90889SApple OSS Distributions #include <libkern/c++/OSArray.h>
55*43a90889SApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*43a90889SApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*43a90889SApple OSS Distributions #include <os/overflow.h>
58*43a90889SApple OSS Distributions #include <os/cpp_util.h>
59*43a90889SApple OSS Distributions #include <os/base_private.h>
60*43a90889SApple OSS Distributions
61*43a90889SApple OSS Distributions #include <sys/uio.h>
62*43a90889SApple OSS Distributions
63*43a90889SApple OSS Distributions __BEGIN_DECLS
64*43a90889SApple OSS Distributions #include <vm/pmap.h>
65*43a90889SApple OSS Distributions #include <vm/vm_pageout_xnu.h>
66*43a90889SApple OSS Distributions #include <mach/memory_object_types.h>
67*43a90889SApple OSS Distributions #include <device/device_port.h>
68*43a90889SApple OSS Distributions
69*43a90889SApple OSS Distributions #include <mach/vm_prot.h>
70*43a90889SApple OSS Distributions #include <mach/mach_vm.h>
71*43a90889SApple OSS Distributions #include <mach/memory_entry.h>
72*43a90889SApple OSS Distributions #include <mach/mach_host.h>
73*43a90889SApple OSS Distributions #include <vm/vm_fault_xnu.h>
74*43a90889SApple OSS Distributions #include <vm/vm_protos.h>
75*43a90889SApple OSS Distributions #include <vm/vm_memory_entry.h>
76*43a90889SApple OSS Distributions #include <vm/vm_kern_xnu.h>
77*43a90889SApple OSS Distributions #include <vm/vm_iokit.h>
78*43a90889SApple OSS Distributions #include <vm/vm_map_xnu.h>
79*43a90889SApple OSS Distributions #include <kern/thread.h>
80*43a90889SApple OSS Distributions
81*43a90889SApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
82*43a90889SApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
83*43a90889SApple OSS Distributions
84*43a90889SApple OSS Distributions __END_DECLS
85*43a90889SApple OSS Distributions
/* Sentinel mapper value: callers pass this to request the system mapper;
 * it is replaced with the real gIOSystemMapper once available. */
#define kIOMapperWaitSystem     ((IOMapper *) 1)

// Cached pointer to the system-wide IOMapper (set later in this file).
static IOMapper * gIOSystemMapper = NULL;

// NOTE(review): presumably the highest valid physical page number — confirm
// where this is initialized before relying on its semantics.
ppnum_t gIOLastPage;

enum {
	// Guard-region size used around large mappings.
	kIOMapGuardSizeLarge = 65536
};
95*43a90889SApple OSS Distributions
96*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
97*43a90889SApple OSS Distributions
// IOMemoryDescriptor is abstract: it has metaclass machinery but cannot be
// instantiated directly.
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

// `super` refers to the parent class for the IOGeneralMemoryDescriptor
// member functions defined below.
#define super IOMemoryDescriptor

// Concrete subclass; ZC_ZFREE_CLEARMEM zeroes instances on free.
OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
105*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106*43a90889SApple OSS Distributions
// Global recursive lock serializing memory-descriptor state transitions.
static IORecursiveLock * gIOMemoryLock;

// Convenience wrappers around the global lock. SLEEP/WAKEUP use `this` as
// the wait event, so they are only meaningful inside member functions.
#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug tracing: flip the `#if 0` to route DEBG() to kprintf.
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
120*43a90889SApple OSS Distributions
121*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
122*43a90889SApple OSS Distributions
123*43a90889SApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
124*43a90889SApple OSS Distributions // Function
125*43a90889SApple OSS Distributions
// Flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,
	kIOPLExternUPL = 0x00000002,
};
130*43a90889SApple OSS Distributions
// Initialization payload used when constructing a descriptor from a
// previously created (persistent) memory reference.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;     // descriptor the reference came from
	IOMemoryReference *               fMemRef; // the persistent reference itself
};
135*43a90889SApple OSS Distributions
// Book-keeping for one UPL covering a span of the descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo;         // Pointer to page list or index into it
	uint64_t fIOMDOffset;           // The offset of this iopl in descriptor
	ppnum_t fMappedPage;            // Page number of first page in this iopl
	unsigned int fPageOffset;       // Offset within first page of iopl
	unsigned int fFlags;            // Flags (see ioPLBlockFlags)
};
144*43a90889SApple OSS Distributions
// NOTE(review): presumed upper bound on distinct wire tags tracked — confirm.
enum { kMaxWireTags = 6 };

// Variable-length private state for IOGeneralMemoryDescriptor. Stored inside
// an _IOMemoryDescriptorMixedData buffer and reached via the getDataP /
// getIOPLList / getNumIOPL macros defined below.
struct ioGMDData {
	IOMapper *  fMapper;            // mapper used for DMA mapping, if any
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;        // valid only when fMappedBaseValid is set
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;
#endif /* IOTRACKING */
	unsigned int fPageCnt;          // number of entries in fPageList
	uint8_t     fDMAMapNumAddressBits;
	unsigned char fCompletionError:1;
	unsigned char fMappedBaseValid:1;
	unsigned char _resv:4;
	unsigned char fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	// Conceptually followed by an ioPLBlock array located by getIOPLList():
	//ioPLBlock fBlocks[1];
};
172*43a90889SApple OSS Distributions
173*43a90889SApple OSS Distributions #pragma GCC visibility push(hidden)
174*43a90889SApple OSS Distributions
// Internal growable byte buffer; backs the ioGMDData header plus its
// trailing variable-length arrays. (Method bodies are defined elsewhere.)
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Factory wrapper around initWithCapacity().
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append `length` bytes to the buffer; returns false on failure.
	bool appendBytes(const void * bytes, size_t length);
	// Set the logical length; returns false on failure.
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();

	void * _data = nullptr;         // backing allocation
	size_t _length = 0;             // bytes in use
	size_t _capacity = 0;           // bytes allocated
};
197*43a90889SApple OSS Distributions
198*43a90889SApple OSS Distributions #pragma GCC visibility pop
199*43a90889SApple OSS Distributions
// Accessors for the ioGMDData held inside an _IOMemoryDescriptorMixedData.
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
// The ioPLBlock array starts immediately after the fPageCnt page-info slots.
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
// Number of ioPLBlocks currently stored, derived from the buffer length.
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
// Total bytes required for p page-info entries plus u ioPLBlocks.
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
209*43a90889SApple OSS Distributions
210*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
211*43a90889SApple OSS Distributions
212*43a90889SApple OSS Distributions extern "C" {
213*43a90889SApple OSS Distributions kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)214*43a90889SApple OSS Distributions device_data_action(
215*43a90889SApple OSS Distributions uintptr_t device_handle,
216*43a90889SApple OSS Distributions ipc_port_t device_pager,
217*43a90889SApple OSS Distributions vm_prot_t protection,
218*43a90889SApple OSS Distributions vm_object_offset_t offset,
219*43a90889SApple OSS Distributions vm_size_t size)
220*43a90889SApple OSS Distributions {
221*43a90889SApple OSS Distributions kern_return_t kr;
222*43a90889SApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
223*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> memDesc;
224*43a90889SApple OSS Distributions
225*43a90889SApple OSS Distributions LOCK;
226*43a90889SApple OSS Distributions if (ref->dp.memory) {
227*43a90889SApple OSS Distributions memDesc.reset(ref->dp.memory, OSRetain);
228*43a90889SApple OSS Distributions kr = memDesc->handleFault(device_pager, offset, size);
229*43a90889SApple OSS Distributions memDesc.reset();
230*43a90889SApple OSS Distributions } else {
231*43a90889SApple OSS Distributions kr = KERN_ABORTED;
232*43a90889SApple OSS Distributions }
233*43a90889SApple OSS Distributions UNLOCK;
234*43a90889SApple OSS Distributions
235*43a90889SApple OSS Distributions return kr;
236*43a90889SApple OSS Distributions }
237*43a90889SApple OSS Distributions
238*43a90889SApple OSS Distributions kern_return_t
device_close(uintptr_t device_handle)239*43a90889SApple OSS Distributions device_close(
240*43a90889SApple OSS Distributions uintptr_t device_handle)
241*43a90889SApple OSS Distributions {
242*43a90889SApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
243*43a90889SApple OSS Distributions
244*43a90889SApple OSS Distributions IOFreeType( ref, IOMemoryDescriptorReserved );
245*43a90889SApple OSS Distributions
246*43a90889SApple OSS Distributions return kIOReturnSuccess;
247*43a90889SApple OSS Distributions }
248*43a90889SApple OSS Distributions }; // end extern "C"
249*43a90889SApple OSS Distributions
250*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251*43a90889SApple OSS Distributions
252*43a90889SApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
253*43a90889SApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
254*43a90889SApple OSS Distributions // checked for as a NULL reference is illegal.
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
//
// Extracts the (address, length) pair for range `ind` from `r`, whose layout
// is selected by `type` (uio, 64-bit ranges, or plain virtual ranges).
static inline void
getAddrLenForInd(
	mach_vm_address_t &addr,
	mach_vm_size_t &len, // Output variables
	UInt32 type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32 ind,
	task_t task __unused)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// Ranges are held as a uio; pull out the ind'th iovec.
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	// NOTE(review): on LP64 builds this branch is compiled out — presumably
	// because the 64-bit type codes alias the plain ones there; confirm
	// against the kIOMemoryType* definitions.
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len = cur.length;
	}
#if CONFIG_PROB_GZALLOC
	// Kernel addresses may be PGZ-protected aliases; decode back to the
	// canonical address.
	if (task == kernel_task) {
		addr = pgz_decode(addr, len);
	}
#endif /* CONFIG_PROB_GZALLOC */
}
290*43a90889SApple OSS Distributions
291*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
292*43a90889SApple OSS Distributions
293*43a90889SApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)294*43a90889SApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
295*43a90889SApple OSS Distributions {
296*43a90889SApple OSS Distributions IOReturn err = kIOReturnSuccess;
297*43a90889SApple OSS Distributions
298*43a90889SApple OSS Distributions *control = VM_PURGABLE_SET_STATE;
299*43a90889SApple OSS Distributions
300*43a90889SApple OSS Distributions enum { kIOMemoryPurgeableControlMask = 15 };
301*43a90889SApple OSS Distributions
302*43a90889SApple OSS Distributions switch (kIOMemoryPurgeableControlMask & newState) {
303*43a90889SApple OSS Distributions case kIOMemoryPurgeableKeepCurrent:
304*43a90889SApple OSS Distributions *control = VM_PURGABLE_GET_STATE;
305*43a90889SApple OSS Distributions break;
306*43a90889SApple OSS Distributions
307*43a90889SApple OSS Distributions case kIOMemoryPurgeableNonVolatile:
308*43a90889SApple OSS Distributions *state = VM_PURGABLE_NONVOLATILE;
309*43a90889SApple OSS Distributions break;
310*43a90889SApple OSS Distributions case kIOMemoryPurgeableVolatile:
311*43a90889SApple OSS Distributions *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
312*43a90889SApple OSS Distributions break;
313*43a90889SApple OSS Distributions case kIOMemoryPurgeableEmpty:
314*43a90889SApple OSS Distributions *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
315*43a90889SApple OSS Distributions break;
316*43a90889SApple OSS Distributions default:
317*43a90889SApple OSS Distributions err = kIOReturnBadArgument;
318*43a90889SApple OSS Distributions break;
319*43a90889SApple OSS Distributions }
320*43a90889SApple OSS Distributions
321*43a90889SApple OSS Distributions if (*control == VM_PURGABLE_SET_STATE) {
322*43a90889SApple OSS Distributions // let VM know this call is from the kernel and is allowed to alter
323*43a90889SApple OSS Distributions // the volatility of the memory entry even if it was created with
324*43a90889SApple OSS Distributions // MAP_MEM_PURGABLE_KERNEL_ONLY
325*43a90889SApple OSS Distributions *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
326*43a90889SApple OSS Distributions }
327*43a90889SApple OSS Distributions
328*43a90889SApple OSS Distributions return err;
329*43a90889SApple OSS Distributions }
330*43a90889SApple OSS Distributions
331*43a90889SApple OSS Distributions static IOReturn
purgeableStateBits(int * state)332*43a90889SApple OSS Distributions purgeableStateBits(int * state)
333*43a90889SApple OSS Distributions {
334*43a90889SApple OSS Distributions IOReturn err = kIOReturnSuccess;
335*43a90889SApple OSS Distributions
336*43a90889SApple OSS Distributions switch (VM_PURGABLE_STATE_MASK & *state) {
337*43a90889SApple OSS Distributions case VM_PURGABLE_NONVOLATILE:
338*43a90889SApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
339*43a90889SApple OSS Distributions break;
340*43a90889SApple OSS Distributions case VM_PURGABLE_VOLATILE:
341*43a90889SApple OSS Distributions *state = kIOMemoryPurgeableVolatile;
342*43a90889SApple OSS Distributions break;
343*43a90889SApple OSS Distributions case VM_PURGABLE_EMPTY:
344*43a90889SApple OSS Distributions *state = kIOMemoryPurgeableEmpty;
345*43a90889SApple OSS Distributions break;
346*43a90889SApple OSS Distributions default:
347*43a90889SApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
348*43a90889SApple OSS Distributions err = kIOReturnNotReady;
349*43a90889SApple OSS Distributions break;
350*43a90889SApple OSS Distributions }
351*43a90889SApple OSS Distributions return err;
352*43a90889SApple OSS Distributions }
353*43a90889SApple OSS Distributions
// Maps an IOKit cache mode (kIO*Cache) to its VM cache attribute and the
// named-entry object type used when creating memory entries.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* cache/pager attribute
	unsigned int object_type;   // MAP_MEM_* named-entry type
} iokit_memtype_entry;

// Indexed directly by the IOOptionBits cache mode value.
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
371*43a90889SApple OSS Distributions
372*43a90889SApple OSS Distributions static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)373*43a90889SApple OSS Distributions vmProtForCacheMode(IOOptionBits cacheMode)
374*43a90889SApple OSS Distributions {
375*43a90889SApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
376*43a90889SApple OSS Distributions if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
377*43a90889SApple OSS Distributions cacheMode = kIODefaultCache;
378*43a90889SApple OSS Distributions }
379*43a90889SApple OSS Distributions vm_prot_t prot = 0;
380*43a90889SApple OSS Distributions SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
381*43a90889SApple OSS Distributions return prot;
382*43a90889SApple OSS Distributions }
383*43a90889SApple OSS Distributions
384*43a90889SApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)385*43a90889SApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
386*43a90889SApple OSS Distributions {
387*43a90889SApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
388*43a90889SApple OSS Distributions if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
389*43a90889SApple OSS Distributions cacheMode = kIODefaultCache;
390*43a90889SApple OSS Distributions }
391*43a90889SApple OSS Distributions if (cacheMode == kIODefaultCache) {
392*43a90889SApple OSS Distributions return -1U;
393*43a90889SApple OSS Distributions }
394*43a90889SApple OSS Distributions return iomd_mem_types[cacheMode].wimg;
395*43a90889SApple OSS Distributions }
396*43a90889SApple OSS Distributions
397*43a90889SApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)398*43a90889SApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
399*43a90889SApple OSS Distributions {
400*43a90889SApple OSS Distributions pagerFlags &= VM_WIMG_MASK;
401*43a90889SApple OSS Distributions IOOptionBits cacheMode = kIODefaultCache;
402*43a90889SApple OSS Distributions for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
403*43a90889SApple OSS Distributions if (iomd_mem_types[i].wimg == pagerFlags) {
404*43a90889SApple OSS Distributions cacheMode = i;
405*43a90889SApple OSS Distributions break;
406*43a90889SApple OSS Distributions }
407*43a90889SApple OSS Distributions }
408*43a90889SApple OSS Distributions return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
409*43a90889SApple OSS Distributions }
410*43a90889SApple OSS Distributions
411*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
412*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413*43a90889SApple OSS Distributions
// One VM named entry covering part of a descriptor's memory.
struct IOMemoryEntry {
	ipc_port_t entry;       // send right to the named entry
	int64_t offset;
	uint64_t size;
	uint64_t start;
};

// Reference-counted set of named entries describing a descriptor's memory.
// Allocated with a variable-length entries[] tail; see memoryReferenceAlloc.
struct IOMemoryReference {
	volatile SInt32 refCount;
	vm_prot_t prot;
	uint32_t capacity;      // allocated entries[] slots
	uint32_t count;         // slots in use
	struct IOMemoryReference * mapRef;  // optional chained reference, freed with this one
	IOMemoryEntry entries[0];
};

// Option bits for memory-reference creation.
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Global count of live IOMemoryReference objects (accounting/debug).
SInt32 gIOMemoryReferenceCount;
437*43a90889SApple OSS Distributions
438*43a90889SApple OSS Distributions IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)439*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
440*43a90889SApple OSS Distributions {
441*43a90889SApple OSS Distributions IOMemoryReference * ref;
442*43a90889SApple OSS Distributions size_t oldCapacity;
443*43a90889SApple OSS Distributions
444*43a90889SApple OSS Distributions if (realloc) {
445*43a90889SApple OSS Distributions oldCapacity = realloc->capacity;
446*43a90889SApple OSS Distributions } else {
447*43a90889SApple OSS Distributions oldCapacity = 0;
448*43a90889SApple OSS Distributions }
449*43a90889SApple OSS Distributions
450*43a90889SApple OSS Distributions // Use the kalloc API instead of manually handling the reallocation
451*43a90889SApple OSS Distributions ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
452*43a90889SApple OSS Distributions oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
453*43a90889SApple OSS Distributions if (ref) {
454*43a90889SApple OSS Distributions if (oldCapacity == 0) {
455*43a90889SApple OSS Distributions ref->refCount = 1;
456*43a90889SApple OSS Distributions OSIncrementAtomic(&gIOMemoryReferenceCount);
457*43a90889SApple OSS Distributions }
458*43a90889SApple OSS Distributions ref->capacity = capacity;
459*43a90889SApple OSS Distributions }
460*43a90889SApple OSS Distributions return ref;
461*43a90889SApple OSS Distributions }
462*43a90889SApple OSS Distributions
463*43a90889SApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)464*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
465*43a90889SApple OSS Distributions {
466*43a90889SApple OSS Distributions IOMemoryEntry * entries;
467*43a90889SApple OSS Distributions
468*43a90889SApple OSS Distributions if (ref->mapRef) {
469*43a90889SApple OSS Distributions memoryReferenceFree(ref->mapRef);
470*43a90889SApple OSS Distributions ref->mapRef = NULL;
471*43a90889SApple OSS Distributions }
472*43a90889SApple OSS Distributions
473*43a90889SApple OSS Distributions entries = ref->entries + ref->count;
474*43a90889SApple OSS Distributions while (entries > &ref->entries[0]) {
475*43a90889SApple OSS Distributions entries--;
476*43a90889SApple OSS Distributions ipc_port_release_send(entries->entry);
477*43a90889SApple OSS Distributions }
478*43a90889SApple OSS Distributions kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
479*43a90889SApple OSS Distributions
480*43a90889SApple OSS Distributions OSDecrementAtomic(&gIOMemoryReferenceCount);
481*43a90889SApple OSS Distributions }
482*43a90889SApple OSS Distributions
483*43a90889SApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)484*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
485*43a90889SApple OSS Distributions {
486*43a90889SApple OSS Distributions if (1 == OSDecrementAtomic(&ref->refCount)) {
487*43a90889SApple OSS Distributions memoryReferenceFree(ref);
488*43a90889SApple OSS Distributions }
489*43a90889SApple OSS Distributions }
490*43a90889SApple OSS Distributions
491*43a90889SApple OSS Distributions
492*43a90889SApple OSS Distributions IOReturn
memoryReferenceCreate(IOOptionBits options,IOMemoryReference ** reference)493*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreate(
494*43a90889SApple OSS Distributions IOOptionBits options,
495*43a90889SApple OSS Distributions IOMemoryReference ** reference)
496*43a90889SApple OSS Distributions {
497*43a90889SApple OSS Distributions enum { kCapacity = 4, kCapacityInc = 4 };
498*43a90889SApple OSS Distributions
499*43a90889SApple OSS Distributions kern_return_t err;
500*43a90889SApple OSS Distributions IOMemoryReference * ref;
501*43a90889SApple OSS Distributions IOMemoryEntry * entries;
502*43a90889SApple OSS Distributions IOMemoryEntry * cloneEntries = NULL;
503*43a90889SApple OSS Distributions vm_map_t map;
504*43a90889SApple OSS Distributions ipc_port_t entry, cloneEntry;
505*43a90889SApple OSS Distributions vm_prot_t prot;
506*43a90889SApple OSS Distributions memory_object_size_t actualSize;
507*43a90889SApple OSS Distributions uint32_t rangeIdx;
508*43a90889SApple OSS Distributions uint32_t count;
509*43a90889SApple OSS Distributions mach_vm_address_t entryAddr, endAddr, entrySize;
510*43a90889SApple OSS Distributions mach_vm_size_t srcAddr, srcLen;
511*43a90889SApple OSS Distributions mach_vm_size_t nextAddr, nextLen;
512*43a90889SApple OSS Distributions mach_vm_size_t offset, remain;
513*43a90889SApple OSS Distributions vm_map_offset_t overmap_start = 0, overmap_end = 0;
514*43a90889SApple OSS Distributions int misaligned_start = 0, misaligned_end = 0;
515*43a90889SApple OSS Distributions IOByteCount physLen;
516*43a90889SApple OSS Distributions IOOptionBits type = (_flags & kIOMemoryTypeMask);
517*43a90889SApple OSS Distributions IOOptionBits cacheMode;
518*43a90889SApple OSS Distributions unsigned int pagerFlags;
519*43a90889SApple OSS Distributions vm_tag_t tag;
520*43a90889SApple OSS Distributions vm_named_entry_kernel_flags_t vmne_kflags;
521*43a90889SApple OSS Distributions
522*43a90889SApple OSS Distributions ref = memoryReferenceAlloc(kCapacity, NULL);
523*43a90889SApple OSS Distributions if (!ref) {
524*43a90889SApple OSS Distributions return kIOReturnNoMemory;
525*43a90889SApple OSS Distributions }
526*43a90889SApple OSS Distributions
527*43a90889SApple OSS Distributions tag = (vm_tag_t) getVMTag(kernel_map);
528*43a90889SApple OSS Distributions vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
529*43a90889SApple OSS Distributions entries = &ref->entries[0];
530*43a90889SApple OSS Distributions count = 0;
531*43a90889SApple OSS Distributions err = KERN_SUCCESS;
532*43a90889SApple OSS Distributions
533*43a90889SApple OSS Distributions offset = 0;
534*43a90889SApple OSS Distributions rangeIdx = 0;
535*43a90889SApple OSS Distributions remain = _length;
536*43a90889SApple OSS Distributions if (_task) {
537*43a90889SApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
538*43a90889SApple OSS Distributions
539*43a90889SApple OSS Distributions // account for IOBMD setLength(), use its capacity as length
540*43a90889SApple OSS Distributions IOBufferMemoryDescriptor * bmd;
541*43a90889SApple OSS Distributions if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
542*43a90889SApple OSS Distributions nextLen = bmd->getCapacity();
543*43a90889SApple OSS Distributions remain = nextLen;
544*43a90889SApple OSS Distributions }
545*43a90889SApple OSS Distributions } else {
546*43a90889SApple OSS Distributions nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
547*43a90889SApple OSS Distributions nextLen = physLen;
548*43a90889SApple OSS Distributions
549*43a90889SApple OSS Distributions // default cache mode for physical
550*43a90889SApple OSS Distributions if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
551*43a90889SApple OSS Distributions IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
552*43a90889SApple OSS Distributions _flags |= (mode << kIOMemoryBufferCacheShift);
553*43a90889SApple OSS Distributions }
554*43a90889SApple OSS Distributions }
555*43a90889SApple OSS Distributions
556*43a90889SApple OSS Distributions // cache mode & vm_prot
557*43a90889SApple OSS Distributions prot = VM_PROT_READ;
558*43a90889SApple OSS Distributions cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
559*43a90889SApple OSS Distributions prot |= vmProtForCacheMode(cacheMode);
560*43a90889SApple OSS Distributions // VM system requires write access to change cache mode
561*43a90889SApple OSS Distributions if (kIODefaultCache != cacheMode) {
562*43a90889SApple OSS Distributions prot |= VM_PROT_WRITE;
563*43a90889SApple OSS Distributions }
564*43a90889SApple OSS Distributions if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
565*43a90889SApple OSS Distributions prot |= VM_PROT_WRITE;
566*43a90889SApple OSS Distributions }
567*43a90889SApple OSS Distributions if (kIOMemoryReferenceWrite & options) {
568*43a90889SApple OSS Distributions prot |= VM_PROT_WRITE;
569*43a90889SApple OSS Distributions }
570*43a90889SApple OSS Distributions if (kIOMemoryReferenceCOW & options) {
571*43a90889SApple OSS Distributions prot |= MAP_MEM_VM_COPY;
572*43a90889SApple OSS Distributions }
573*43a90889SApple OSS Distributions
574*43a90889SApple OSS Distributions if (kIOMemoryUseReserve & _flags) {
575*43a90889SApple OSS Distributions prot |= MAP_MEM_GRAB_SECLUDED;
576*43a90889SApple OSS Distributions }
577*43a90889SApple OSS Distributions
578*43a90889SApple OSS Distributions if ((kIOMemoryReferenceReuse & options) && _memRef) {
579*43a90889SApple OSS Distributions cloneEntries = &_memRef->entries[0];
580*43a90889SApple OSS Distributions prot |= MAP_MEM_NAMED_REUSE;
581*43a90889SApple OSS Distributions }
582*43a90889SApple OSS Distributions
583*43a90889SApple OSS Distributions if (_task) {
584*43a90889SApple OSS Distributions // virtual ranges
585*43a90889SApple OSS Distributions
586*43a90889SApple OSS Distributions if (kIOMemoryBufferPageable & _flags) {
587*43a90889SApple OSS Distributions int ledger_tag, ledger_no_footprint;
588*43a90889SApple OSS Distributions
589*43a90889SApple OSS Distributions // IOBufferMemoryDescriptor alloc - set flags for entry + object create
590*43a90889SApple OSS Distributions prot |= MAP_MEM_NAMED_CREATE;
591*43a90889SApple OSS Distributions
592*43a90889SApple OSS Distributions // default accounting settings:
593*43a90889SApple OSS Distributions // + "none" ledger tag
594*43a90889SApple OSS Distributions // + include in footprint
595*43a90889SApple OSS Distributions // can be changed later with ::setOwnership()
596*43a90889SApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NONE;
597*43a90889SApple OSS Distributions ledger_no_footprint = 0;
598*43a90889SApple OSS Distributions
599*43a90889SApple OSS Distributions if (kIOMemoryBufferPurgeable & _flags) {
600*43a90889SApple OSS Distributions prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
601*43a90889SApple OSS Distributions if (VM_KERN_MEMORY_SKYWALK == tag) {
602*43a90889SApple OSS Distributions // Skywalk purgeable memory accounting:
603*43a90889SApple OSS Distributions // + "network" ledger tag
604*43a90889SApple OSS Distributions // + not included in footprint
605*43a90889SApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NETWORK;
606*43a90889SApple OSS Distributions ledger_no_footprint = 1;
607*43a90889SApple OSS Distributions } else {
608*43a90889SApple OSS Distributions // regular purgeable memory accounting:
609*43a90889SApple OSS Distributions // + no ledger tag
610*43a90889SApple OSS Distributions // + included in footprint
611*43a90889SApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NONE;
612*43a90889SApple OSS Distributions ledger_no_footprint = 0;
613*43a90889SApple OSS Distributions }
614*43a90889SApple OSS Distributions }
615*43a90889SApple OSS Distributions vmne_kflags.vmnekf_ledger_tag = ledger_tag;
616*43a90889SApple OSS Distributions vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
617*43a90889SApple OSS Distributions if (kIOMemoryUseReserve & _flags) {
618*43a90889SApple OSS Distributions prot |= MAP_MEM_GRAB_SECLUDED;
619*43a90889SApple OSS Distributions }
620*43a90889SApple OSS Distributions
621*43a90889SApple OSS Distributions prot |= VM_PROT_WRITE;
622*43a90889SApple OSS Distributions map = NULL;
623*43a90889SApple OSS Distributions } else {
624*43a90889SApple OSS Distributions prot |= MAP_MEM_USE_DATA_ADDR;
625*43a90889SApple OSS Distributions map = get_task_map(_task);
626*43a90889SApple OSS Distributions }
627*43a90889SApple OSS Distributions DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
628*43a90889SApple OSS Distributions
629*43a90889SApple OSS Distributions while (remain) {
630*43a90889SApple OSS Distributions srcAddr = nextAddr;
631*43a90889SApple OSS Distributions srcLen = nextLen;
632*43a90889SApple OSS Distributions nextAddr = 0;
633*43a90889SApple OSS Distributions nextLen = 0;
634*43a90889SApple OSS Distributions // coalesce addr range
635*43a90889SApple OSS Distributions for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
636*43a90889SApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
637*43a90889SApple OSS Distributions if ((srcAddr + srcLen) != nextAddr) {
638*43a90889SApple OSS Distributions break;
639*43a90889SApple OSS Distributions }
640*43a90889SApple OSS Distributions srcLen += nextLen;
641*43a90889SApple OSS Distributions }
642*43a90889SApple OSS Distributions
643*43a90889SApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & prot) {
644*43a90889SApple OSS Distributions entryAddr = srcAddr;
645*43a90889SApple OSS Distributions endAddr = srcAddr + srcLen;
646*43a90889SApple OSS Distributions } else {
647*43a90889SApple OSS Distributions entryAddr = trunc_page_64(srcAddr);
648*43a90889SApple OSS Distributions endAddr = round_page_64(srcAddr + srcLen);
649*43a90889SApple OSS Distributions }
650*43a90889SApple OSS Distributions if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
651*43a90889SApple OSS Distributions DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
652*43a90889SApple OSS Distributions }
653*43a90889SApple OSS Distributions
654*43a90889SApple OSS Distributions do{
655*43a90889SApple OSS Distributions entrySize = (endAddr - entryAddr);
656*43a90889SApple OSS Distributions if (!entrySize) {
657*43a90889SApple OSS Distributions break;
658*43a90889SApple OSS Distributions }
659*43a90889SApple OSS Distributions actualSize = entrySize;
660*43a90889SApple OSS Distributions
661*43a90889SApple OSS Distributions cloneEntry = MACH_PORT_NULL;
662*43a90889SApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
663*43a90889SApple OSS Distributions if (cloneEntries < &_memRef->entries[_memRef->count]) {
664*43a90889SApple OSS Distributions cloneEntry = cloneEntries->entry;
665*43a90889SApple OSS Distributions } else {
666*43a90889SApple OSS Distributions prot &= ~MAP_MEM_NAMED_REUSE;
667*43a90889SApple OSS Distributions }
668*43a90889SApple OSS Distributions }
669*43a90889SApple OSS Distributions
670*43a90889SApple OSS Distributions mach_vm_offset_t entryAddrForVm = entryAddr;
671*43a90889SApple OSS Distributions err = mach_make_memory_entry_internal(map,
672*43a90889SApple OSS Distributions &actualSize, entryAddrForVm, prot, vmne_kflags, &entry, cloneEntry);
673*43a90889SApple OSS Distributions
674*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
675*43a90889SApple OSS Distributions DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddrForVm, actualSize, prot, err);
676*43a90889SApple OSS Distributions break;
677*43a90889SApple OSS Distributions }
678*43a90889SApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & prot) {
679*43a90889SApple OSS Distributions if (actualSize > entrySize) {
680*43a90889SApple OSS Distributions actualSize = entrySize;
681*43a90889SApple OSS Distributions }
682*43a90889SApple OSS Distributions } else if (actualSize > entrySize) {
683*43a90889SApple OSS Distributions panic("mach_make_memory_entry_64 actualSize");
684*43a90889SApple OSS Distributions }
685*43a90889SApple OSS Distributions
686*43a90889SApple OSS Distributions memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
687*43a90889SApple OSS Distributions
688*43a90889SApple OSS Distributions if (count && overmap_start) {
689*43a90889SApple OSS Distributions /*
690*43a90889SApple OSS Distributions * Track misaligned start for all
691*43a90889SApple OSS Distributions * except the first entry.
692*43a90889SApple OSS Distributions */
693*43a90889SApple OSS Distributions misaligned_start++;
694*43a90889SApple OSS Distributions }
695*43a90889SApple OSS Distributions
696*43a90889SApple OSS Distributions if (overmap_end) {
697*43a90889SApple OSS Distributions /*
698*43a90889SApple OSS Distributions * Ignore misaligned end for the
699*43a90889SApple OSS Distributions * last entry.
700*43a90889SApple OSS Distributions */
701*43a90889SApple OSS Distributions if ((entryAddr + actualSize) != endAddr) {
702*43a90889SApple OSS Distributions misaligned_end++;
703*43a90889SApple OSS Distributions }
704*43a90889SApple OSS Distributions }
705*43a90889SApple OSS Distributions
706*43a90889SApple OSS Distributions if (count) {
707*43a90889SApple OSS Distributions /* Middle entries */
708*43a90889SApple OSS Distributions if (misaligned_start || misaligned_end) {
709*43a90889SApple OSS Distributions DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
710*43a90889SApple OSS Distributions ipc_port_release_send(entry);
711*43a90889SApple OSS Distributions err = KERN_NOT_SUPPORTED;
712*43a90889SApple OSS Distributions break;
713*43a90889SApple OSS Distributions }
714*43a90889SApple OSS Distributions }
715*43a90889SApple OSS Distributions
716*43a90889SApple OSS Distributions if (count >= ref->capacity) {
717*43a90889SApple OSS Distributions ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
718*43a90889SApple OSS Distributions entries = &ref->entries[count];
719*43a90889SApple OSS Distributions }
720*43a90889SApple OSS Distributions entries->entry = entry;
721*43a90889SApple OSS Distributions entries->size = actualSize;
722*43a90889SApple OSS Distributions entries->offset = offset + (entryAddr - srcAddr);
723*43a90889SApple OSS Distributions entries->start = entryAddr;
724*43a90889SApple OSS Distributions entryAddr += actualSize;
725*43a90889SApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
726*43a90889SApple OSS Distributions if ((cloneEntries->entry == entries->entry)
727*43a90889SApple OSS Distributions && (cloneEntries->size == entries->size)
728*43a90889SApple OSS Distributions && (cloneEntries->offset == entries->offset)) {
729*43a90889SApple OSS Distributions cloneEntries++;
730*43a90889SApple OSS Distributions } else {
731*43a90889SApple OSS Distributions prot &= ~MAP_MEM_NAMED_REUSE;
732*43a90889SApple OSS Distributions }
733*43a90889SApple OSS Distributions }
734*43a90889SApple OSS Distributions entries++;
735*43a90889SApple OSS Distributions count++;
736*43a90889SApple OSS Distributions }while (true);
737*43a90889SApple OSS Distributions offset += srcLen;
738*43a90889SApple OSS Distributions remain -= srcLen;
739*43a90889SApple OSS Distributions }
740*43a90889SApple OSS Distributions } else {
741*43a90889SApple OSS Distributions // _task == 0, physical or kIOMemoryTypeUPL
742*43a90889SApple OSS Distributions memory_object_t pager;
743*43a90889SApple OSS Distributions vm_size_t size = ptoa_64(_pages);
744*43a90889SApple OSS Distributions
745*43a90889SApple OSS Distributions if (!getKernelReserved()) {
746*43a90889SApple OSS Distributions panic("getKernelReserved");
747*43a90889SApple OSS Distributions }
748*43a90889SApple OSS Distributions
749*43a90889SApple OSS Distributions reserved->dp.pagerContig = (1 == _rangesCount);
750*43a90889SApple OSS Distributions reserved->dp.memory = this;
751*43a90889SApple OSS Distributions
752*43a90889SApple OSS Distributions pagerFlags = pagerFlagsForCacheMode(cacheMode);
753*43a90889SApple OSS Distributions if (-1U == pagerFlags) {
754*43a90889SApple OSS Distributions panic("phys is kIODefaultCache");
755*43a90889SApple OSS Distributions }
756*43a90889SApple OSS Distributions if (reserved->dp.pagerContig) {
757*43a90889SApple OSS Distributions pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
758*43a90889SApple OSS Distributions }
759*43a90889SApple OSS Distributions
760*43a90889SApple OSS Distributions pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
761*43a90889SApple OSS Distributions size, pagerFlags);
762*43a90889SApple OSS Distributions assert(pager);
763*43a90889SApple OSS Distributions if (!pager) {
764*43a90889SApple OSS Distributions DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
765*43a90889SApple OSS Distributions err = kIOReturnVMError;
766*43a90889SApple OSS Distributions } else {
767*43a90889SApple OSS Distributions srcAddr = nextAddr;
768*43a90889SApple OSS Distributions entryAddr = trunc_page_64(srcAddr);
769*43a90889SApple OSS Distributions err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
770*43a90889SApple OSS Distributions size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
771*43a90889SApple OSS Distributions assert(KERN_SUCCESS == err);
772*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
773*43a90889SApple OSS Distributions device_pager_deallocate(pager);
774*43a90889SApple OSS Distributions } else {
775*43a90889SApple OSS Distributions reserved->dp.devicePager = pager;
776*43a90889SApple OSS Distributions entries->entry = entry;
777*43a90889SApple OSS Distributions entries->size = size;
778*43a90889SApple OSS Distributions entries->offset = offset + (entryAddr - srcAddr);
779*43a90889SApple OSS Distributions entries++;
780*43a90889SApple OSS Distributions count++;
781*43a90889SApple OSS Distributions }
782*43a90889SApple OSS Distributions }
783*43a90889SApple OSS Distributions }
784*43a90889SApple OSS Distributions
785*43a90889SApple OSS Distributions ref->count = count;
786*43a90889SApple OSS Distributions ref->prot = prot;
787*43a90889SApple OSS Distributions
788*43a90889SApple OSS Distributions if (_task && (KERN_SUCCESS == err)
789*43a90889SApple OSS Distributions && (kIOMemoryMapCopyOnWrite & _flags)
790*43a90889SApple OSS Distributions && !(kIOMemoryReferenceCOW & options)) {
791*43a90889SApple OSS Distributions err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
792*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
793*43a90889SApple OSS Distributions DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
794*43a90889SApple OSS Distributions }
795*43a90889SApple OSS Distributions }
796*43a90889SApple OSS Distributions
797*43a90889SApple OSS Distributions if (KERN_SUCCESS == err) {
798*43a90889SApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
799*43a90889SApple OSS Distributions memoryReferenceFree(ref);
800*43a90889SApple OSS Distributions OSIncrementAtomic(&_memRef->refCount);
801*43a90889SApple OSS Distributions ref = _memRef;
802*43a90889SApple OSS Distributions }
803*43a90889SApple OSS Distributions } else {
804*43a90889SApple OSS Distributions DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
805*43a90889SApple OSS Distributions memoryReferenceFree(ref);
806*43a90889SApple OSS Distributions ref = NULL;
807*43a90889SApple OSS Distributions }
808*43a90889SApple OSS Distributions
809*43a90889SApple OSS Distributions *reference = ref;
810*43a90889SApple OSS Distributions
811*43a90889SApple OSS Distributions return err;
812*43a90889SApple OSS Distributions }
813*43a90889SApple OSS Distributions
814*43a90889SApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)815*43a90889SApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
816*43a90889SApple OSS Distributions {
817*43a90889SApple OSS Distributions switch (kIOMapGuardedMask & options) {
818*43a90889SApple OSS Distributions default:
819*43a90889SApple OSS Distributions case kIOMapGuardedSmall:
820*43a90889SApple OSS Distributions return vm_map_page_size(map);
821*43a90889SApple OSS Distributions case kIOMapGuardedLarge:
822*43a90889SApple OSS Distributions assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
823*43a90889SApple OSS Distributions return kIOMapGuardSizeLarge;
824*43a90889SApple OSS Distributions }
825*43a90889SApple OSS Distributions ;
826*43a90889SApple OSS Distributions }
827*43a90889SApple OSS Distributions
828*43a90889SApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)829*43a90889SApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
830*43a90889SApple OSS Distributions vm_map_offset_t addr, mach_vm_size_t size)
831*43a90889SApple OSS Distributions {
832*43a90889SApple OSS Distributions kern_return_t kr;
833*43a90889SApple OSS Distributions vm_map_offset_t actualAddr;
834*43a90889SApple OSS Distributions mach_vm_size_t actualSize;
835*43a90889SApple OSS Distributions
836*43a90889SApple OSS Distributions actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
837*43a90889SApple OSS Distributions actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
838*43a90889SApple OSS Distributions
839*43a90889SApple OSS Distributions if (kIOMapGuardedMask & options) {
840*43a90889SApple OSS Distributions mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
841*43a90889SApple OSS Distributions actualAddr -= guardSize;
842*43a90889SApple OSS Distributions actualSize += 2 * guardSize;
843*43a90889SApple OSS Distributions }
844*43a90889SApple OSS Distributions kr = mach_vm_deallocate(map, actualAddr, actualSize);
845*43a90889SApple OSS Distributions
846*43a90889SApple OSS Distributions return kr;
847*43a90889SApple OSS Distributions }
848*43a90889SApple OSS Distributions
849*43a90889SApple OSS Distributions kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map,void * _ref)850*43a90889SApple OSS Distributions IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
851*43a90889SApple OSS Distributions {
852*43a90889SApple OSS Distributions IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
853*43a90889SApple OSS Distributions IOReturn err;
854*43a90889SApple OSS Distributions vm_map_offset_t addr;
855*43a90889SApple OSS Distributions mach_vm_size_t size;
856*43a90889SApple OSS Distributions mach_vm_size_t guardSize;
857*43a90889SApple OSS Distributions vm_map_kernel_flags_t vmk_flags;
858*43a90889SApple OSS Distributions
859*43a90889SApple OSS Distributions addr = ref->mapped;
860*43a90889SApple OSS Distributions size = ref->size;
861*43a90889SApple OSS Distributions guardSize = 0;
862*43a90889SApple OSS Distributions
863*43a90889SApple OSS Distributions if (kIOMapGuardedMask & ref->options) {
864*43a90889SApple OSS Distributions if (!(kIOMapAnywhere & ref->options)) {
865*43a90889SApple OSS Distributions return kIOReturnBadArgument;
866*43a90889SApple OSS Distributions }
867*43a90889SApple OSS Distributions guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
868*43a90889SApple OSS Distributions size += 2 * guardSize;
869*43a90889SApple OSS Distributions }
870*43a90889SApple OSS Distributions if (kIOMapAnywhere & ref->options) {
871*43a90889SApple OSS Distributions vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
872*43a90889SApple OSS Distributions } else {
873*43a90889SApple OSS Distributions vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
874*43a90889SApple OSS Distributions }
875*43a90889SApple OSS Distributions vmk_flags.vm_tag = ref->tag;
876*43a90889SApple OSS Distributions
877*43a90889SApple OSS Distributions /*
878*43a90889SApple OSS Distributions * Mapping memory into the kernel_map using IOMDs use the data range.
879*43a90889SApple OSS Distributions * Memory being mapped should not contain kernel pointers.
880*43a90889SApple OSS Distributions */
881*43a90889SApple OSS Distributions if (map == kernel_map) {
882*43a90889SApple OSS Distributions vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
883*43a90889SApple OSS Distributions }
884*43a90889SApple OSS Distributions
885*43a90889SApple OSS Distributions err = mach_vm_map_kernel(map, &addr, size,
886*43a90889SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
887*43a90889SApple OSS Distributions // TODO4K this should not be necessary...
888*43a90889SApple OSS Distributions (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
889*43a90889SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
890*43a90889SApple OSS Distributions (vm_map_offset_t) 0,
891*43a90889SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
892*43a90889SApple OSS Distributions vmk_flags,
893*43a90889SApple OSS Distributions IPC_PORT_NULL,
894*43a90889SApple OSS Distributions (memory_object_offset_t) 0,
895*43a90889SApple OSS Distributions false, /* copy */
896*43a90889SApple OSS Distributions ref->prot,
897*43a90889SApple OSS Distributions ref->prot,
898*43a90889SApple OSS Distributions VM_INHERIT_NONE);
899*43a90889SApple OSS Distributions if (KERN_SUCCESS == err) {
900*43a90889SApple OSS Distributions ref->mapped = (mach_vm_address_t) addr;
901*43a90889SApple OSS Distributions ref->map = map;
902*43a90889SApple OSS Distributions if (kIOMapGuardedMask & ref->options) {
903*43a90889SApple OSS Distributions vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
904*43a90889SApple OSS Distributions
905*43a90889SApple OSS Distributions err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
906*43a90889SApple OSS Distributions assert(KERN_SUCCESS == err);
907*43a90889SApple OSS Distributions err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
908*43a90889SApple OSS Distributions assert(KERN_SUCCESS == err);
909*43a90889SApple OSS Distributions ref->mapped += guardSize;
910*43a90889SApple OSS Distributions }
911*43a90889SApple OSS Distributions }
912*43a90889SApple OSS Distributions
913*43a90889SApple OSS Distributions return err;
914*43a90889SApple OSS Distributions }
915*43a90889SApple OSS Distributions
916*43a90889SApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)917*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
918*43a90889SApple OSS Distributions IOMemoryReference * ref,
919*43a90889SApple OSS Distributions vm_map_t map,
920*43a90889SApple OSS Distributions mach_vm_size_t inoffset,
921*43a90889SApple OSS Distributions mach_vm_size_t size,
922*43a90889SApple OSS Distributions IOOptionBits options,
923*43a90889SApple OSS Distributions mach_vm_address_t * inaddr)
924*43a90889SApple OSS Distributions {
925*43a90889SApple OSS Distributions IOReturn err;
926*43a90889SApple OSS Distributions int64_t offset = inoffset;
927*43a90889SApple OSS Distributions uint32_t rangeIdx, entryIdx;
928*43a90889SApple OSS Distributions vm_map_offset_t addr, mapAddr;
929*43a90889SApple OSS Distributions vm_map_offset_t pageOffset, entryOffset, remain, chunk;
930*43a90889SApple OSS Distributions
931*43a90889SApple OSS Distributions mach_vm_address_t nextAddr;
932*43a90889SApple OSS Distributions mach_vm_size_t nextLen;
933*43a90889SApple OSS Distributions IOByteCount physLen;
934*43a90889SApple OSS Distributions IOMemoryEntry * entry;
935*43a90889SApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
936*43a90889SApple OSS Distributions IOOptionBits type;
937*43a90889SApple OSS Distributions IOOptionBits cacheMode;
938*43a90889SApple OSS Distributions vm_tag_t tag;
939*43a90889SApple OSS Distributions // for the kIOMapPrefault option.
940*43a90889SApple OSS Distributions upl_page_info_t * pageList = NULL;
941*43a90889SApple OSS Distributions UInt currentPageIndex = 0;
942*43a90889SApple OSS Distributions bool didAlloc;
943*43a90889SApple OSS Distributions
944*43a90889SApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
945*43a90889SApple OSS Distributions
946*43a90889SApple OSS Distributions if (ref->mapRef) {
947*43a90889SApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
948*43a90889SApple OSS Distributions return err;
949*43a90889SApple OSS Distributions }
950*43a90889SApple OSS Distributions
951*43a90889SApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
952*43a90889SApple OSS Distributions err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
953*43a90889SApple OSS Distributions return err;
954*43a90889SApple OSS Distributions }
955*43a90889SApple OSS Distributions
956*43a90889SApple OSS Distributions type = _flags & kIOMemoryTypeMask;
957*43a90889SApple OSS Distributions
958*43a90889SApple OSS Distributions prot = VM_PROT_READ;
959*43a90889SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
960*43a90889SApple OSS Distributions prot |= VM_PROT_WRITE;
961*43a90889SApple OSS Distributions }
962*43a90889SApple OSS Distributions prot &= ref->prot;
963*43a90889SApple OSS Distributions
964*43a90889SApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
965*43a90889SApple OSS Distributions if (kIODefaultCache != cacheMode) {
966*43a90889SApple OSS Distributions // VM system requires write access to update named entry cache mode
967*43a90889SApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
968*43a90889SApple OSS Distributions }
969*43a90889SApple OSS Distributions
970*43a90889SApple OSS Distributions tag = (typeof(tag))getVMTag(map);
971*43a90889SApple OSS Distributions
972*43a90889SApple OSS Distributions if (_task) {
973*43a90889SApple OSS Distributions // Find first range for offset
974*43a90889SApple OSS Distributions if (!_rangesCount) {
975*43a90889SApple OSS Distributions return kIOReturnBadArgument;
976*43a90889SApple OSS Distributions }
977*43a90889SApple OSS Distributions for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
978*43a90889SApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
979*43a90889SApple OSS Distributions if (remain < nextLen) {
980*43a90889SApple OSS Distributions break;
981*43a90889SApple OSS Distributions }
982*43a90889SApple OSS Distributions remain -= nextLen;
983*43a90889SApple OSS Distributions }
984*43a90889SApple OSS Distributions } else {
985*43a90889SApple OSS Distributions rangeIdx = 0;
986*43a90889SApple OSS Distributions remain = 0;
987*43a90889SApple OSS Distributions nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
988*43a90889SApple OSS Distributions nextLen = size;
989*43a90889SApple OSS Distributions }
990*43a90889SApple OSS Distributions
991*43a90889SApple OSS Distributions assert(remain < nextLen);
992*43a90889SApple OSS Distributions if (remain >= nextLen) {
993*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
994*43a90889SApple OSS Distributions return kIOReturnBadArgument;
995*43a90889SApple OSS Distributions }
996*43a90889SApple OSS Distributions
997*43a90889SApple OSS Distributions nextAddr += remain;
998*43a90889SApple OSS Distributions nextLen -= remain;
999*43a90889SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1000*43a90889SApple OSS Distributions pageOffset = (vm_map_page_mask(map) & nextAddr);
1001*43a90889SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
1002*43a90889SApple OSS Distributions pageOffset = (page_mask & nextAddr);
1003*43a90889SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
1004*43a90889SApple OSS Distributions addr = 0;
1005*43a90889SApple OSS Distributions didAlloc = false;
1006*43a90889SApple OSS Distributions
1007*43a90889SApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1008*43a90889SApple OSS Distributions addr = *inaddr;
1009*43a90889SApple OSS Distributions if (pageOffset != (vm_map_page_mask(map) & addr)) {
1010*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1011*43a90889SApple OSS Distributions }
1012*43a90889SApple OSS Distributions addr -= pageOffset;
1013*43a90889SApple OSS Distributions }
1014*43a90889SApple OSS Distributions
1015*43a90889SApple OSS Distributions // find first entry for offset
1016*43a90889SApple OSS Distributions for (entryIdx = 0;
1017*43a90889SApple OSS Distributions (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1018*43a90889SApple OSS Distributions entryIdx++) {
1019*43a90889SApple OSS Distributions }
1020*43a90889SApple OSS Distributions entryIdx--;
1021*43a90889SApple OSS Distributions entry = &ref->entries[entryIdx];
1022*43a90889SApple OSS Distributions
1023*43a90889SApple OSS Distributions // allocate VM
1024*43a90889SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1025*43a90889SApple OSS Distributions size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1026*43a90889SApple OSS Distributions #else
1027*43a90889SApple OSS Distributions size = round_page_64(size + pageOffset);
1028*43a90889SApple OSS Distributions #endif
1029*43a90889SApple OSS Distributions if (kIOMapOverwrite & options) {
1030*43a90889SApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1031*43a90889SApple OSS Distributions map = IOPageableMapForAddress(addr);
1032*43a90889SApple OSS Distributions }
1033*43a90889SApple OSS Distributions err = KERN_SUCCESS;
1034*43a90889SApple OSS Distributions } else {
1035*43a90889SApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1036*43a90889SApple OSS Distributions ref.map = map;
1037*43a90889SApple OSS Distributions ref.tag = tag;
1038*43a90889SApple OSS Distributions ref.options = options;
1039*43a90889SApple OSS Distributions ref.size = size;
1040*43a90889SApple OSS Distributions ref.prot = prot;
1041*43a90889SApple OSS Distributions if (options & kIOMapAnywhere) {
1042*43a90889SApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1043*43a90889SApple OSS Distributions ref.mapped = 0;
1044*43a90889SApple OSS Distributions } else {
1045*43a90889SApple OSS Distributions ref.mapped = addr;
1046*43a90889SApple OSS Distributions }
1047*43a90889SApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1048*43a90889SApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1049*43a90889SApple OSS Distributions } else {
1050*43a90889SApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1051*43a90889SApple OSS Distributions }
1052*43a90889SApple OSS Distributions if (KERN_SUCCESS == err) {
1053*43a90889SApple OSS Distributions addr = ref.mapped;
1054*43a90889SApple OSS Distributions map = ref.map;
1055*43a90889SApple OSS Distributions didAlloc = true;
1056*43a90889SApple OSS Distributions }
1057*43a90889SApple OSS Distributions }
1058*43a90889SApple OSS Distributions
1059*43a90889SApple OSS Distributions /*
1060*43a90889SApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1061*43a90889SApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1062*43a90889SApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1063*43a90889SApple OSS Distributions * operations.
1064*43a90889SApple OSS Distributions */
1065*43a90889SApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1066*43a90889SApple OSS Distributions options &= ~kIOMapPrefault;
1067*43a90889SApple OSS Distributions }
1068*43a90889SApple OSS Distributions
1069*43a90889SApple OSS Distributions /*
1070*43a90889SApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1071*43a90889SApple OSS Distributions * memory type, and the underlying data.
1072*43a90889SApple OSS Distributions */
1073*43a90889SApple OSS Distributions if (options & kIOMapPrefault) {
1074*43a90889SApple OSS Distributions /*
1075*43a90889SApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1076*43a90889SApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1077*43a90889SApple OSS Distributions */
1078*43a90889SApple OSS Distributions assert(_wireCount != 0);
1079*43a90889SApple OSS Distributions assert(_memoryEntries != NULL);
1080*43a90889SApple OSS Distributions if ((_wireCount == 0) ||
1081*43a90889SApple OSS Distributions (_memoryEntries == NULL)) {
1082*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1083*43a90889SApple OSS Distributions return kIOReturnBadArgument;
1084*43a90889SApple OSS Distributions }
1085*43a90889SApple OSS Distributions
1086*43a90889SApple OSS Distributions // Get the page list.
1087*43a90889SApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1088*43a90889SApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1089*43a90889SApple OSS Distributions pageList = getPageList(dataP);
1090*43a90889SApple OSS Distributions
1091*43a90889SApple OSS Distributions // Get the number of IOPLs.
1092*43a90889SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1093*43a90889SApple OSS Distributions
1094*43a90889SApple OSS Distributions /*
1095*43a90889SApple OSS Distributions * Scan through the IOPL Info Blocks, looking for the first block containing
1096*43a90889SApple OSS Distributions  * the offset. The search will go past it, so we'll need to go back to the
1097*43a90889SApple OSS Distributions * right range at the end.
1098*43a90889SApple OSS Distributions */
1099*43a90889SApple OSS Distributions UInt ioplIndex = 0;
1100*43a90889SApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1101*43a90889SApple OSS Distributions ioplIndex++;
1102*43a90889SApple OSS Distributions }
1103*43a90889SApple OSS Distributions ioplIndex--;
1104*43a90889SApple OSS Distributions
1105*43a90889SApple OSS Distributions // Retrieve the IOPL info block.
1106*43a90889SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1107*43a90889SApple OSS Distributions
1108*43a90889SApple OSS Distributions /*
1109*43a90889SApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1110*43a90889SApple OSS Distributions * array.
1111*43a90889SApple OSS Distributions */
1112*43a90889SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1113*43a90889SApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1114*43a90889SApple OSS Distributions } else {
1115*43a90889SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1116*43a90889SApple OSS Distributions }
1117*43a90889SApple OSS Distributions
1118*43a90889SApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1119*43a90889SApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1120*43a90889SApple OSS Distributions
1121*43a90889SApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1122*43a90889SApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1123*43a90889SApple OSS Distributions }
1124*43a90889SApple OSS Distributions
1125*43a90889SApple OSS Distributions // enter mappings
1126*43a90889SApple OSS Distributions remain = size;
1127*43a90889SApple OSS Distributions mapAddr = addr;
1128*43a90889SApple OSS Distributions addr += pageOffset;
1129*43a90889SApple OSS Distributions
1130*43a90889SApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1131*43a90889SApple OSS Distributions entryOffset = offset - entry->offset;
1132*43a90889SApple OSS Distributions if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1133*43a90889SApple OSS Distributions err = kIOReturnNotAligned;
1134*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1135*43a90889SApple OSS Distributions break;
1136*43a90889SApple OSS Distributions }
1137*43a90889SApple OSS Distributions
1138*43a90889SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1139*43a90889SApple OSS Distributions vm_size_t unused = 0;
1140*43a90889SApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1141*43a90889SApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1142*43a90889SApple OSS Distributions assert(KERN_SUCCESS == err);
1143*43a90889SApple OSS Distributions }
1144*43a90889SApple OSS Distributions
1145*43a90889SApple OSS Distributions entryOffset -= pageOffset;
1146*43a90889SApple OSS Distributions if (entryOffset >= entry->size) {
1147*43a90889SApple OSS Distributions panic("entryOffset");
1148*43a90889SApple OSS Distributions }
1149*43a90889SApple OSS Distributions chunk = entry->size - entryOffset;
1150*43a90889SApple OSS Distributions if (chunk) {
1151*43a90889SApple OSS Distributions vm_map_kernel_flags_t vmk_flags = {
1152*43a90889SApple OSS Distributions .vmf_fixed = true,
1153*43a90889SApple OSS Distributions .vmf_overwrite = true,
1154*43a90889SApple OSS Distributions .vm_tag = tag,
1155*43a90889SApple OSS Distributions .vmkf_iokit_acct = true,
1156*43a90889SApple OSS Distributions };
1157*43a90889SApple OSS Distributions
1158*43a90889SApple OSS Distributions if (chunk > remain) {
1159*43a90889SApple OSS Distributions chunk = remain;
1160*43a90889SApple OSS Distributions }
1161*43a90889SApple OSS Distributions if (options & kIOMapPrefault) {
1162*43a90889SApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1163*43a90889SApple OSS Distributions
1164*43a90889SApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1165*43a90889SApple OSS Distributions &mapAddr,
1166*43a90889SApple OSS Distributions chunk, 0 /* mask */,
1167*43a90889SApple OSS Distributions vmk_flags,
1168*43a90889SApple OSS Distributions entry->entry,
1169*43a90889SApple OSS Distributions entryOffset,
1170*43a90889SApple OSS Distributions prot, // cur
1171*43a90889SApple OSS Distributions prot, // max
1172*43a90889SApple OSS Distributions &pageList[currentPageIndex],
1173*43a90889SApple OSS Distributions nb_pages);
1174*43a90889SApple OSS Distributions
1175*43a90889SApple OSS Distributions if (err || vm_map_page_mask(map) < PAGE_MASK) {
1176*43a90889SApple OSS Distributions DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1177*43a90889SApple OSS Distributions }
1178*43a90889SApple OSS Distributions // Compute the next index in the page list.
1179*43a90889SApple OSS Distributions currentPageIndex += nb_pages;
1180*43a90889SApple OSS Distributions assert(currentPageIndex <= _pages);
1181*43a90889SApple OSS Distributions } else {
1182*43a90889SApple OSS Distributions err = mach_vm_map_kernel(map,
1183*43a90889SApple OSS Distributions &mapAddr,
1184*43a90889SApple OSS Distributions chunk, 0 /* mask */,
1185*43a90889SApple OSS Distributions vmk_flags,
1186*43a90889SApple OSS Distributions entry->entry,
1187*43a90889SApple OSS Distributions entryOffset,
1188*43a90889SApple OSS Distributions false, // copy
1189*43a90889SApple OSS Distributions prot, // cur
1190*43a90889SApple OSS Distributions prot, // max
1191*43a90889SApple OSS Distributions VM_INHERIT_NONE);
1192*43a90889SApple OSS Distributions }
1193*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
1194*43a90889SApple OSS Distributions DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1195*43a90889SApple OSS Distributions break;
1196*43a90889SApple OSS Distributions }
1197*43a90889SApple OSS Distributions remain -= chunk;
1198*43a90889SApple OSS Distributions if (!remain) {
1199*43a90889SApple OSS Distributions break;
1200*43a90889SApple OSS Distributions }
1201*43a90889SApple OSS Distributions mapAddr += chunk;
1202*43a90889SApple OSS Distributions offset += chunk - pageOffset;
1203*43a90889SApple OSS Distributions }
1204*43a90889SApple OSS Distributions pageOffset = 0;
1205*43a90889SApple OSS Distributions entry++;
1206*43a90889SApple OSS Distributions entryIdx++;
1207*43a90889SApple OSS Distributions if (entryIdx >= ref->count) {
1208*43a90889SApple OSS Distributions err = kIOReturnOverrun;
1209*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1210*43a90889SApple OSS Distributions break;
1211*43a90889SApple OSS Distributions }
1212*43a90889SApple OSS Distributions }
1213*43a90889SApple OSS Distributions
1214*43a90889SApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1215*43a90889SApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1216*43a90889SApple OSS Distributions addr = 0;
1217*43a90889SApple OSS Distributions }
1218*43a90889SApple OSS Distributions *inaddr = addr;
1219*43a90889SApple OSS Distributions
1220*43a90889SApple OSS Distributions if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1221*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1222*43a90889SApple OSS Distributions }
1223*43a90889SApple OSS Distributions return err;
1224*43a90889SApple OSS Distributions }
1225*43a90889SApple OSS Distributions
1226*43a90889SApple OSS Distributions #define LOGUNALIGN 0
1227*43a90889SApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1228*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1229*43a90889SApple OSS Distributions IOMemoryReference * ref,
1230*43a90889SApple OSS Distributions vm_map_t map,
1231*43a90889SApple OSS Distributions mach_vm_size_t inoffset,
1232*43a90889SApple OSS Distributions mach_vm_size_t size,
1233*43a90889SApple OSS Distributions IOOptionBits options,
1234*43a90889SApple OSS Distributions mach_vm_address_t * inaddr)
1235*43a90889SApple OSS Distributions {
1236*43a90889SApple OSS Distributions IOReturn err;
1237*43a90889SApple OSS Distributions int64_t offset = inoffset;
1238*43a90889SApple OSS Distributions uint32_t entryIdx, firstEntryIdx;
1239*43a90889SApple OSS Distributions vm_map_offset_t addr, mapAddr, mapAddrOut;
1240*43a90889SApple OSS Distributions vm_map_offset_t entryOffset, remain, chunk;
1241*43a90889SApple OSS Distributions
1242*43a90889SApple OSS Distributions IOMemoryEntry * entry;
1243*43a90889SApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
1244*43a90889SApple OSS Distributions IOOptionBits type;
1245*43a90889SApple OSS Distributions IOOptionBits cacheMode;
1246*43a90889SApple OSS Distributions vm_tag_t tag;
1247*43a90889SApple OSS Distributions // for the kIOMapPrefault option.
1248*43a90889SApple OSS Distributions upl_page_info_t * pageList = NULL;
1249*43a90889SApple OSS Distributions UInt currentPageIndex = 0;
1250*43a90889SApple OSS Distributions bool didAlloc;
1251*43a90889SApple OSS Distributions
1252*43a90889SApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1253*43a90889SApple OSS Distributions
1254*43a90889SApple OSS Distributions if (ref->mapRef) {
1255*43a90889SApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1256*43a90889SApple OSS Distributions return err;
1257*43a90889SApple OSS Distributions }
1258*43a90889SApple OSS Distributions
1259*43a90889SApple OSS Distributions #if LOGUNALIGN
1260*43a90889SApple OSS Distributions printf("MAP offset %qx, %qx\n", inoffset, size);
1261*43a90889SApple OSS Distributions #endif
1262*43a90889SApple OSS Distributions
1263*43a90889SApple OSS Distributions type = _flags & kIOMemoryTypeMask;
1264*43a90889SApple OSS Distributions
1265*43a90889SApple OSS Distributions prot = VM_PROT_READ;
1266*43a90889SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
1267*43a90889SApple OSS Distributions prot |= VM_PROT_WRITE;
1268*43a90889SApple OSS Distributions }
1269*43a90889SApple OSS Distributions prot &= ref->prot;
1270*43a90889SApple OSS Distributions
1271*43a90889SApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1272*43a90889SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1273*43a90889SApple OSS Distributions // VM system requires write access to update named entry cache mode
1274*43a90889SApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1275*43a90889SApple OSS Distributions }
1276*43a90889SApple OSS Distributions
1277*43a90889SApple OSS Distributions tag = (vm_tag_t) getVMTag(map);
1278*43a90889SApple OSS Distributions
1279*43a90889SApple OSS Distributions addr = 0;
1280*43a90889SApple OSS Distributions didAlloc = false;
1281*43a90889SApple OSS Distributions
1282*43a90889SApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1283*43a90889SApple OSS Distributions addr = *inaddr;
1284*43a90889SApple OSS Distributions }
1285*43a90889SApple OSS Distributions
1286*43a90889SApple OSS Distributions // find first entry for offset
1287*43a90889SApple OSS Distributions for (firstEntryIdx = 0;
1288*43a90889SApple OSS Distributions (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1289*43a90889SApple OSS Distributions firstEntryIdx++) {
1290*43a90889SApple OSS Distributions }
1291*43a90889SApple OSS Distributions firstEntryIdx--;
1292*43a90889SApple OSS Distributions
1293*43a90889SApple OSS Distributions // calculate required VM space
1294*43a90889SApple OSS Distributions
1295*43a90889SApple OSS Distributions entryIdx = firstEntryIdx;
1296*43a90889SApple OSS Distributions entry = &ref->entries[entryIdx];
1297*43a90889SApple OSS Distributions
1298*43a90889SApple OSS Distributions remain = size;
1299*43a90889SApple OSS Distributions int64_t iteroffset = offset;
1300*43a90889SApple OSS Distributions uint64_t mapSize = 0;
1301*43a90889SApple OSS Distributions while (remain) {
1302*43a90889SApple OSS Distributions entryOffset = iteroffset - entry->offset;
1303*43a90889SApple OSS Distributions if (entryOffset >= entry->size) {
1304*43a90889SApple OSS Distributions panic("entryOffset");
1305*43a90889SApple OSS Distributions }
1306*43a90889SApple OSS Distributions
1307*43a90889SApple OSS Distributions #if LOGUNALIGN
1308*43a90889SApple OSS Distributions printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1309*43a90889SApple OSS Distributions entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1310*43a90889SApple OSS Distributions #endif
1311*43a90889SApple OSS Distributions
1312*43a90889SApple OSS Distributions chunk = entry->size - entryOffset;
1313*43a90889SApple OSS Distributions if (chunk) {
1314*43a90889SApple OSS Distributions if (chunk > remain) {
1315*43a90889SApple OSS Distributions chunk = remain;
1316*43a90889SApple OSS Distributions }
1317*43a90889SApple OSS Distributions mach_vm_size_t entrySize;
1318*43a90889SApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1319*43a90889SApple OSS Distributions assert(KERN_SUCCESS == err);
1320*43a90889SApple OSS Distributions mapSize += entrySize;
1321*43a90889SApple OSS Distributions
1322*43a90889SApple OSS Distributions remain -= chunk;
1323*43a90889SApple OSS Distributions if (!remain) {
1324*43a90889SApple OSS Distributions break;
1325*43a90889SApple OSS Distributions }
1326*43a90889SApple OSS Distributions iteroffset += chunk; // - pageOffset;
1327*43a90889SApple OSS Distributions }
1328*43a90889SApple OSS Distributions entry++;
1329*43a90889SApple OSS Distributions entryIdx++;
1330*43a90889SApple OSS Distributions if (entryIdx >= ref->count) {
1331*43a90889SApple OSS Distributions panic("overrun");
1332*43a90889SApple OSS Distributions err = kIOReturnOverrun;
1333*43a90889SApple OSS Distributions break;
1334*43a90889SApple OSS Distributions }
1335*43a90889SApple OSS Distributions }
1336*43a90889SApple OSS Distributions
1337*43a90889SApple OSS Distributions if (kIOMapOverwrite & options) {
1338*43a90889SApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339*43a90889SApple OSS Distributions map = IOPageableMapForAddress(addr);
1340*43a90889SApple OSS Distributions }
1341*43a90889SApple OSS Distributions err = KERN_SUCCESS;
1342*43a90889SApple OSS Distributions } else {
1343*43a90889SApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1344*43a90889SApple OSS Distributions ref.map = map;
1345*43a90889SApple OSS Distributions ref.tag = tag;
1346*43a90889SApple OSS Distributions ref.options = options;
1347*43a90889SApple OSS Distributions ref.size = mapSize;
1348*43a90889SApple OSS Distributions ref.prot = prot;
1349*43a90889SApple OSS Distributions if (options & kIOMapAnywhere) {
1350*43a90889SApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1351*43a90889SApple OSS Distributions ref.mapped = 0;
1352*43a90889SApple OSS Distributions } else {
1353*43a90889SApple OSS Distributions ref.mapped = addr;
1354*43a90889SApple OSS Distributions }
1355*43a90889SApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1356*43a90889SApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1357*43a90889SApple OSS Distributions } else {
1358*43a90889SApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1359*43a90889SApple OSS Distributions }
1360*43a90889SApple OSS Distributions
1361*43a90889SApple OSS Distributions if (KERN_SUCCESS == err) {
1362*43a90889SApple OSS Distributions addr = ref.mapped;
1363*43a90889SApple OSS Distributions map = ref.map;
1364*43a90889SApple OSS Distributions didAlloc = true;
1365*43a90889SApple OSS Distributions }
1366*43a90889SApple OSS Distributions #if LOGUNALIGN
1367*43a90889SApple OSS Distributions IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1368*43a90889SApple OSS Distributions #endif
1369*43a90889SApple OSS Distributions }
1370*43a90889SApple OSS Distributions
1371*43a90889SApple OSS Distributions /*
1372*43a90889SApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1373*43a90889SApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1374*43a90889SApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1375*43a90889SApple OSS Distributions * operations.
1376*43a90889SApple OSS Distributions */
1377*43a90889SApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1378*43a90889SApple OSS Distributions options &= ~kIOMapPrefault;
1379*43a90889SApple OSS Distributions }
1380*43a90889SApple OSS Distributions
1381*43a90889SApple OSS Distributions /*
1382*43a90889SApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1383*43a90889SApple OSS Distributions * memory type, and the underlying data.
1384*43a90889SApple OSS Distributions */
1385*43a90889SApple OSS Distributions if (options & kIOMapPrefault) {
1386*43a90889SApple OSS Distributions /*
1387*43a90889SApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1388*43a90889SApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1389*43a90889SApple OSS Distributions */
1390*43a90889SApple OSS Distributions assert(_wireCount != 0);
1391*43a90889SApple OSS Distributions assert(_memoryEntries != NULL);
1392*43a90889SApple OSS Distributions if ((_wireCount == 0) ||
1393*43a90889SApple OSS Distributions (_memoryEntries == NULL)) {
1394*43a90889SApple OSS Distributions return kIOReturnBadArgument;
1395*43a90889SApple OSS Distributions }
1396*43a90889SApple OSS Distributions
1397*43a90889SApple OSS Distributions // Get the page list.
1398*43a90889SApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1399*43a90889SApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1400*43a90889SApple OSS Distributions pageList = getPageList(dataP);
1401*43a90889SApple OSS Distributions
1402*43a90889SApple OSS Distributions // Get the number of IOPLs.
1403*43a90889SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1404*43a90889SApple OSS Distributions
1405*43a90889SApple OSS Distributions /*
1406*43a90889SApple OSS Distributions * Scan through the IOPL Info Blocks, looking for the first block containing
1407*43a90889SApple OSS Distributions  * the offset. The search will go past it, so we'll need to go back to the
1408*43a90889SApple OSS Distributions * right range at the end.
1409*43a90889SApple OSS Distributions */
1410*43a90889SApple OSS Distributions UInt ioplIndex = 0;
1411*43a90889SApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1412*43a90889SApple OSS Distributions ioplIndex++;
1413*43a90889SApple OSS Distributions }
1414*43a90889SApple OSS Distributions ioplIndex--;
1415*43a90889SApple OSS Distributions
1416*43a90889SApple OSS Distributions // Retrieve the IOPL info block.
1417*43a90889SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1418*43a90889SApple OSS Distributions
1419*43a90889SApple OSS Distributions /*
1420*43a90889SApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1421*43a90889SApple OSS Distributions * array.
1422*43a90889SApple OSS Distributions */
1423*43a90889SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1424*43a90889SApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1425*43a90889SApple OSS Distributions } else {
1426*43a90889SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1427*43a90889SApple OSS Distributions }
1428*43a90889SApple OSS Distributions
1429*43a90889SApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1430*43a90889SApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1431*43a90889SApple OSS Distributions
1432*43a90889SApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1433*43a90889SApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1434*43a90889SApple OSS Distributions }
1435*43a90889SApple OSS Distributions
1436*43a90889SApple OSS Distributions // enter mappings
1437*43a90889SApple OSS Distributions remain = size;
1438*43a90889SApple OSS Distributions mapAddr = addr;
1439*43a90889SApple OSS Distributions entryIdx = firstEntryIdx;
1440*43a90889SApple OSS Distributions entry = &ref->entries[entryIdx];
1441*43a90889SApple OSS Distributions
1442*43a90889SApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1443*43a90889SApple OSS Distributions #if LOGUNALIGN
1444*43a90889SApple OSS Distributions printf("offset %qx, %qx\n", offset, entry->offset);
1445*43a90889SApple OSS Distributions #endif
1446*43a90889SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1447*43a90889SApple OSS Distributions vm_size_t unused = 0;
1448*43a90889SApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1449*43a90889SApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1450*43a90889SApple OSS Distributions assert(KERN_SUCCESS == err);
1451*43a90889SApple OSS Distributions }
1452*43a90889SApple OSS Distributions entryOffset = offset - entry->offset;
1453*43a90889SApple OSS Distributions if (entryOffset >= entry->size) {
1454*43a90889SApple OSS Distributions panic("entryOffset");
1455*43a90889SApple OSS Distributions }
1456*43a90889SApple OSS Distributions chunk = entry->size - entryOffset;
1457*43a90889SApple OSS Distributions #if LOGUNALIGN
1458*43a90889SApple OSS Distributions printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1459*43a90889SApple OSS Distributions #endif
1460*43a90889SApple OSS Distributions if (chunk) {
1461*43a90889SApple OSS Distributions vm_map_kernel_flags_t vmk_flags = {
1462*43a90889SApple OSS Distributions .vmf_fixed = true,
1463*43a90889SApple OSS Distributions .vmf_overwrite = true,
1464*43a90889SApple OSS Distributions .vmf_return_data_addr = true,
1465*43a90889SApple OSS Distributions .vm_tag = tag,
1466*43a90889SApple OSS Distributions .vmkf_iokit_acct = true,
1467*43a90889SApple OSS Distributions };
1468*43a90889SApple OSS Distributions
1469*43a90889SApple OSS Distributions if (chunk > remain) {
1470*43a90889SApple OSS Distributions chunk = remain;
1471*43a90889SApple OSS Distributions }
1472*43a90889SApple OSS Distributions mapAddrOut = mapAddr;
1473*43a90889SApple OSS Distributions if (options & kIOMapPrefault) {
1474*43a90889SApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1475*43a90889SApple OSS Distributions
1476*43a90889SApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1477*43a90889SApple OSS Distributions &mapAddrOut,
1478*43a90889SApple OSS Distributions chunk, 0 /* mask */,
1479*43a90889SApple OSS Distributions vmk_flags,
1480*43a90889SApple OSS Distributions entry->entry,
1481*43a90889SApple OSS Distributions entryOffset,
1482*43a90889SApple OSS Distributions prot, // cur
1483*43a90889SApple OSS Distributions prot, // max
1484*43a90889SApple OSS Distributions &pageList[currentPageIndex],
1485*43a90889SApple OSS Distributions nb_pages);
1486*43a90889SApple OSS Distributions
1487*43a90889SApple OSS Distributions // Compute the next index in the page list.
1488*43a90889SApple OSS Distributions currentPageIndex += nb_pages;
1489*43a90889SApple OSS Distributions assert(currentPageIndex <= _pages);
1490*43a90889SApple OSS Distributions } else {
1491*43a90889SApple OSS Distributions #if LOGUNALIGN
1492*43a90889SApple OSS Distributions printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1493*43a90889SApple OSS Distributions #endif
1494*43a90889SApple OSS Distributions err = mach_vm_map_kernel(map,
1495*43a90889SApple OSS Distributions &mapAddrOut,
1496*43a90889SApple OSS Distributions chunk, 0 /* mask */,
1497*43a90889SApple OSS Distributions vmk_flags,
1498*43a90889SApple OSS Distributions entry->entry,
1499*43a90889SApple OSS Distributions entryOffset,
1500*43a90889SApple OSS Distributions false, // copy
1501*43a90889SApple OSS Distributions prot, // cur
1502*43a90889SApple OSS Distributions prot, // max
1503*43a90889SApple OSS Distributions VM_INHERIT_NONE);
1504*43a90889SApple OSS Distributions }
1505*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
1506*43a90889SApple OSS Distributions panic("map enter err %x", err);
1507*43a90889SApple OSS Distributions break;
1508*43a90889SApple OSS Distributions }
1509*43a90889SApple OSS Distributions #if LOGUNALIGN
1510*43a90889SApple OSS Distributions printf("mapAddr o %qx\n", mapAddrOut);
1511*43a90889SApple OSS Distributions #endif
1512*43a90889SApple OSS Distributions if (entryIdx == firstEntryIdx) {
1513*43a90889SApple OSS Distributions addr = mapAddrOut;
1514*43a90889SApple OSS Distributions }
1515*43a90889SApple OSS Distributions remain -= chunk;
1516*43a90889SApple OSS Distributions if (!remain) {
1517*43a90889SApple OSS Distributions break;
1518*43a90889SApple OSS Distributions }
1519*43a90889SApple OSS Distributions mach_vm_size_t entrySize;
1520*43a90889SApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1521*43a90889SApple OSS Distributions assert(KERN_SUCCESS == err);
1522*43a90889SApple OSS Distributions mapAddr += entrySize;
1523*43a90889SApple OSS Distributions offset += chunk;
1524*43a90889SApple OSS Distributions }
1525*43a90889SApple OSS Distributions
1526*43a90889SApple OSS Distributions entry++;
1527*43a90889SApple OSS Distributions entryIdx++;
1528*43a90889SApple OSS Distributions if (entryIdx >= ref->count) {
1529*43a90889SApple OSS Distributions err = kIOReturnOverrun;
1530*43a90889SApple OSS Distributions break;
1531*43a90889SApple OSS Distributions }
1532*43a90889SApple OSS Distributions }
1533*43a90889SApple OSS Distributions
1534*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
1535*43a90889SApple OSS Distributions DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1536*43a90889SApple OSS Distributions }
1537*43a90889SApple OSS Distributions
1538*43a90889SApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1539*43a90889SApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1540*43a90889SApple OSS Distributions addr = 0;
1541*43a90889SApple OSS Distributions }
1542*43a90889SApple OSS Distributions *inaddr = addr;
1543*43a90889SApple OSS Distributions
1544*43a90889SApple OSS Distributions return err;
1545*43a90889SApple OSS Distributions }
1546*43a90889SApple OSS Distributions
1547*43a90889SApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1548*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1549*43a90889SApple OSS Distributions IOMemoryReference * ref,
1550*43a90889SApple OSS Distributions uint64_t * offset)
1551*43a90889SApple OSS Distributions {
1552*43a90889SApple OSS Distributions kern_return_t kr;
1553*43a90889SApple OSS Distributions vm_object_offset_t data_offset = 0;
1554*43a90889SApple OSS Distributions uint64_t total;
1555*43a90889SApple OSS Distributions uint32_t idx;
1556*43a90889SApple OSS Distributions
1557*43a90889SApple OSS Distributions assert(ref->count);
1558*43a90889SApple OSS Distributions if (offset) {
1559*43a90889SApple OSS Distributions *offset = (uint64_t) data_offset;
1560*43a90889SApple OSS Distributions }
1561*43a90889SApple OSS Distributions total = 0;
1562*43a90889SApple OSS Distributions for (idx = 0; idx < ref->count; idx++) {
1563*43a90889SApple OSS Distributions kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1564*43a90889SApple OSS Distributions &data_offset);
1565*43a90889SApple OSS Distributions if (KERN_SUCCESS != kr) {
1566*43a90889SApple OSS Distributions DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1567*43a90889SApple OSS Distributions } else if (0 != data_offset) {
1568*43a90889SApple OSS Distributions DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1569*43a90889SApple OSS Distributions }
1570*43a90889SApple OSS Distributions if (offset && !idx) {
1571*43a90889SApple OSS Distributions *offset = (uint64_t) data_offset;
1572*43a90889SApple OSS Distributions }
1573*43a90889SApple OSS Distributions total += round_page(data_offset + ref->entries[idx].size);
1574*43a90889SApple OSS Distributions }
1575*43a90889SApple OSS Distributions
1576*43a90889SApple OSS Distributions DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1577*43a90889SApple OSS Distributions (offset ? *offset : (vm_object_offset_t)-1), total);
1578*43a90889SApple OSS Distributions
1579*43a90889SApple OSS Distributions return total;
1580*43a90889SApple OSS Distributions }
1581*43a90889SApple OSS Distributions
1582*43a90889SApple OSS Distributions
1583*43a90889SApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1584*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1585*43a90889SApple OSS Distributions IOMemoryReference * ref,
1586*43a90889SApple OSS Distributions IOByteCount * residentPageCount,
1587*43a90889SApple OSS Distributions IOByteCount * dirtyPageCount)
1588*43a90889SApple OSS Distributions {
1589*43a90889SApple OSS Distributions IOReturn err;
1590*43a90889SApple OSS Distributions IOMemoryEntry * entries;
1591*43a90889SApple OSS Distributions unsigned int resident, dirty;
1592*43a90889SApple OSS Distributions unsigned int totalResident, totalDirty;
1593*43a90889SApple OSS Distributions
1594*43a90889SApple OSS Distributions totalResident = totalDirty = 0;
1595*43a90889SApple OSS Distributions err = kIOReturnSuccess;
1596*43a90889SApple OSS Distributions entries = ref->entries + ref->count;
1597*43a90889SApple OSS Distributions while (entries > &ref->entries[0]) {
1598*43a90889SApple OSS Distributions entries--;
1599*43a90889SApple OSS Distributions err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1600*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
1601*43a90889SApple OSS Distributions break;
1602*43a90889SApple OSS Distributions }
1603*43a90889SApple OSS Distributions totalResident += resident;
1604*43a90889SApple OSS Distributions totalDirty += dirty;
1605*43a90889SApple OSS Distributions }
1606*43a90889SApple OSS Distributions
1607*43a90889SApple OSS Distributions if (residentPageCount) {
1608*43a90889SApple OSS Distributions *residentPageCount = totalResident;
1609*43a90889SApple OSS Distributions }
1610*43a90889SApple OSS Distributions if (dirtyPageCount) {
1611*43a90889SApple OSS Distributions *dirtyPageCount = totalDirty;
1612*43a90889SApple OSS Distributions }
1613*43a90889SApple OSS Distributions return err;
1614*43a90889SApple OSS Distributions }
1615*43a90889SApple OSS Distributions
1616*43a90889SApple OSS Distributions IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1617*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1618*43a90889SApple OSS Distributions IOMemoryReference * ref,
1619*43a90889SApple OSS Distributions IOOptionBits newState,
1620*43a90889SApple OSS Distributions IOOptionBits * oldState)
1621*43a90889SApple OSS Distributions {
1622*43a90889SApple OSS Distributions IOReturn err;
1623*43a90889SApple OSS Distributions IOMemoryEntry * entries;
1624*43a90889SApple OSS Distributions vm_purgable_t control;
1625*43a90889SApple OSS Distributions int totalState, state;
1626*43a90889SApple OSS Distributions
1627*43a90889SApple OSS Distributions totalState = kIOMemoryPurgeableNonVolatile;
1628*43a90889SApple OSS Distributions err = kIOReturnSuccess;
1629*43a90889SApple OSS Distributions entries = ref->entries + ref->count;
1630*43a90889SApple OSS Distributions while (entries > &ref->entries[0]) {
1631*43a90889SApple OSS Distributions entries--;
1632*43a90889SApple OSS Distributions
1633*43a90889SApple OSS Distributions err = purgeableControlBits(newState, &control, &state);
1634*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
1635*43a90889SApple OSS Distributions break;
1636*43a90889SApple OSS Distributions }
1637*43a90889SApple OSS Distributions err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1638*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
1639*43a90889SApple OSS Distributions break;
1640*43a90889SApple OSS Distributions }
1641*43a90889SApple OSS Distributions err = purgeableStateBits(&state);
1642*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
1643*43a90889SApple OSS Distributions break;
1644*43a90889SApple OSS Distributions }
1645*43a90889SApple OSS Distributions
1646*43a90889SApple OSS Distributions if (kIOMemoryPurgeableEmpty == state) {
1647*43a90889SApple OSS Distributions totalState = kIOMemoryPurgeableEmpty;
1648*43a90889SApple OSS Distributions } else if (kIOMemoryPurgeableEmpty == totalState) {
1649*43a90889SApple OSS Distributions continue;
1650*43a90889SApple OSS Distributions } else if (kIOMemoryPurgeableVolatile == totalState) {
1651*43a90889SApple OSS Distributions continue;
1652*43a90889SApple OSS Distributions } else if (kIOMemoryPurgeableVolatile == state) {
1653*43a90889SApple OSS Distributions totalState = kIOMemoryPurgeableVolatile;
1654*43a90889SApple OSS Distributions } else {
1655*43a90889SApple OSS Distributions totalState = kIOMemoryPurgeableNonVolatile;
1656*43a90889SApple OSS Distributions }
1657*43a90889SApple OSS Distributions }
1658*43a90889SApple OSS Distributions
1659*43a90889SApple OSS Distributions if (oldState) {
1660*43a90889SApple OSS Distributions *oldState = totalState;
1661*43a90889SApple OSS Distributions }
1662*43a90889SApple OSS Distributions return err;
1663*43a90889SApple OSS Distributions }
1664*43a90889SApple OSS Distributions
1665*43a90889SApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1666*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667*43a90889SApple OSS Distributions IOMemoryReference * ref,
1668*43a90889SApple OSS Distributions task_t newOwner,
1669*43a90889SApple OSS Distributions int newLedgerTag,
1670*43a90889SApple OSS Distributions IOOptionBits newLedgerOptions)
1671*43a90889SApple OSS Distributions {
1672*43a90889SApple OSS Distributions IOReturn err, totalErr;
1673*43a90889SApple OSS Distributions IOMemoryEntry * entries;
1674*43a90889SApple OSS Distributions
1675*43a90889SApple OSS Distributions totalErr = kIOReturnSuccess;
1676*43a90889SApple OSS Distributions entries = ref->entries + ref->count;
1677*43a90889SApple OSS Distributions while (entries > &ref->entries[0]) {
1678*43a90889SApple OSS Distributions entries--;
1679*43a90889SApple OSS Distributions
1680*43a90889SApple OSS Distributions err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1681*43a90889SApple OSS Distributions if (KERN_SUCCESS != err) {
1682*43a90889SApple OSS Distributions totalErr = err;
1683*43a90889SApple OSS Distributions }
1684*43a90889SApple OSS Distributions }
1685*43a90889SApple OSS Distributions
1686*43a90889SApple OSS Distributions return totalErr;
1687*43a90889SApple OSS Distributions }
1688*43a90889SApple OSS Distributions
1689*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690*43a90889SApple OSS Distributions
1691*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1692*43a90889SApple OSS Distributions IOMemoryDescriptor::withAddress(void * address,
1693*43a90889SApple OSS Distributions IOByteCount length,
1694*43a90889SApple OSS Distributions IODirection direction)
1695*43a90889SApple OSS Distributions {
1696*43a90889SApple OSS Distributions return IOMemoryDescriptor::
1697*43a90889SApple OSS Distributions withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1698*43a90889SApple OSS Distributions }
1699*43a90889SApple OSS Distributions
1700*43a90889SApple OSS Distributions #ifndef __LP64__
1701*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1702*43a90889SApple OSS Distributions IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703*43a90889SApple OSS Distributions IOByteCount length,
1704*43a90889SApple OSS Distributions IODirection direction,
1705*43a90889SApple OSS Distributions task_t task)
1706*43a90889SApple OSS Distributions {
1707*43a90889SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708*43a90889SApple OSS Distributions if (that) {
1709*43a90889SApple OSS Distributions if (that->initWithAddress(address, length, direction, task)) {
1710*43a90889SApple OSS Distributions return os::move(that);
1711*43a90889SApple OSS Distributions }
1712*43a90889SApple OSS Distributions }
1713*43a90889SApple OSS Distributions return nullptr;
1714*43a90889SApple OSS Distributions }
1715*43a90889SApple OSS Distributions #endif /* !__LP64__ */
1716*43a90889SApple OSS Distributions
1717*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1718*43a90889SApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1719*43a90889SApple OSS Distributions IOPhysicalAddress address,
1720*43a90889SApple OSS Distributions IOByteCount length,
1721*43a90889SApple OSS Distributions IODirection direction )
1722*43a90889SApple OSS Distributions {
1723*43a90889SApple OSS Distributions return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1724*43a90889SApple OSS Distributions }
1725*43a90889SApple OSS Distributions
1726*43a90889SApple OSS Distributions #ifndef __LP64__
1727*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1728*43a90889SApple OSS Distributions IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729*43a90889SApple OSS Distributions UInt32 withCount,
1730*43a90889SApple OSS Distributions IODirection direction,
1731*43a90889SApple OSS Distributions task_t task,
1732*43a90889SApple OSS Distributions bool asReference)
1733*43a90889SApple OSS Distributions {
1734*43a90889SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735*43a90889SApple OSS Distributions if (that) {
1736*43a90889SApple OSS Distributions if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737*43a90889SApple OSS Distributions return os::move(that);
1738*43a90889SApple OSS Distributions }
1739*43a90889SApple OSS Distributions }
1740*43a90889SApple OSS Distributions return nullptr;
1741*43a90889SApple OSS Distributions }
1742*43a90889SApple OSS Distributions #endif /* !__LP64__ */
1743*43a90889SApple OSS Distributions
1744*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1745*43a90889SApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746*43a90889SApple OSS Distributions mach_vm_size_t length,
1747*43a90889SApple OSS Distributions IOOptionBits options,
1748*43a90889SApple OSS Distributions task_t task)
1749*43a90889SApple OSS Distributions {
1750*43a90889SApple OSS Distributions IOAddressRange range = { address, length };
1751*43a90889SApple OSS Distributions return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1752*43a90889SApple OSS Distributions }
1753*43a90889SApple OSS Distributions
1754*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1755*43a90889SApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1756*43a90889SApple OSS Distributions UInt32 rangeCount,
1757*43a90889SApple OSS Distributions IOOptionBits options,
1758*43a90889SApple OSS Distributions task_t task)
1759*43a90889SApple OSS Distributions {
1760*43a90889SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761*43a90889SApple OSS Distributions if (that) {
1762*43a90889SApple OSS Distributions if (task) {
1763*43a90889SApple OSS Distributions options |= kIOMemoryTypeVirtual64;
1764*43a90889SApple OSS Distributions } else {
1765*43a90889SApple OSS Distributions options |= kIOMemoryTypePhysical64;
1766*43a90889SApple OSS Distributions }
1767*43a90889SApple OSS Distributions
1768*43a90889SApple OSS Distributions if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1769*43a90889SApple OSS Distributions return os::move(that);
1770*43a90889SApple OSS Distributions }
1771*43a90889SApple OSS Distributions }
1772*43a90889SApple OSS Distributions
1773*43a90889SApple OSS Distributions return nullptr;
1774*43a90889SApple OSS Distributions }
1775*43a90889SApple OSS Distributions
1776*43a90889SApple OSS Distributions
1777*43a90889SApple OSS Distributions /*
1778*43a90889SApple OSS Distributions * withOptions:
1779*43a90889SApple OSS Distributions *
1780*43a90889SApple OSS Distributions * Create a new IOMemoryDescriptor. The buffer is made up of several
1781*43a90889SApple OSS Distributions * virtual address ranges, from a given task.
1782*43a90889SApple OSS Distributions *
1783*43a90889SApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1784*43a90889SApple OSS Distributions */
1785*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1786*43a90889SApple OSS Distributions IOMemoryDescriptor::withOptions(void * buffers,
1787*43a90889SApple OSS Distributions UInt32 count,
1788*43a90889SApple OSS Distributions UInt32 offset,
1789*43a90889SApple OSS Distributions task_t task,
1790*43a90889SApple OSS Distributions IOOptionBits opts,
1791*43a90889SApple OSS Distributions IOMapper * mapper)
1792*43a90889SApple OSS Distributions {
1793*43a90889SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794*43a90889SApple OSS Distributions
1795*43a90889SApple OSS Distributions if (self
1796*43a90889SApple OSS Distributions && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1797*43a90889SApple OSS Distributions return nullptr;
1798*43a90889SApple OSS Distributions }
1799*43a90889SApple OSS Distributions
1800*43a90889SApple OSS Distributions return os::move(self);
1801*43a90889SApple OSS Distributions }
1802*43a90889SApple OSS Distributions
1803*43a90889SApple OSS Distributions bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1804*43a90889SApple OSS Distributions IOMemoryDescriptor::initWithOptions(void * buffers,
1805*43a90889SApple OSS Distributions UInt32 count,
1806*43a90889SApple OSS Distributions UInt32 offset,
1807*43a90889SApple OSS Distributions task_t task,
1808*43a90889SApple OSS Distributions IOOptionBits options,
1809*43a90889SApple OSS Distributions IOMapper * mapper)
1810*43a90889SApple OSS Distributions {
1811*43a90889SApple OSS Distributions return false;
1812*43a90889SApple OSS Distributions }
1813*43a90889SApple OSS Distributions
1814*43a90889SApple OSS Distributions #ifndef __LP64__
1815*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1816*43a90889SApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817*43a90889SApple OSS Distributions UInt32 withCount,
1818*43a90889SApple OSS Distributions IODirection direction,
1819*43a90889SApple OSS Distributions bool asReference)
1820*43a90889SApple OSS Distributions {
1821*43a90889SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822*43a90889SApple OSS Distributions if (that) {
1823*43a90889SApple OSS Distributions if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824*43a90889SApple OSS Distributions return os::move(that);
1825*43a90889SApple OSS Distributions }
1826*43a90889SApple OSS Distributions }
1827*43a90889SApple OSS Distributions return nullptr;
1828*43a90889SApple OSS Distributions }
1829*43a90889SApple OSS Distributions
1830*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1831*43a90889SApple OSS Distributions IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1832*43a90889SApple OSS Distributions IOByteCount offset,
1833*43a90889SApple OSS Distributions IOByteCount length,
1834*43a90889SApple OSS Distributions IODirection direction)
1835*43a90889SApple OSS Distributions {
1836*43a90889SApple OSS Distributions return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1837*43a90889SApple OSS Distributions }
1838*43a90889SApple OSS Distributions #endif /* !__LP64__ */
1839*43a90889SApple OSS Distributions
1840*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1841*43a90889SApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842*43a90889SApple OSS Distributions {
1843*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor *origGenMD =
1844*43a90889SApple OSS Distributions OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845*43a90889SApple OSS Distributions
1846*43a90889SApple OSS Distributions if (origGenMD) {
1847*43a90889SApple OSS Distributions return IOGeneralMemoryDescriptor::
1848*43a90889SApple OSS Distributions withPersistentMemoryDescriptor(origGenMD);
1849*43a90889SApple OSS Distributions } else {
1850*43a90889SApple OSS Distributions return nullptr;
1851*43a90889SApple OSS Distributions }
1852*43a90889SApple OSS Distributions }
1853*43a90889SApple OSS Distributions
1854*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor * originalMD)1855*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1856*43a90889SApple OSS Distributions {
1857*43a90889SApple OSS Distributions IOMemoryReference * memRef;
1858*43a90889SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> self;
1859*43a90889SApple OSS Distributions
1860*43a90889SApple OSS Distributions if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1861*43a90889SApple OSS Distributions return nullptr;
1862*43a90889SApple OSS Distributions }
1863*43a90889SApple OSS Distributions
1864*43a90889SApple OSS Distributions if (memRef == originalMD->_memRef) {
1865*43a90889SApple OSS Distributions self.reset(originalMD, OSRetain);
1866*43a90889SApple OSS Distributions originalMD->memoryReferenceRelease(memRef);
1867*43a90889SApple OSS Distributions return os::move(self);
1868*43a90889SApple OSS Distributions }
1869*43a90889SApple OSS Distributions
1870*43a90889SApple OSS Distributions self = OSMakeShared<IOGeneralMemoryDescriptor>();
1871*43a90889SApple OSS Distributions IOMDPersistentInitData initData = { originalMD, memRef };
1872*43a90889SApple OSS Distributions
1873*43a90889SApple OSS Distributions if (self
1874*43a90889SApple OSS Distributions && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1875*43a90889SApple OSS Distributions return nullptr;
1876*43a90889SApple OSS Distributions }
1877*43a90889SApple OSS Distributions return os::move(self);
1878*43a90889SApple OSS Distributions }
1879*43a90889SApple OSS Distributions
1880*43a90889SApple OSS Distributions #ifndef __LP64__
1881*43a90889SApple OSS Distributions bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1882*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(void * address,
1883*43a90889SApple OSS Distributions IOByteCount withLength,
1884*43a90889SApple OSS Distributions IODirection withDirection)
1885*43a90889SApple OSS Distributions {
1886*43a90889SApple OSS Distributions _singleRange.v.address = (vm_offset_t) address;
1887*43a90889SApple OSS Distributions _singleRange.v.length = withLength;
1888*43a90889SApple OSS Distributions
1889*43a90889SApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1890*43a90889SApple OSS Distributions }
1891*43a90889SApple OSS Distributions
1892*43a90889SApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1893*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1894*43a90889SApple OSS Distributions IOByteCount withLength,
1895*43a90889SApple OSS Distributions IODirection withDirection,
1896*43a90889SApple OSS Distributions task_t withTask)
1897*43a90889SApple OSS Distributions {
1898*43a90889SApple OSS Distributions _singleRange.v.address = address;
1899*43a90889SApple OSS Distributions _singleRange.v.length = withLength;
1900*43a90889SApple OSS Distributions
1901*43a90889SApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1902*43a90889SApple OSS Distributions }
1903*43a90889SApple OSS Distributions
1904*43a90889SApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1905*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1906*43a90889SApple OSS Distributions IOPhysicalAddress address,
1907*43a90889SApple OSS Distributions IOByteCount withLength,
1908*43a90889SApple OSS Distributions IODirection withDirection )
1909*43a90889SApple OSS Distributions {
1910*43a90889SApple OSS Distributions _singleRange.p.address = address;
1911*43a90889SApple OSS Distributions _singleRange.p.length = withLength;
1912*43a90889SApple OSS Distributions
1913*43a90889SApple OSS Distributions return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1914*43a90889SApple OSS Distributions }
1915*43a90889SApple OSS Distributions
1916*43a90889SApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1917*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918*43a90889SApple OSS Distributions IOPhysicalRange * ranges,
1919*43a90889SApple OSS Distributions UInt32 count,
1920*43a90889SApple OSS Distributions IODirection direction,
1921*43a90889SApple OSS Distributions bool reference)
1922*43a90889SApple OSS Distributions {
1923*43a90889SApple OSS Distributions IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924*43a90889SApple OSS Distributions
1925*43a90889SApple OSS Distributions if (reference) {
1926*43a90889SApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1927*43a90889SApple OSS Distributions }
1928*43a90889SApple OSS Distributions
1929*43a90889SApple OSS Distributions return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930*43a90889SApple OSS Distributions }
1931*43a90889SApple OSS Distributions
1932*43a90889SApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1933*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1934*43a90889SApple OSS Distributions IOVirtualRange * ranges,
1935*43a90889SApple OSS Distributions UInt32 count,
1936*43a90889SApple OSS Distributions IODirection direction,
1937*43a90889SApple OSS Distributions task_t task,
1938*43a90889SApple OSS Distributions bool reference)
1939*43a90889SApple OSS Distributions {
1940*43a90889SApple OSS Distributions IOOptionBits mdOpts = direction;
1941*43a90889SApple OSS Distributions
1942*43a90889SApple OSS Distributions if (reference) {
1943*43a90889SApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1944*43a90889SApple OSS Distributions }
1945*43a90889SApple OSS Distributions
1946*43a90889SApple OSS Distributions if (task) {
1947*43a90889SApple OSS Distributions mdOpts |= kIOMemoryTypeVirtual;
1948*43a90889SApple OSS Distributions
1949*43a90889SApple OSS Distributions // Auto-prepare if this is a kernel memory descriptor as very few
1950*43a90889SApple OSS Distributions // clients bother to prepare() kernel memory.
1951*43a90889SApple OSS Distributions // But it was not enforced so what are you going to do?
1952*43a90889SApple OSS Distributions if (task == kernel_task) {
1953*43a90889SApple OSS Distributions mdOpts |= kIOMemoryAutoPrepare;
1954*43a90889SApple OSS Distributions }
1955*43a90889SApple OSS Distributions } else {
1956*43a90889SApple OSS Distributions mdOpts |= kIOMemoryTypePhysical;
1957*43a90889SApple OSS Distributions }
1958*43a90889SApple OSS Distributions
1959*43a90889SApple OSS Distributions return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960*43a90889SApple OSS Distributions }
1961*43a90889SApple OSS Distributions #endif /* !__LP64__ */
1962*43a90889SApple OSS Distributions
1963*43a90889SApple OSS Distributions /*
1964*43a90889SApple OSS Distributions * initWithOptions:
1965*43a90889SApple OSS Distributions *
1966*43a90889SApple OSS Distributions * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1967*43a90889SApple OSS Distributions * from a given task, several physical ranges, an UPL from the ubc
1968*43a90889SApple OSS Distributions * system or a uio (may be 64bit) from the BSD subsystem.
1969*43a90889SApple OSS Distributions *
1970*43a90889SApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1971*43a90889SApple OSS Distributions *
1972*43a90889SApple OSS Distributions * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973*43a90889SApple OSS Distributions * existing instance -- note this behavior is not commonly supported in other
1974*43a90889SApple OSS Distributions * I/O Kit classes, although it is supported here.
1975*43a90889SApple OSS Distributions */
1976*43a90889SApple OSS Distributions
1977*43a90889SApple OSS Distributions bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1978*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1979*43a90889SApple OSS Distributions UInt32 count,
1980*43a90889SApple OSS Distributions UInt32 offset,
1981*43a90889SApple OSS Distributions task_t task,
1982*43a90889SApple OSS Distributions IOOptionBits options,
1983*43a90889SApple OSS Distributions IOMapper * mapper)
1984*43a90889SApple OSS Distributions {
1985*43a90889SApple OSS Distributions IOOptionBits type = options & kIOMemoryTypeMask;
1986*43a90889SApple OSS Distributions
1987*43a90889SApple OSS Distributions #ifndef __LP64__
1988*43a90889SApple OSS Distributions if (task
1989*43a90889SApple OSS Distributions && (kIOMemoryTypeVirtual == type)
1990*43a90889SApple OSS Distributions && vm_map_is_64bit(get_task_map(task))
1991*43a90889SApple OSS Distributions && ((IOVirtualRange *) buffers)->address) {
1992*43a90889SApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1993*43a90889SApple OSS Distributions return false;
1994*43a90889SApple OSS Distributions }
1995*43a90889SApple OSS Distributions #endif /* !__LP64__ */
1996*43a90889SApple OSS Distributions
1997*43a90889SApple OSS Distributions // Grab the original MD's configuation data to initialse the
1998*43a90889SApple OSS Distributions // arguments to this function.
1999*43a90889SApple OSS Distributions if (kIOMemoryTypePersistentMD == type) {
2000*43a90889SApple OSS Distributions IOMDPersistentInitData *initData = (typeof(initData))buffers;
2001*43a90889SApple OSS Distributions const IOGeneralMemoryDescriptor *orig = initData->fMD;
2002*43a90889SApple OSS Distributions ioGMDData *dataP = getDataP(orig->_memoryEntries);
2003*43a90889SApple OSS Distributions
2004*43a90889SApple OSS Distributions // Only accept persistent memory descriptors with valid dataP data.
2005*43a90889SApple OSS Distributions assert(orig->_rangesCount == 1);
2006*43a90889SApple OSS Distributions if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
2007*43a90889SApple OSS Distributions return false;
2008*43a90889SApple OSS Distributions }
2009*43a90889SApple OSS Distributions
2010*43a90889SApple OSS Distributions _memRef = initData->fMemRef; // Grab the new named entry
2011*43a90889SApple OSS Distributions options = orig->_flags & ~kIOMemoryAsReference;
2012*43a90889SApple OSS Distributions type = options & kIOMemoryTypeMask;
2013*43a90889SApple OSS Distributions buffers = orig->_ranges.v;
2014*43a90889SApple OSS Distributions count = orig->_rangesCount;
2015*43a90889SApple OSS Distributions
2016*43a90889SApple OSS Distributions // Now grab the original task and whatever mapper was previously used
2017*43a90889SApple OSS Distributions task = orig->_task;
2018*43a90889SApple OSS Distributions mapper = dataP->fMapper;
2019*43a90889SApple OSS Distributions
2020*43a90889SApple OSS Distributions // We are ready to go through the original initialisation now
2021*43a90889SApple OSS Distributions }
2022*43a90889SApple OSS Distributions
2023*43a90889SApple OSS Distributions switch (type) {
2024*43a90889SApple OSS Distributions case kIOMemoryTypeUIO:
2025*43a90889SApple OSS Distributions case kIOMemoryTypeVirtual:
2026*43a90889SApple OSS Distributions #ifndef __LP64__
2027*43a90889SApple OSS Distributions case kIOMemoryTypeVirtual64:
2028*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2029*43a90889SApple OSS Distributions assert(task);
2030*43a90889SApple OSS Distributions if (!task) {
2031*43a90889SApple OSS Distributions return false;
2032*43a90889SApple OSS Distributions }
2033*43a90889SApple OSS Distributions break;
2034*43a90889SApple OSS Distributions
2035*43a90889SApple OSS Distributions case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
2036*43a90889SApple OSS Distributions #ifndef __LP64__
2037*43a90889SApple OSS Distributions case kIOMemoryTypePhysical64:
2038*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2039*43a90889SApple OSS Distributions case kIOMemoryTypeUPL:
2040*43a90889SApple OSS Distributions assert(!task);
2041*43a90889SApple OSS Distributions break;
2042*43a90889SApple OSS Distributions default:
2043*43a90889SApple OSS Distributions return false; /* bad argument */
2044*43a90889SApple OSS Distributions }
2045*43a90889SApple OSS Distributions
2046*43a90889SApple OSS Distributions assert(buffers);
2047*43a90889SApple OSS Distributions assert(count);
2048*43a90889SApple OSS Distributions
2049*43a90889SApple OSS Distributions /*
2050*43a90889SApple OSS Distributions * We can check the _initialized instance variable before having ever set
2051*43a90889SApple OSS Distributions * it to an initial value because I/O Kit guarantees that all our instance
2052*43a90889SApple OSS Distributions * variables are zeroed on an object's allocation.
2053*43a90889SApple OSS Distributions */
2054*43a90889SApple OSS Distributions
2055*43a90889SApple OSS Distributions if (_initialized) {
2056*43a90889SApple OSS Distributions /*
2057*43a90889SApple OSS Distributions * An existing memory descriptor is being retargeted to point to
2058*43a90889SApple OSS Distributions * somewhere else. Clean up our present state.
2059*43a90889SApple OSS Distributions */
2060*43a90889SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
2061*43a90889SApple OSS Distributions if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2062*43a90889SApple OSS Distributions while (_wireCount) {
2063*43a90889SApple OSS Distributions complete();
2064*43a90889SApple OSS Distributions }
2065*43a90889SApple OSS Distributions }
2066*43a90889SApple OSS Distributions if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2067*43a90889SApple OSS Distributions if (kIOMemoryTypeUIO == type) {
2068*43a90889SApple OSS Distributions uio_free((uio_t) _ranges.v);
2069*43a90889SApple OSS Distributions }
2070*43a90889SApple OSS Distributions #ifndef __LP64__
2071*43a90889SApple OSS Distributions else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2072*43a90889SApple OSS Distributions IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2073*43a90889SApple OSS Distributions }
2074*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2075*43a90889SApple OSS Distributions else {
2076*43a90889SApple OSS Distributions IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2077*43a90889SApple OSS Distributions }
2078*43a90889SApple OSS Distributions }
2079*43a90889SApple OSS Distributions
2080*43a90889SApple OSS Distributions options |= (kIOMemoryRedirected & _flags);
2081*43a90889SApple OSS Distributions if (!(kIOMemoryRedirected & options)) {
2082*43a90889SApple OSS Distributions if (_memRef) {
2083*43a90889SApple OSS Distributions memoryReferenceRelease(_memRef);
2084*43a90889SApple OSS Distributions _memRef = NULL;
2085*43a90889SApple OSS Distributions }
2086*43a90889SApple OSS Distributions if (_mappings) {
2087*43a90889SApple OSS Distributions _mappings->flushCollection();
2088*43a90889SApple OSS Distributions }
2089*43a90889SApple OSS Distributions }
2090*43a90889SApple OSS Distributions } else {
2091*43a90889SApple OSS Distributions if (!super::init()) {
2092*43a90889SApple OSS Distributions return false;
2093*43a90889SApple OSS Distributions }
2094*43a90889SApple OSS Distributions _initialized = true;
2095*43a90889SApple OSS Distributions }
2096*43a90889SApple OSS Distributions
2097*43a90889SApple OSS Distributions // Grab the appropriate mapper
2098*43a90889SApple OSS Distributions if (kIOMemoryHostOrRemote & options) {
2099*43a90889SApple OSS Distributions options |= kIOMemoryMapperNone;
2100*43a90889SApple OSS Distributions }
2101*43a90889SApple OSS Distributions if (kIOMemoryMapperNone & options) {
2102*43a90889SApple OSS Distributions mapper = NULL; // No Mapper
2103*43a90889SApple OSS Distributions } else if (mapper == kIOMapperSystem) {
2104*43a90889SApple OSS Distributions IOMapper::checkForSystemMapper();
2105*43a90889SApple OSS Distributions gIOSystemMapper = mapper = IOMapper::gSystem;
2106*43a90889SApple OSS Distributions }
2107*43a90889SApple OSS Distributions
2108*43a90889SApple OSS Distributions // Remove the dynamic internal use flags from the initial setting
2109*43a90889SApple OSS Distributions options &= ~(kIOMemoryPreparedReadOnly);
2110*43a90889SApple OSS Distributions _flags = options;
2111*43a90889SApple OSS Distributions _task = task;
2112*43a90889SApple OSS Distributions
2113*43a90889SApple OSS Distributions #ifndef __LP64__
2114*43a90889SApple OSS Distributions _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
2115*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2116*43a90889SApple OSS Distributions
2117*43a90889SApple OSS Distributions _dmaReferences = 0;
2118*43a90889SApple OSS Distributions __iomd_reservedA = 0;
2119*43a90889SApple OSS Distributions __iomd_reservedB = 0;
2120*43a90889SApple OSS Distributions _highestPage = 0;
2121*43a90889SApple OSS Distributions
2122*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & options) {
2123*43a90889SApple OSS Distributions if (!_prepareLock) {
2124*43a90889SApple OSS Distributions _prepareLock = IOLockAlloc();
2125*43a90889SApple OSS Distributions }
2126*43a90889SApple OSS Distributions } else if (_prepareLock) {
2127*43a90889SApple OSS Distributions IOLockFree(_prepareLock);
2128*43a90889SApple OSS Distributions _prepareLock = NULL;
2129*43a90889SApple OSS Distributions }
2130*43a90889SApple OSS Distributions
2131*43a90889SApple OSS Distributions if (kIOMemoryTypeUPL == type) {
2132*43a90889SApple OSS Distributions ioGMDData *dataP;
2133*43a90889SApple OSS Distributions unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2134*43a90889SApple OSS Distributions
2135*43a90889SApple OSS Distributions if (!initMemoryEntries(dataSize, mapper)) {
2136*43a90889SApple OSS Distributions return false;
2137*43a90889SApple OSS Distributions }
2138*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
2139*43a90889SApple OSS Distributions dataP->fPageCnt = 0;
2140*43a90889SApple OSS Distributions switch (kIOMemoryDirectionMask & options) {
2141*43a90889SApple OSS Distributions case kIODirectionOut:
2142*43a90889SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess;
2143*43a90889SApple OSS Distributions break;
2144*43a90889SApple OSS Distributions case kIODirectionIn:
2145*43a90889SApple OSS Distributions dataP->fDMAAccess = kIODMAMapWriteAccess;
2146*43a90889SApple OSS Distributions break;
2147*43a90889SApple OSS Distributions case kIODirectionNone:
2148*43a90889SApple OSS Distributions case kIODirectionOutIn:
2149*43a90889SApple OSS Distributions default:
2150*43a90889SApple OSS Distributions panic("bad dir for upl 0x%x", (int) options);
2151*43a90889SApple OSS Distributions break;
2152*43a90889SApple OSS Distributions }
2153*43a90889SApple OSS Distributions // _wireCount++; // UPLs start out life wired
2154*43a90889SApple OSS Distributions
2155*43a90889SApple OSS Distributions _length = count;
2156*43a90889SApple OSS Distributions _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2157*43a90889SApple OSS Distributions
2158*43a90889SApple OSS Distributions ioPLBlock iopl;
2159*43a90889SApple OSS Distributions iopl.fIOPL = (upl_t) buffers;
2160*43a90889SApple OSS Distributions upl_set_referenced(iopl.fIOPL, true);
2161*43a90889SApple OSS Distributions upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2162*43a90889SApple OSS Distributions
2163*43a90889SApple OSS Distributions if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2164*43a90889SApple OSS Distributions panic("short external upl");
2165*43a90889SApple OSS Distributions }
2166*43a90889SApple OSS Distributions
2167*43a90889SApple OSS Distributions _highestPage = upl_get_highest_page(iopl.fIOPL);
2168*43a90889SApple OSS Distributions DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2169*43a90889SApple OSS Distributions
2170*43a90889SApple OSS Distributions // Set the flag kIOPLOnDevice convieniently equal to 1
2171*43a90889SApple OSS Distributions iopl.fFlags = pageList->device | kIOPLExternUPL;
2172*43a90889SApple OSS Distributions if (!pageList->device) {
2173*43a90889SApple OSS Distributions // Pre-compute the offset into the UPL's page list
2174*43a90889SApple OSS Distributions pageList = &pageList[atop_32(offset)];
2175*43a90889SApple OSS Distributions offset &= PAGE_MASK;
2176*43a90889SApple OSS Distributions }
2177*43a90889SApple OSS Distributions iopl.fIOMDOffset = 0;
2178*43a90889SApple OSS Distributions iopl.fMappedPage = 0;
2179*43a90889SApple OSS Distributions iopl.fPageInfo = (vm_address_t) pageList;
2180*43a90889SApple OSS Distributions iopl.fPageOffset = offset;
2181*43a90889SApple OSS Distributions _memoryEntries->appendBytes(&iopl, sizeof(iopl));
2182*43a90889SApple OSS Distributions } else {
2183*43a90889SApple OSS Distributions // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2184*43a90889SApple OSS Distributions // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2185*43a90889SApple OSS Distributions
2186*43a90889SApple OSS Distributions // Initialize the memory descriptor
2187*43a90889SApple OSS Distributions if (options & kIOMemoryAsReference) {
2188*43a90889SApple OSS Distributions #ifndef __LP64__
2189*43a90889SApple OSS Distributions _rangesIsAllocated = false;
2190*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2191*43a90889SApple OSS Distributions
2192*43a90889SApple OSS Distributions // Hack assignment to get the buffer arg into _ranges.
2193*43a90889SApple OSS Distributions // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2194*43a90889SApple OSS Distributions // work, C++ sigh.
2195*43a90889SApple OSS Distributions // This also initialises the uio & physical ranges.
2196*43a90889SApple OSS Distributions _ranges.v = (IOVirtualRange *) buffers;
2197*43a90889SApple OSS Distributions } else {
2198*43a90889SApple OSS Distributions #ifndef __LP64__
2199*43a90889SApple OSS Distributions _rangesIsAllocated = true;
2200*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2201*43a90889SApple OSS Distributions switch (type) {
2202*43a90889SApple OSS Distributions case kIOMemoryTypeUIO:
2203*43a90889SApple OSS Distributions _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2204*43a90889SApple OSS Distributions break;
2205*43a90889SApple OSS Distributions
2206*43a90889SApple OSS Distributions #ifndef __LP64__
2207*43a90889SApple OSS Distributions case kIOMemoryTypeVirtual64:
2208*43a90889SApple OSS Distributions case kIOMemoryTypePhysical64:
2209*43a90889SApple OSS Distributions if (count == 1
2210*43a90889SApple OSS Distributions #ifndef __arm__
2211*43a90889SApple OSS Distributions && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2212*43a90889SApple OSS Distributions #endif
2213*43a90889SApple OSS Distributions ) {
2214*43a90889SApple OSS Distributions if (type == kIOMemoryTypeVirtual64) {
2215*43a90889SApple OSS Distributions type = kIOMemoryTypeVirtual;
2216*43a90889SApple OSS Distributions } else {
2217*43a90889SApple OSS Distributions type = kIOMemoryTypePhysical;
2218*43a90889SApple OSS Distributions }
2219*43a90889SApple OSS Distributions _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2220*43a90889SApple OSS Distributions _rangesIsAllocated = false;
2221*43a90889SApple OSS Distributions _ranges.v = &_singleRange.v;
2222*43a90889SApple OSS Distributions _singleRange.v.address = ((IOAddressRange *) buffers)->address;
2223*43a90889SApple OSS Distributions _singleRange.v.length = ((IOAddressRange *) buffers)->length;
2224*43a90889SApple OSS Distributions break;
2225*43a90889SApple OSS Distributions }
2226*43a90889SApple OSS Distributions _ranges.v64 = IONew(IOAddressRange, count);
2227*43a90889SApple OSS Distributions if (!_ranges.v64) {
2228*43a90889SApple OSS Distributions return false;
2229*43a90889SApple OSS Distributions }
2230*43a90889SApple OSS Distributions bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2231*43a90889SApple OSS Distributions break;
2232*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2233*43a90889SApple OSS Distributions case kIOMemoryTypeVirtual:
2234*43a90889SApple OSS Distributions case kIOMemoryTypePhysical:
2235*43a90889SApple OSS Distributions if (count == 1) {
2236*43a90889SApple OSS Distributions _flags |= kIOMemoryAsReference;
2237*43a90889SApple OSS Distributions #ifndef __LP64__
2238*43a90889SApple OSS Distributions _rangesIsAllocated = false;
2239*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2240*43a90889SApple OSS Distributions _ranges.v = &_singleRange.v;
2241*43a90889SApple OSS Distributions } else {
2242*43a90889SApple OSS Distributions _ranges.v = IONew(IOVirtualRange, count);
2243*43a90889SApple OSS Distributions if (!_ranges.v) {
2244*43a90889SApple OSS Distributions return false;
2245*43a90889SApple OSS Distributions }
2246*43a90889SApple OSS Distributions }
2247*43a90889SApple OSS Distributions bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2248*43a90889SApple OSS Distributions break;
2249*43a90889SApple OSS Distributions }
2250*43a90889SApple OSS Distributions }
2251*43a90889SApple OSS Distributions _rangesCount = count;
2252*43a90889SApple OSS Distributions
2253*43a90889SApple OSS Distributions // Find starting address within the vector of ranges
2254*43a90889SApple OSS Distributions Ranges vec = _ranges;
2255*43a90889SApple OSS Distributions mach_vm_size_t totalLength = 0;
2256*43a90889SApple OSS Distributions unsigned int ind, pages = 0;
2257*43a90889SApple OSS Distributions for (ind = 0; ind < count; ind++) {
2258*43a90889SApple OSS Distributions mach_vm_address_t addr;
2259*43a90889SApple OSS Distributions mach_vm_address_t endAddr;
2260*43a90889SApple OSS Distributions mach_vm_size_t len;
2261*43a90889SApple OSS Distributions
2262*43a90889SApple OSS Distributions // addr & len are returned by this function
2263*43a90889SApple OSS Distributions getAddrLenForInd(addr, len, type, vec, ind, _task);
2264*43a90889SApple OSS Distributions if (_task) {
2265*43a90889SApple OSS Distributions mach_vm_size_t phys_size;
2266*43a90889SApple OSS Distributions kern_return_t kret;
2267*43a90889SApple OSS Distributions kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2268*43a90889SApple OSS Distributions if (KERN_SUCCESS != kret) {
2269*43a90889SApple OSS Distributions break;
2270*43a90889SApple OSS Distributions }
2271*43a90889SApple OSS Distributions if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2272*43a90889SApple OSS Distributions break;
2273*43a90889SApple OSS Distributions }
2274*43a90889SApple OSS Distributions } else {
2275*43a90889SApple OSS Distributions if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2276*43a90889SApple OSS Distributions break;
2277*43a90889SApple OSS Distributions }
2278*43a90889SApple OSS Distributions if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2279*43a90889SApple OSS Distributions break;
2280*43a90889SApple OSS Distributions }
2281*43a90889SApple OSS Distributions if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2282*43a90889SApple OSS Distributions break;
2283*43a90889SApple OSS Distributions }
2284*43a90889SApple OSS Distributions }
2285*43a90889SApple OSS Distributions if (os_add_overflow(totalLength, len, &totalLength)) {
2286*43a90889SApple OSS Distributions break;
2287*43a90889SApple OSS Distributions }
2288*43a90889SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2289*43a90889SApple OSS Distributions uint64_t highPage = atop_64(addr + len - 1);
2290*43a90889SApple OSS Distributions if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2291*43a90889SApple OSS Distributions _highestPage = (ppnum_t) highPage;
2292*43a90889SApple OSS Distributions DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2293*43a90889SApple OSS Distributions }
2294*43a90889SApple OSS Distributions }
2295*43a90889SApple OSS Distributions }
2296*43a90889SApple OSS Distributions if ((ind < count)
2297*43a90889SApple OSS Distributions || (totalLength != ((IOByteCount) totalLength))) {
2298*43a90889SApple OSS Distributions return false; /* overflow */
2299*43a90889SApple OSS Distributions }
2300*43a90889SApple OSS Distributions _length = totalLength;
2301*43a90889SApple OSS Distributions _pages = pages;
2302*43a90889SApple OSS Distributions
2303*43a90889SApple OSS Distributions // Auto-prepare memory at creation time.
2304*43a90889SApple OSS Distributions // Implied completion when descriptor is free-ed
2305*43a90889SApple OSS Distributions
2306*43a90889SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2307*43a90889SApple OSS Distributions _wireCount++; // Physical MDs are, by definition, wired
2308*43a90889SApple OSS Distributions } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2309*43a90889SApple OSS Distributions ioGMDData *dataP;
2310*43a90889SApple OSS Distributions unsigned dataSize;
2311*43a90889SApple OSS Distributions
2312*43a90889SApple OSS Distributions if (_pages > atop_64(max_mem)) {
2313*43a90889SApple OSS Distributions return false;
2314*43a90889SApple OSS Distributions }
2315*43a90889SApple OSS Distributions
2316*43a90889SApple OSS Distributions dataSize = computeDataSize(_pages, /* upls */ count * 2);
2317*43a90889SApple OSS Distributions if (!initMemoryEntries(dataSize, mapper)) {
2318*43a90889SApple OSS Distributions return false;
2319*43a90889SApple OSS Distributions }
2320*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
2321*43a90889SApple OSS Distributions dataP->fPageCnt = _pages;
2322*43a90889SApple OSS Distributions
2323*43a90889SApple OSS Distributions if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2324*43a90889SApple OSS Distributions && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2325*43a90889SApple OSS Distributions _kernelTag = IOMemoryTag(kernel_map);
2326*43a90889SApple OSS Distributions if (_kernelTag == gIOSurfaceTag) {
2327*43a90889SApple OSS Distributions _userTag = VM_MEMORY_IOSURFACE;
2328*43a90889SApple OSS Distributions }
2329*43a90889SApple OSS Distributions }
2330*43a90889SApple OSS Distributions
2331*43a90889SApple OSS Distributions if ((kIOMemoryPersistent & _flags) && !_memRef) {
2332*43a90889SApple OSS Distributions IOReturn
2333*43a90889SApple OSS Distributions err = memoryReferenceCreate(0, &_memRef);
2334*43a90889SApple OSS Distributions if (kIOReturnSuccess != err) {
2335*43a90889SApple OSS Distributions return false;
2336*43a90889SApple OSS Distributions }
2337*43a90889SApple OSS Distributions }
2338*43a90889SApple OSS Distributions
2339*43a90889SApple OSS Distributions if ((_flags & kIOMemoryAutoPrepare)
2340*43a90889SApple OSS Distributions && prepare() != kIOReturnSuccess) {
2341*43a90889SApple OSS Distributions return false;
2342*43a90889SApple OSS Distributions }
2343*43a90889SApple OSS Distributions }
2344*43a90889SApple OSS Distributions }
2345*43a90889SApple OSS Distributions
2346*43a90889SApple OSS Distributions return true;
2347*43a90889SApple OSS Distributions }
2348*43a90889SApple OSS Distributions
2349*43a90889SApple OSS Distributions /*
2350*43a90889SApple OSS Distributions * free
2351*43a90889SApple OSS Distributions *
2352*43a90889SApple OSS Distributions * Free resources.
2353*43a90889SApple OSS Distributions */
/*
 * free
 *
 * Free resources: unwire / unmap, release range storage, the reserved
 * area, the memory reference, and the prepare lock, then chain to super.
 */
void
IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    // Sever the device pager's back-pointer to us before teardown.
    if (reserved && reserved->dp.memory) {
        LOCK;
        reserved->dp.memory = NULL;
        UNLOCK;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
        // Physical descriptors: just tear down any recorded DMA mapping.
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
            dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
            dataP->fMappedBaseValid = dataP->fMappedBase = 0;
        }
    } else {
        // Balance any outstanding prepare() calls (implied completion).
        while (_wireCount) {
            complete();
        }
    }

    if (_memoryEntries) {
        _memoryEntries.reset();
    }

    // Free the range storage unless the caller owned it
    // (kIOMemoryAsReference); the free routine must match the stored type.
    if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
        if (kIOMemoryTypeUIO == type) {
            uio_free((uio_t) _ranges.v);
        }
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
        }
#endif /* !__LP64__ */
        else {
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        _ranges.v = NULL;
    }

    if (reserved) {
        cleanKernelReserved(reserved);
        if (reserved->dp.devicePager) {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
        } else {
            IOFreeType(reserved, IOMemoryDescriptorReserved);
        }
        reserved = NULL;
    }

    if (_memRef) {
        memoryReferenceRelease(_memRef);
    }
    if (_prepareLock) {
        IOLockFree(_prepareLock);
    }

    super::free();
}
2417*43a90889SApple OSS Distributions
#ifndef __LP64__
// Deprecated legacy (32-bit only) entry point; intentionally unimplemented.
// Any caller reaching this panics.
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}

// Deprecated legacy (32-bit only) entry point; intentionally unimplemented.
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
2431*43a90889SApple OSS Distributions
2432*43a90889SApple OSS Distributions /*
2433*43a90889SApple OSS Distributions * getDirection:
2434*43a90889SApple OSS Distributions *
2435*43a90889SApple OSS Distributions * Get the direction of the transfer.
2436*43a90889SApple OSS Distributions */
2437*43a90889SApple OSS Distributions IODirection
getDirection() const2438*43a90889SApple OSS Distributions IOMemoryDescriptor::getDirection() const
2439*43a90889SApple OSS Distributions {
2440*43a90889SApple OSS Distributions #ifndef __LP64__
2441*43a90889SApple OSS Distributions if (_direction) {
2442*43a90889SApple OSS Distributions return _direction;
2443*43a90889SApple OSS Distributions }
2444*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2445*43a90889SApple OSS Distributions return (IODirection) (_flags & kIOMemoryDirectionMask);
2446*43a90889SApple OSS Distributions }
2447*43a90889SApple OSS Distributions
2448*43a90889SApple OSS Distributions /*
2449*43a90889SApple OSS Distributions * getLength:
2450*43a90889SApple OSS Distributions *
2451*43a90889SApple OSS Distributions * Get the length of the transfer (over all ranges).
2452*43a90889SApple OSS Distributions */
/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total byte count across all ranges, computed during initialization.
	return _length;
}
2458*43a90889SApple OSS Distributions
// Store a client-defined tag value on this descriptor (opaque to IOKit).
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	_tag = tag;
}
2464*43a90889SApple OSS Distributions
// Return the client-defined tag previously stored with setTag().
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	return _tag;
}
2470*43a90889SApple OSS Distributions
// Return the descriptor's option/type flag word (kIOMemory* bits).
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	return _flags;
}
2476*43a90889SApple OSS Distributions
2477*43a90889SApple OSS Distributions OSObject *
copyContext(void) const2478*43a90889SApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2479*43a90889SApple OSS Distributions {
2480*43a90889SApple OSS Distributions if (reserved) {
2481*43a90889SApple OSS Distributions OSObject * context = reserved->contextObject;
2482*43a90889SApple OSS Distributions if (context) {
2483*43a90889SApple OSS Distributions context->retain();
2484*43a90889SApple OSS Distributions }
2485*43a90889SApple OSS Distributions return context;
2486*43a90889SApple OSS Distributions } else {
2487*43a90889SApple OSS Distributions return NULL;
2488*43a90889SApple OSS Distributions }
2489*43a90889SApple OSS Distributions }
2490*43a90889SApple OSS Distributions
/*
 * setContext:
 *
 * Attach (or clear, when obj is NULL) a client context object, replacing
 * and releasing any previous one.  The new object is retained.
 */
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Lazily allocate the reserved area that holds the context pointer.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		// Detach the previous context via compare-and-swap, then release it.
		// NOTE(review): if the CAS fails (a concurrent setContext already
		// swapped the pointer), the old object is not released on this path
		// — presumably callers serialize setContext; verify against usage.
		OSObject * oldObject = reserved->contextObject;
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			// Retain before publishing the new context pointer.
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2511*43a90889SApple OSS Distributions
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
	// Wire the memory only for the duration of the lookup; if prepare()
	// fails, report a zero address.
	if (kIOReturnSuccess != prepare()) {
		return 0;
	}

	addr64_t segAddr = getPhysicalSegment64( offset, length );
	complete();

	return (IOPhysicalAddress) segAddr; // truncated but only page offset is used
}

#pragma clang diagnostic pop

#endif /* !__LP64__ */
2533*43a90889SApple OSS Distributions
2534*43a90889SApple OSS Distributions
2535*43a90889SApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)2536*43a90889SApple OSS Distributions IOMemoryDescriptor::readBytes
2537*43a90889SApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
2538*43a90889SApple OSS Distributions {
2539*43a90889SApple OSS Distributions addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2540*43a90889SApple OSS Distributions IOByteCount endoffset;
2541*43a90889SApple OSS Distributions IOByteCount remaining;
2542*43a90889SApple OSS Distributions
2543*43a90889SApple OSS Distributions // Check that this entire I/O is within the available range
2544*43a90889SApple OSS Distributions if ((offset > _length)
2545*43a90889SApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2546*43a90889SApple OSS Distributions || (endoffset > _length)) {
2547*43a90889SApple OSS Distributions assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2548*43a90889SApple OSS Distributions return 0;
2549*43a90889SApple OSS Distributions }
2550*43a90889SApple OSS Distributions if (offset >= _length) {
2551*43a90889SApple OSS Distributions return 0;
2552*43a90889SApple OSS Distributions }
2553*43a90889SApple OSS Distributions
2554*43a90889SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2555*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
2556*43a90889SApple OSS Distributions return 0;
2557*43a90889SApple OSS Distributions }
2558*43a90889SApple OSS Distributions
2559*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2560*43a90889SApple OSS Distributions LOCK;
2561*43a90889SApple OSS Distributions }
2562*43a90889SApple OSS Distributions
2563*43a90889SApple OSS Distributions remaining = length = min(length, _length - offset);
2564*43a90889SApple OSS Distributions while (remaining) { // (process another target segment?)
2565*43a90889SApple OSS Distributions addr64_t srcAddr64;
2566*43a90889SApple OSS Distributions IOByteCount srcLen;
2567*43a90889SApple OSS Distributions int options = cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap;
2568*43a90889SApple OSS Distributions
2569*43a90889SApple OSS Distributions IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
2570*43a90889SApple OSS Distributions srcAddr64 = getPhysicalSegment(offset, &srcLen, getPhysSegmentOptions);
2571*43a90889SApple OSS Distributions if (!srcAddr64) {
2572*43a90889SApple OSS Distributions break;
2573*43a90889SApple OSS Distributions }
2574*43a90889SApple OSS Distributions
2575*43a90889SApple OSS Distributions // Clip segment length to remaining
2576*43a90889SApple OSS Distributions if (srcLen > remaining) {
2577*43a90889SApple OSS Distributions srcLen = remaining;
2578*43a90889SApple OSS Distributions }
2579*43a90889SApple OSS Distributions
2580*43a90889SApple OSS Distributions if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2581*43a90889SApple OSS Distributions srcLen = (UINT_MAX - PAGE_SIZE + 1);
2582*43a90889SApple OSS Distributions }
2583*43a90889SApple OSS Distributions
2584*43a90889SApple OSS Distributions
2585*43a90889SApple OSS Distributions kern_return_t copy_ret = copypv(srcAddr64, dstAddr, (unsigned int) srcLen, options);
2586*43a90889SApple OSS Distributions #pragma unused(copy_ret)
2587*43a90889SApple OSS Distributions
2588*43a90889SApple OSS Distributions dstAddr += srcLen;
2589*43a90889SApple OSS Distributions offset += srcLen;
2590*43a90889SApple OSS Distributions remaining -= srcLen;
2591*43a90889SApple OSS Distributions }
2592*43a90889SApple OSS Distributions
2593*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2594*43a90889SApple OSS Distributions UNLOCK;
2595*43a90889SApple OSS Distributions }
2596*43a90889SApple OSS Distributions
2597*43a90889SApple OSS Distributions assert(!remaining);
2598*43a90889SApple OSS Distributions
2599*43a90889SApple OSS Distributions return length - remaining;
2600*43a90889SApple OSS Distributions }
2601*43a90889SApple OSS Distributions
2602*43a90889SApple OSS Distributions IOByteCount
writeBytes(IOByteCount inoffset,const void * bytes,IOByteCount length)2603*43a90889SApple OSS Distributions IOMemoryDescriptor::writeBytes
2604*43a90889SApple OSS Distributions (IOByteCount inoffset, const void *bytes, IOByteCount length)
2605*43a90889SApple OSS Distributions {
2606*43a90889SApple OSS Distributions addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2607*43a90889SApple OSS Distributions IOByteCount remaining;
2608*43a90889SApple OSS Distributions IOByteCount endoffset;
2609*43a90889SApple OSS Distributions IOByteCount offset = inoffset;
2610*43a90889SApple OSS Distributions
2611*43a90889SApple OSS Distributions assert( !(kIOMemoryPreparedReadOnly & _flags));
2612*43a90889SApple OSS Distributions
2613*43a90889SApple OSS Distributions // Check that this entire I/O is within the available range
2614*43a90889SApple OSS Distributions if ((offset > _length)
2615*43a90889SApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2616*43a90889SApple OSS Distributions || (endoffset > _length)) {
2617*43a90889SApple OSS Distributions assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2618*43a90889SApple OSS Distributions return 0;
2619*43a90889SApple OSS Distributions }
2620*43a90889SApple OSS Distributions if (kIOMemoryPreparedReadOnly & _flags) {
2621*43a90889SApple OSS Distributions return 0;
2622*43a90889SApple OSS Distributions }
2623*43a90889SApple OSS Distributions if (offset >= _length) {
2624*43a90889SApple OSS Distributions return 0;
2625*43a90889SApple OSS Distributions }
2626*43a90889SApple OSS Distributions
2627*43a90889SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2628*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
2629*43a90889SApple OSS Distributions return 0;
2630*43a90889SApple OSS Distributions }
2631*43a90889SApple OSS Distributions
2632*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2633*43a90889SApple OSS Distributions LOCK;
2634*43a90889SApple OSS Distributions }
2635*43a90889SApple OSS Distributions
2636*43a90889SApple OSS Distributions remaining = length = min(length, _length - offset);
2637*43a90889SApple OSS Distributions while (remaining) { // (process another target segment?)
2638*43a90889SApple OSS Distributions addr64_t dstAddr64;
2639*43a90889SApple OSS Distributions IOByteCount dstLen;
2640*43a90889SApple OSS Distributions int options = cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap;
2641*43a90889SApple OSS Distributions
2642*43a90889SApple OSS Distributions IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
2643*43a90889SApple OSS Distributions dstAddr64 = getPhysicalSegment(offset, &dstLen, getPhysSegmentOptions);
2644*43a90889SApple OSS Distributions if (!dstAddr64) {
2645*43a90889SApple OSS Distributions break;
2646*43a90889SApple OSS Distributions }
2647*43a90889SApple OSS Distributions
2648*43a90889SApple OSS Distributions // Clip segment length to remaining
2649*43a90889SApple OSS Distributions if (dstLen > remaining) {
2650*43a90889SApple OSS Distributions dstLen = remaining;
2651*43a90889SApple OSS Distributions }
2652*43a90889SApple OSS Distributions
2653*43a90889SApple OSS Distributions if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2654*43a90889SApple OSS Distributions dstLen = (UINT_MAX - PAGE_SIZE + 1);
2655*43a90889SApple OSS Distributions }
2656*43a90889SApple OSS Distributions
2657*43a90889SApple OSS Distributions
2658*43a90889SApple OSS Distributions if (!srcAddr) {
2659*43a90889SApple OSS Distributions bzero_phys(dstAddr64, (unsigned int) dstLen);
2660*43a90889SApple OSS Distributions } else {
2661*43a90889SApple OSS Distributions kern_return_t copy_ret = copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen, options);
2662*43a90889SApple OSS Distributions #pragma unused(copy_ret)
2663*43a90889SApple OSS Distributions srcAddr += dstLen;
2664*43a90889SApple OSS Distributions }
2665*43a90889SApple OSS Distributions offset += dstLen;
2666*43a90889SApple OSS Distributions remaining -= dstLen;
2667*43a90889SApple OSS Distributions }
2668*43a90889SApple OSS Distributions
2669*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2670*43a90889SApple OSS Distributions UNLOCK;
2671*43a90889SApple OSS Distributions }
2672*43a90889SApple OSS Distributions
2673*43a90889SApple OSS Distributions assert(!remaining);
2674*43a90889SApple OSS Distributions
2675*43a90889SApple OSS Distributions #if defined(__x86_64__)
2676*43a90889SApple OSS Distributions // copypv does not cppvFsnk on intel
2677*43a90889SApple OSS Distributions #else
2678*43a90889SApple OSS Distributions if (!srcAddr) {
2679*43a90889SApple OSS Distributions performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2680*43a90889SApple OSS Distributions }
2681*43a90889SApple OSS Distributions #endif
2682*43a90889SApple OSS Distributions
2683*43a90889SApple OSS Distributions return length - remaining;
2684*43a90889SApple OSS Distributions }
2685*43a90889SApple OSS Distributions
2686*43a90889SApple OSS Distributions #ifndef __LP64__
2687*43a90889SApple OSS Distributions void
setPosition(IOByteCount position)2688*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2689*43a90889SApple OSS Distributions {
2690*43a90889SApple OSS Distributions panic("IOGMD::setPosition deprecated");
2691*43a90889SApple OSS Distributions }
2692*43a90889SApple OSS Distributions #endif /* !__LP64__ */
2693*43a90889SApple OSS Distributions
2694*43a90889SApple OSS Distributions static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2695*43a90889SApple OSS Distributions static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2696*43a90889SApple OSS Distributions
2697*43a90889SApple OSS Distributions uint64_t
getPreparationID(void)2698*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::getPreparationID( void )
2699*43a90889SApple OSS Distributions {
2700*43a90889SApple OSS Distributions ioGMDData *dataP;
2701*43a90889SApple OSS Distributions
2702*43a90889SApple OSS Distributions if (!_wireCount) {
2703*43a90889SApple OSS Distributions return kIOPreparationIDUnprepared;
2704*43a90889SApple OSS Distributions }
2705*43a90889SApple OSS Distributions
2706*43a90889SApple OSS Distributions if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2707*43a90889SApple OSS Distributions || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2708*43a90889SApple OSS Distributions IOMemoryDescriptor::setPreparationID();
2709*43a90889SApple OSS Distributions return IOMemoryDescriptor::getPreparationID();
2710*43a90889SApple OSS Distributions }
2711*43a90889SApple OSS Distributions
2712*43a90889SApple OSS Distributions if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2713*43a90889SApple OSS Distributions return kIOPreparationIDUnprepared;
2714*43a90889SApple OSS Distributions }
2715*43a90889SApple OSS Distributions
2716*43a90889SApple OSS Distributions if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2717*43a90889SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2718*43a90889SApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2719*43a90889SApple OSS Distributions }
2720*43a90889SApple OSS Distributions return dataP->fPreparationID;
2721*43a90889SApple OSS Distributions }
2722*43a90889SApple OSS Distributions
2723*43a90889SApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2724*43a90889SApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2725*43a90889SApple OSS Distributions {
2726*43a90889SApple OSS Distributions if (reserved->creator) {
2727*43a90889SApple OSS Distributions task_deallocate(reserved->creator);
2728*43a90889SApple OSS Distributions reserved->creator = NULL;
2729*43a90889SApple OSS Distributions }
2730*43a90889SApple OSS Distributions
2731*43a90889SApple OSS Distributions if (reserved->contextObject) {
2732*43a90889SApple OSS Distributions reserved->contextObject->release();
2733*43a90889SApple OSS Distributions reserved->contextObject = NULL;
2734*43a90889SApple OSS Distributions }
2735*43a90889SApple OSS Distributions }
2736*43a90889SApple OSS Distributions
2737*43a90889SApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2738*43a90889SApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2739*43a90889SApple OSS Distributions {
2740*43a90889SApple OSS Distributions if (!reserved) {
2741*43a90889SApple OSS Distributions reserved = IOMallocType(IOMemoryDescriptorReserved);
2742*43a90889SApple OSS Distributions }
2743*43a90889SApple OSS Distributions return reserved;
2744*43a90889SApple OSS Distributions }
2745*43a90889SApple OSS Distributions
2746*43a90889SApple OSS Distributions void
setPreparationID(void)2747*43a90889SApple OSS Distributions IOMemoryDescriptor::setPreparationID( void )
2748*43a90889SApple OSS Distributions {
2749*43a90889SApple OSS Distributions if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2750*43a90889SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2751*43a90889SApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2752*43a90889SApple OSS Distributions }
2753*43a90889SApple OSS Distributions }
2754*43a90889SApple OSS Distributions
2755*43a90889SApple OSS Distributions uint64_t
getPreparationID(void)2756*43a90889SApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2757*43a90889SApple OSS Distributions {
2758*43a90889SApple OSS Distributions if (reserved) {
2759*43a90889SApple OSS Distributions return reserved->preparationID;
2760*43a90889SApple OSS Distributions } else {
2761*43a90889SApple OSS Distributions return kIOPreparationIDUnsupported;
2762*43a90889SApple OSS Distributions }
2763*43a90889SApple OSS Distributions }
2764*43a90889SApple OSS Distributions
2765*43a90889SApple OSS Distributions void
setDescriptorID(void)2766*43a90889SApple OSS Distributions IOMemoryDescriptor::setDescriptorID( void )
2767*43a90889SApple OSS Distributions {
2768*43a90889SApple OSS Distributions if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2769*43a90889SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2770*43a90889SApple OSS Distributions OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2771*43a90889SApple OSS Distributions }
2772*43a90889SApple OSS Distributions }
2773*43a90889SApple OSS Distributions
2774*43a90889SApple OSS Distributions uint64_t
getDescriptorID(void)2775*43a90889SApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2776*43a90889SApple OSS Distributions {
2777*43a90889SApple OSS Distributions setDescriptorID();
2778*43a90889SApple OSS Distributions
2779*43a90889SApple OSS Distributions if (reserved) {
2780*43a90889SApple OSS Distributions return reserved->descriptorID;
2781*43a90889SApple OSS Distributions } else {
2782*43a90889SApple OSS Distributions return kIODescriptorIDInvalid;
2783*43a90889SApple OSS Distributions }
2784*43a90889SApple OSS Distributions }
2785*43a90889SApple OSS Distributions
2786*43a90889SApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2787*43a90889SApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2788*43a90889SApple OSS Distributions {
2789*43a90889SApple OSS Distributions if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2790*43a90889SApple OSS Distributions return kIOReturnSuccess;
2791*43a90889SApple OSS Distributions }
2792*43a90889SApple OSS Distributions
2793*43a90889SApple OSS Distributions assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2794*43a90889SApple OSS Distributions if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2795*43a90889SApple OSS Distributions return kIOReturnBadArgument;
2796*43a90889SApple OSS Distributions }
2797*43a90889SApple OSS Distributions
2798*43a90889SApple OSS Distributions uint64_t descriptorID = getDescriptorID();
2799*43a90889SApple OSS Distributions assert(descriptorID != kIODescriptorIDInvalid);
2800*43a90889SApple OSS Distributions if (getDescriptorID() == kIODescriptorIDInvalid) {
2801*43a90889SApple OSS Distributions return kIOReturnBadArgument;
2802*43a90889SApple OSS Distributions }
2803*43a90889SApple OSS Distributions
2804*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2805*43a90889SApple OSS Distributions
2806*43a90889SApple OSS Distributions #if __LP64__
2807*43a90889SApple OSS Distributions static const uint8_t num_segments_page = 8;
2808*43a90889SApple OSS Distributions #else
2809*43a90889SApple OSS Distributions static const uint8_t num_segments_page = 4;
2810*43a90889SApple OSS Distributions #endif
2811*43a90889SApple OSS Distributions static const uint8_t num_segments_long = 2;
2812*43a90889SApple OSS Distributions
2813*43a90889SApple OSS Distributions IOPhysicalAddress segments_page[num_segments_page];
2814*43a90889SApple OSS Distributions IOPhysicalRange segments_long[num_segments_long];
2815*43a90889SApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2816*43a90889SApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2817*43a90889SApple OSS Distributions
2818*43a90889SApple OSS Distributions uint8_t segment_page_idx = 0;
2819*43a90889SApple OSS Distributions uint8_t segment_long_idx = 0;
2820*43a90889SApple OSS Distributions
2821*43a90889SApple OSS Distributions IOPhysicalRange physical_segment;
2822*43a90889SApple OSS Distributions for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2823*43a90889SApple OSS Distributions physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2824*43a90889SApple OSS Distributions
2825*43a90889SApple OSS Distributions if (physical_segment.length == 0) {
2826*43a90889SApple OSS Distributions break;
2827*43a90889SApple OSS Distributions }
2828*43a90889SApple OSS Distributions
2829*43a90889SApple OSS Distributions /**
2830*43a90889SApple OSS Distributions * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2831*43a90889SApple OSS Distributions * buffer memory, pack segment events according to the following.
2832*43a90889SApple OSS Distributions *
2833*43a90889SApple OSS Distributions * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
2834*43a90889SApple OSS Distributions * IOMDPA_MAPPED event emitted on by the current thread_id.
2835*43a90889SApple OSS Distributions *
2836*43a90889SApple OSS Distributions * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2837*43a90889SApple OSS Distributions * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2838*43a90889SApple OSS Distributions * - unmapped pages will have a ppn of MAX_INT_32
2839*43a90889SApple OSS Distributions * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
2840*43a90889SApple OSS Distributions * - address_0, length_0, address_0, length_1
2841*43a90889SApple OSS Distributions * - unmapped pages will have an address of 0
2842*43a90889SApple OSS Distributions *
2843*43a90889SApple OSS Distributions * During each iteration do the following depending on the length of the mapping:
2844*43a90889SApple OSS Distributions * 1. add the current segment to the appropriate queue of pending segments
2845*43a90889SApple OSS Distributions * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2846*43a90889SApple OSS Distributions * 1a. if FALSE emit and reset all events in the previous queue
2847*43a90889SApple OSS Distributions * 2. check if we have filled up the current queue of pending events
2848*43a90889SApple OSS Distributions * 2a. if TRUE emit and reset all events in the pending queue
2849*43a90889SApple OSS Distributions * 3. after completing all iterations emit events in the current queue
2850*43a90889SApple OSS Distributions */
2851*43a90889SApple OSS Distributions
2852*43a90889SApple OSS Distributions bool emit_page = false;
2853*43a90889SApple OSS Distributions bool emit_long = false;
2854*43a90889SApple OSS Distributions if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2855*43a90889SApple OSS Distributions segments_page[segment_page_idx] = physical_segment.address;
2856*43a90889SApple OSS Distributions segment_page_idx++;
2857*43a90889SApple OSS Distributions
2858*43a90889SApple OSS Distributions emit_long = segment_long_idx != 0;
2859*43a90889SApple OSS Distributions emit_page = segment_page_idx == num_segments_page;
2860*43a90889SApple OSS Distributions
2861*43a90889SApple OSS Distributions if (os_unlikely(emit_long)) {
2862*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2863*43a90889SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2864*43a90889SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2865*43a90889SApple OSS Distributions }
2866*43a90889SApple OSS Distributions
2867*43a90889SApple OSS Distributions if (os_unlikely(emit_page)) {
2868*43a90889SApple OSS Distributions #if __LP64__
2869*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2870*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2871*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2872*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2873*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2874*43a90889SApple OSS Distributions #else
2875*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2876*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2877*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2878*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2879*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2880*43a90889SApple OSS Distributions #endif
2881*43a90889SApple OSS Distributions }
2882*43a90889SApple OSS Distributions } else {
2883*43a90889SApple OSS Distributions segments_long[segment_long_idx] = physical_segment;
2884*43a90889SApple OSS Distributions segment_long_idx++;
2885*43a90889SApple OSS Distributions
2886*43a90889SApple OSS Distributions emit_page = segment_page_idx != 0;
2887*43a90889SApple OSS Distributions emit_long = segment_long_idx == num_segments_long;
2888*43a90889SApple OSS Distributions
2889*43a90889SApple OSS Distributions if (os_unlikely(emit_page)) {
2890*43a90889SApple OSS Distributions #if __LP64__
2891*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2892*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2893*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2894*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2895*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2896*43a90889SApple OSS Distributions #else
2897*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2898*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2899*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2900*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2901*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2902*43a90889SApple OSS Distributions #endif
2903*43a90889SApple OSS Distributions }
2904*43a90889SApple OSS Distributions
2905*43a90889SApple OSS Distributions if (emit_long) {
2906*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2907*43a90889SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2908*43a90889SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2909*43a90889SApple OSS Distributions }
2910*43a90889SApple OSS Distributions }
2911*43a90889SApple OSS Distributions
2912*43a90889SApple OSS Distributions if (os_unlikely(emit_page)) {
2913*43a90889SApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2914*43a90889SApple OSS Distributions segment_page_idx = 0;
2915*43a90889SApple OSS Distributions }
2916*43a90889SApple OSS Distributions
2917*43a90889SApple OSS Distributions if (os_unlikely(emit_long)) {
2918*43a90889SApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2919*43a90889SApple OSS Distributions segment_long_idx = 0;
2920*43a90889SApple OSS Distributions }
2921*43a90889SApple OSS Distributions }
2922*43a90889SApple OSS Distributions
2923*43a90889SApple OSS Distributions if (segment_page_idx != 0) {
2924*43a90889SApple OSS Distributions assert(segment_long_idx == 0);
2925*43a90889SApple OSS Distributions #if __LP64__
2926*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2927*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2928*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2929*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2930*43a90889SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2931*43a90889SApple OSS Distributions #else
2932*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2933*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2934*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2935*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2936*43a90889SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2937*43a90889SApple OSS Distributions #endif
2938*43a90889SApple OSS Distributions } else if (segment_long_idx != 0) {
2939*43a90889SApple OSS Distributions assert(segment_page_idx == 0);
2940*43a90889SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2941*43a90889SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2942*43a90889SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2943*43a90889SApple OSS Distributions }
2944*43a90889SApple OSS Distributions
2945*43a90889SApple OSS Distributions return kIOReturnSuccess;
2946*43a90889SApple OSS Distributions }
2947*43a90889SApple OSS Distributions
2948*43a90889SApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2949*43a90889SApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2950*43a90889SApple OSS Distributions {
2951*43a90889SApple OSS Distributions _kernelTag = (vm_tag_t) kernelTag;
2952*43a90889SApple OSS Distributions _userTag = (vm_tag_t) userTag;
2953*43a90889SApple OSS Distributions }
2954*43a90889SApple OSS Distributions
2955*43a90889SApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2956*43a90889SApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2957*43a90889SApple OSS Distributions {
2958*43a90889SApple OSS Distributions if (vm_kernel_map_is_kernel(map)) {
2959*43a90889SApple OSS Distributions if (VM_KERN_MEMORY_NONE != _kernelTag) {
2960*43a90889SApple OSS Distributions return (uint32_t) _kernelTag;
2961*43a90889SApple OSS Distributions }
2962*43a90889SApple OSS Distributions } else {
2963*43a90889SApple OSS Distributions if (VM_KERN_MEMORY_NONE != _userTag) {
2964*43a90889SApple OSS Distributions return (uint32_t) _userTag;
2965*43a90889SApple OSS Distributions }
2966*43a90889SApple OSS Distributions }
2967*43a90889SApple OSS Distributions return IOMemoryTag(map);
2968*43a90889SApple OSS Distributions }
2969*43a90889SApple OSS Distributions
2970*43a90889SApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2971*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2972*43a90889SApple OSS Distributions {
2973*43a90889SApple OSS Distributions IOReturn err = kIOReturnSuccess;
2974*43a90889SApple OSS Distributions DMACommandOps params;
2975*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2976*43a90889SApple OSS Distributions ioGMDData *dataP;
2977*43a90889SApple OSS Distributions
2978*43a90889SApple OSS Distributions params = (op & ~kIOMDDMACommandOperationMask & op);
2979*43a90889SApple OSS Distributions op &= kIOMDDMACommandOperationMask;
2980*43a90889SApple OSS Distributions
2981*43a90889SApple OSS Distributions if (kIOMDDMAMap == op) {
2982*43a90889SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
2983*43a90889SApple OSS Distributions return kIOReturnUnderrun;
2984*43a90889SApple OSS Distributions }
2985*43a90889SApple OSS Distributions
2986*43a90889SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2987*43a90889SApple OSS Distributions
2988*43a90889SApple OSS Distributions if (!_memoryEntries
2989*43a90889SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2990*43a90889SApple OSS Distributions return kIOReturnNoMemory;
2991*43a90889SApple OSS Distributions }
2992*43a90889SApple OSS Distributions
2993*43a90889SApple OSS Distributions if (_memoryEntries && data->fMapper) {
2994*43a90889SApple OSS Distributions bool remap, keepMap;
2995*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
2996*43a90889SApple OSS Distributions
2997*43a90889SApple OSS Distributions if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2998*43a90889SApple OSS Distributions dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2999*43a90889SApple OSS Distributions }
3000*43a90889SApple OSS Distributions if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
3001*43a90889SApple OSS Distributions dataP->fDMAMapAlignment = data->fMapSpec.alignment;
3002*43a90889SApple OSS Distributions }
3003*43a90889SApple OSS Distributions
3004*43a90889SApple OSS Distributions keepMap = (data->fMapper == gIOSystemMapper);
3005*43a90889SApple OSS Distributions keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
3006*43a90889SApple OSS Distributions
3007*43a90889SApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3008*43a90889SApple OSS Distributions IOLockLock(_prepareLock);
3009*43a90889SApple OSS Distributions }
3010*43a90889SApple OSS Distributions
3011*43a90889SApple OSS Distributions remap = (!keepMap);
3012*43a90889SApple OSS Distributions remap |= (dataP->fDMAMapNumAddressBits < 64)
3013*43a90889SApple OSS Distributions && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3014*43a90889SApple OSS Distributions remap |= (dataP->fDMAMapAlignment > page_size);
3015*43a90889SApple OSS Distributions
3016*43a90889SApple OSS Distributions if (remap || !dataP->fMappedBaseValid) {
3017*43a90889SApple OSS Distributions err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3018*43a90889SApple OSS Distributions if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3019*43a90889SApple OSS Distributions dataP->fMappedBase = data->fAlloc;
3020*43a90889SApple OSS Distributions dataP->fMappedBaseValid = true;
3021*43a90889SApple OSS Distributions dataP->fMappedLength = data->fAllocLength;
3022*43a90889SApple OSS Distributions data->fAllocLength = 0; // IOMD owns the alloc now
3023*43a90889SApple OSS Distributions }
3024*43a90889SApple OSS Distributions } else {
3025*43a90889SApple OSS Distributions data->fAlloc = dataP->fMappedBase;
3026*43a90889SApple OSS Distributions data->fAllocLength = 0; // give out IOMD map
3027*43a90889SApple OSS Distributions md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3028*43a90889SApple OSS Distributions }
3029*43a90889SApple OSS Distributions
3030*43a90889SApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3031*43a90889SApple OSS Distributions IOLockUnlock(_prepareLock);
3032*43a90889SApple OSS Distributions }
3033*43a90889SApple OSS Distributions }
3034*43a90889SApple OSS Distributions return err;
3035*43a90889SApple OSS Distributions }
3036*43a90889SApple OSS Distributions if (kIOMDDMAUnmap == op) {
3037*43a90889SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3038*43a90889SApple OSS Distributions return kIOReturnUnderrun;
3039*43a90889SApple OSS Distributions }
3040*43a90889SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3041*43a90889SApple OSS Distributions
3042*43a90889SApple OSS Distributions if (_pages) {
3043*43a90889SApple OSS Distributions err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3044*43a90889SApple OSS Distributions }
3045*43a90889SApple OSS Distributions
3046*43a90889SApple OSS Distributions return kIOReturnSuccess;
3047*43a90889SApple OSS Distributions }
3048*43a90889SApple OSS Distributions
3049*43a90889SApple OSS Distributions if (kIOMDAddDMAMapSpec == op) {
3050*43a90889SApple OSS Distributions if (dataSize < sizeof(IODMAMapSpecification)) {
3051*43a90889SApple OSS Distributions return kIOReturnUnderrun;
3052*43a90889SApple OSS Distributions }
3053*43a90889SApple OSS Distributions
3054*43a90889SApple OSS Distributions IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3055*43a90889SApple OSS Distributions
3056*43a90889SApple OSS Distributions if (!_memoryEntries
3057*43a90889SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3058*43a90889SApple OSS Distributions return kIOReturnNoMemory;
3059*43a90889SApple OSS Distributions }
3060*43a90889SApple OSS Distributions
3061*43a90889SApple OSS Distributions if (_memoryEntries) {
3062*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
3063*43a90889SApple OSS Distributions if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3064*43a90889SApple OSS Distributions dataP->fDMAMapNumAddressBits = data->numAddressBits;
3065*43a90889SApple OSS Distributions }
3066*43a90889SApple OSS Distributions if (data->alignment > dataP->fDMAMapAlignment) {
3067*43a90889SApple OSS Distributions dataP->fDMAMapAlignment = data->alignment;
3068*43a90889SApple OSS Distributions }
3069*43a90889SApple OSS Distributions }
3070*43a90889SApple OSS Distributions return kIOReturnSuccess;
3071*43a90889SApple OSS Distributions }
3072*43a90889SApple OSS Distributions
3073*43a90889SApple OSS Distributions if (kIOMDGetCharacteristics == op) {
3074*43a90889SApple OSS Distributions if (dataSize < sizeof(IOMDDMACharacteristics)) {
3075*43a90889SApple OSS Distributions return kIOReturnUnderrun;
3076*43a90889SApple OSS Distributions }
3077*43a90889SApple OSS Distributions
3078*43a90889SApple OSS Distributions IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3079*43a90889SApple OSS Distributions data->fLength = _length;
3080*43a90889SApple OSS Distributions data->fSGCount = _rangesCount;
3081*43a90889SApple OSS Distributions data->fPages = _pages;
3082*43a90889SApple OSS Distributions data->fDirection = getDirection();
3083*43a90889SApple OSS Distributions if (!_wireCount) {
3084*43a90889SApple OSS Distributions data->fIsPrepared = false;
3085*43a90889SApple OSS Distributions } else {
3086*43a90889SApple OSS Distributions data->fIsPrepared = true;
3087*43a90889SApple OSS Distributions data->fHighestPage = _highestPage;
3088*43a90889SApple OSS Distributions if (_memoryEntries) {
3089*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
3090*43a90889SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
3091*43a90889SApple OSS Distributions UInt count = getNumIOPL(_memoryEntries, dataP);
3092*43a90889SApple OSS Distributions if (count == 1) {
3093*43a90889SApple OSS Distributions data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3094*43a90889SApple OSS Distributions }
3095*43a90889SApple OSS Distributions }
3096*43a90889SApple OSS Distributions }
3097*43a90889SApple OSS Distributions
3098*43a90889SApple OSS Distributions return kIOReturnSuccess;
3099*43a90889SApple OSS Distributions } else if (kIOMDDMAActive == op) {
3100*43a90889SApple OSS Distributions if (params) {
3101*43a90889SApple OSS Distributions int16_t prior;
3102*43a90889SApple OSS Distributions prior = OSAddAtomic16(1, &md->_dmaReferences);
3103*43a90889SApple OSS Distributions if (!prior) {
3104*43a90889SApple OSS Distributions md->_mapName = NULL;
3105*43a90889SApple OSS Distributions }
3106*43a90889SApple OSS Distributions } else {
3107*43a90889SApple OSS Distributions if (md->_dmaReferences) {
3108*43a90889SApple OSS Distributions OSAddAtomic16(-1, &md->_dmaReferences);
3109*43a90889SApple OSS Distributions } else {
3110*43a90889SApple OSS Distributions panic("_dmaReferences underflow");
3111*43a90889SApple OSS Distributions }
3112*43a90889SApple OSS Distributions }
3113*43a90889SApple OSS Distributions } else if (kIOMDWalkSegments != op) {
3114*43a90889SApple OSS Distributions return kIOReturnBadArgument;
3115*43a90889SApple OSS Distributions }
3116*43a90889SApple OSS Distributions
3117*43a90889SApple OSS Distributions // Get the next segment
3118*43a90889SApple OSS Distributions struct InternalState {
3119*43a90889SApple OSS Distributions IOMDDMAWalkSegmentArgs fIO;
3120*43a90889SApple OSS Distributions mach_vm_size_t fOffset2Index;
3121*43a90889SApple OSS Distributions mach_vm_size_t fNextOffset;
3122*43a90889SApple OSS Distributions UInt fIndex;
3123*43a90889SApple OSS Distributions } *isP;
3124*43a90889SApple OSS Distributions
3125*43a90889SApple OSS Distributions // Find the next segment
3126*43a90889SApple OSS Distributions if (dataSize < sizeof(*isP)) {
3127*43a90889SApple OSS Distributions return kIOReturnUnderrun;
3128*43a90889SApple OSS Distributions }
3129*43a90889SApple OSS Distributions
3130*43a90889SApple OSS Distributions isP = (InternalState *) vData;
3131*43a90889SApple OSS Distributions uint64_t offset = isP->fIO.fOffset;
3132*43a90889SApple OSS Distributions uint8_t mapped = isP->fIO.fMapped;
3133*43a90889SApple OSS Distributions uint64_t mappedBase;
3134*43a90889SApple OSS Distributions
3135*43a90889SApple OSS Distributions if (mapped && (kIOMemoryRemote & _flags)) {
3136*43a90889SApple OSS Distributions return kIOReturnNotAttached;
3137*43a90889SApple OSS Distributions }
3138*43a90889SApple OSS Distributions
3139*43a90889SApple OSS Distributions if (IOMapper::gSystem && mapped
3140*43a90889SApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3141*43a90889SApple OSS Distributions && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3142*43a90889SApple OSS Distributions // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3143*43a90889SApple OSS Distributions if (!_memoryEntries
3144*43a90889SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3145*43a90889SApple OSS Distributions return kIOReturnNoMemory;
3146*43a90889SApple OSS Distributions }
3147*43a90889SApple OSS Distributions
3148*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
3149*43a90889SApple OSS Distributions if (dataP->fMapper) {
3150*43a90889SApple OSS Distributions IODMAMapSpecification mapSpec;
3151*43a90889SApple OSS Distributions bzero(&mapSpec, sizeof(mapSpec));
3152*43a90889SApple OSS Distributions mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3153*43a90889SApple OSS Distributions mapSpec.alignment = dataP->fDMAMapAlignment;
3154*43a90889SApple OSS Distributions err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3155*43a90889SApple OSS Distributions if (kIOReturnSuccess != err) {
3156*43a90889SApple OSS Distributions return err;
3157*43a90889SApple OSS Distributions }
3158*43a90889SApple OSS Distributions dataP->fMappedBaseValid = true;
3159*43a90889SApple OSS Distributions }
3160*43a90889SApple OSS Distributions }
3161*43a90889SApple OSS Distributions
3162*43a90889SApple OSS Distributions if (mapped) {
3163*43a90889SApple OSS Distributions if (IOMapper::gSystem
3164*43a90889SApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3165*43a90889SApple OSS Distributions && _memoryEntries
3166*43a90889SApple OSS Distributions && (dataP = getDataP(_memoryEntries))
3167*43a90889SApple OSS Distributions && dataP->fMappedBaseValid) {
3168*43a90889SApple OSS Distributions mappedBase = dataP->fMappedBase;
3169*43a90889SApple OSS Distributions } else {
3170*43a90889SApple OSS Distributions mapped = 0;
3171*43a90889SApple OSS Distributions }
3172*43a90889SApple OSS Distributions }
3173*43a90889SApple OSS Distributions
3174*43a90889SApple OSS Distributions if (offset >= _length) {
3175*43a90889SApple OSS Distributions return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3176*43a90889SApple OSS Distributions }
3177*43a90889SApple OSS Distributions
3178*43a90889SApple OSS Distributions // Validate the previous offset
3179*43a90889SApple OSS Distributions UInt ind;
3180*43a90889SApple OSS Distributions mach_vm_size_t off2Ind = isP->fOffset2Index;
3181*43a90889SApple OSS Distributions if (!params
3182*43a90889SApple OSS Distributions && offset
3183*43a90889SApple OSS Distributions && (offset == isP->fNextOffset || off2Ind <= offset)) {
3184*43a90889SApple OSS Distributions ind = isP->fIndex;
3185*43a90889SApple OSS Distributions } else {
3186*43a90889SApple OSS Distributions ind = off2Ind = 0; // Start from beginning
3187*43a90889SApple OSS Distributions }
3188*43a90889SApple OSS Distributions mach_vm_size_t length;
3189*43a90889SApple OSS Distributions UInt64 address;
3190*43a90889SApple OSS Distributions
3191*43a90889SApple OSS Distributions if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3192*43a90889SApple OSS Distributions // Physical address based memory descriptor
3193*43a90889SApple OSS Distributions const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3194*43a90889SApple OSS Distributions
3195*43a90889SApple OSS Distributions // Find the range after the one that contains the offset
3196*43a90889SApple OSS Distributions mach_vm_size_t len;
3197*43a90889SApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3198*43a90889SApple OSS Distributions len = physP[ind].length;
3199*43a90889SApple OSS Distributions off2Ind += len;
3200*43a90889SApple OSS Distributions }
3201*43a90889SApple OSS Distributions
3202*43a90889SApple OSS Distributions // Calculate length within range and starting address
3203*43a90889SApple OSS Distributions length = off2Ind - offset;
3204*43a90889SApple OSS Distributions address = physP[ind - 1].address + len - length;
3205*43a90889SApple OSS Distributions
3206*43a90889SApple OSS Distributions if (true && mapped) {
3207*43a90889SApple OSS Distributions address = mappedBase + offset;
3208*43a90889SApple OSS Distributions } else {
3209*43a90889SApple OSS Distributions // see how far we can coalesce ranges
3210*43a90889SApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3211*43a90889SApple OSS Distributions len = physP[ind].length;
3212*43a90889SApple OSS Distributions length += len;
3213*43a90889SApple OSS Distributions off2Ind += len;
3214*43a90889SApple OSS Distributions ind++;
3215*43a90889SApple OSS Distributions }
3216*43a90889SApple OSS Distributions }
3217*43a90889SApple OSS Distributions
3218*43a90889SApple OSS Distributions // correct contiguous check overshoot
3219*43a90889SApple OSS Distributions ind--;
3220*43a90889SApple OSS Distributions off2Ind -= len;
3221*43a90889SApple OSS Distributions }
3222*43a90889SApple OSS Distributions #ifndef __LP64__
3223*43a90889SApple OSS Distributions else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3224*43a90889SApple OSS Distributions // Physical address based memory descriptor
3225*43a90889SApple OSS Distributions const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3226*43a90889SApple OSS Distributions
3227*43a90889SApple OSS Distributions // Find the range after the one that contains the offset
3228*43a90889SApple OSS Distributions mach_vm_size_t len;
3229*43a90889SApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3230*43a90889SApple OSS Distributions len = physP[ind].length;
3231*43a90889SApple OSS Distributions off2Ind += len;
3232*43a90889SApple OSS Distributions }
3233*43a90889SApple OSS Distributions
3234*43a90889SApple OSS Distributions // Calculate length within range and starting address
3235*43a90889SApple OSS Distributions length = off2Ind - offset;
3236*43a90889SApple OSS Distributions address = physP[ind - 1].address + len - length;
3237*43a90889SApple OSS Distributions
3238*43a90889SApple OSS Distributions if (true && mapped) {
3239*43a90889SApple OSS Distributions address = mappedBase + offset;
3240*43a90889SApple OSS Distributions } else {
3241*43a90889SApple OSS Distributions // see how far we can coalesce ranges
3242*43a90889SApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3243*43a90889SApple OSS Distributions len = physP[ind].length;
3244*43a90889SApple OSS Distributions length += len;
3245*43a90889SApple OSS Distributions off2Ind += len;
3246*43a90889SApple OSS Distributions ind++;
3247*43a90889SApple OSS Distributions }
3248*43a90889SApple OSS Distributions }
3249*43a90889SApple OSS Distributions // correct contiguous check overshoot
3250*43a90889SApple OSS Distributions ind--;
3251*43a90889SApple OSS Distributions off2Ind -= len;
3252*43a90889SApple OSS Distributions }
3253*43a90889SApple OSS Distributions #endif /* !__LP64__ */
3254*43a90889SApple OSS Distributions else {
3255*43a90889SApple OSS Distributions do {
3256*43a90889SApple OSS Distributions if (!_wireCount) {
3257*43a90889SApple OSS Distributions panic("IOGMD: not wired for the IODMACommand");
3258*43a90889SApple OSS Distributions }
3259*43a90889SApple OSS Distributions
3260*43a90889SApple OSS Distributions assert(_memoryEntries);
3261*43a90889SApple OSS Distributions
3262*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
3263*43a90889SApple OSS Distributions const ioPLBlock *ioplList = getIOPLList(dataP);
3264*43a90889SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3265*43a90889SApple OSS Distributions upl_page_info_t *pageList = getPageList(dataP);
3266*43a90889SApple OSS Distributions
3267*43a90889SApple OSS Distributions assert(numIOPLs > 0);
3268*43a90889SApple OSS Distributions
3269*43a90889SApple OSS Distributions // Scan through iopl info blocks looking for block containing offset
3270*43a90889SApple OSS Distributions while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3271*43a90889SApple OSS Distributions ind++;
3272*43a90889SApple OSS Distributions }
3273*43a90889SApple OSS Distributions
3274*43a90889SApple OSS Distributions // Go back to actual range as search goes past it
3275*43a90889SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ind - 1];
3276*43a90889SApple OSS Distributions off2Ind = ioplInfo.fIOMDOffset;
3277*43a90889SApple OSS Distributions
3278*43a90889SApple OSS Distributions if (ind < numIOPLs) {
3279*43a90889SApple OSS Distributions length = ioplList[ind].fIOMDOffset;
3280*43a90889SApple OSS Distributions } else {
3281*43a90889SApple OSS Distributions length = _length;
3282*43a90889SApple OSS Distributions }
3283*43a90889SApple OSS Distributions length -= offset; // Remainder within iopl
3284*43a90889SApple OSS Distributions
3285*43a90889SApple OSS Distributions // Subtract offset till this iopl in total list
3286*43a90889SApple OSS Distributions offset -= off2Ind;
3287*43a90889SApple OSS Distributions
3288*43a90889SApple OSS Distributions // If a mapped address is requested and this is a pre-mapped IOPL
3289*43a90889SApple OSS Distributions // then just need to compute an offset relative to the mapped base.
3290*43a90889SApple OSS Distributions if (mapped) {
3291*43a90889SApple OSS Distributions offset += (ioplInfo.fPageOffset & PAGE_MASK);
3292*43a90889SApple OSS Distributions address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3293*43a90889SApple OSS Distributions continue; // Done leave do/while(false) now
3294*43a90889SApple OSS Distributions }
3295*43a90889SApple OSS Distributions
3296*43a90889SApple OSS Distributions // The offset is rebased into the current iopl.
3297*43a90889SApple OSS Distributions // Now add the iopl 1st page offset.
3298*43a90889SApple OSS Distributions offset += ioplInfo.fPageOffset;
3299*43a90889SApple OSS Distributions
3300*43a90889SApple OSS Distributions // For external UPLs the fPageInfo field points directly to
3301*43a90889SApple OSS Distributions // the upl's upl_page_info_t array.
3302*43a90889SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
3303*43a90889SApple OSS Distributions pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3304*43a90889SApple OSS Distributions } else {
3305*43a90889SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
3306*43a90889SApple OSS Distributions }
3307*43a90889SApple OSS Distributions
3308*43a90889SApple OSS Distributions // Check for direct device non-paged memory
3309*43a90889SApple OSS Distributions if (ioplInfo.fFlags & kIOPLOnDevice) {
3310*43a90889SApple OSS Distributions address = ptoa_64(pageList->phys_addr) + offset;
3311*43a90889SApple OSS Distributions continue; // Done leave do/while(false) now
3312*43a90889SApple OSS Distributions }
3313*43a90889SApple OSS Distributions
3314*43a90889SApple OSS Distributions // Now we need compute the index into the pageList
3315*43a90889SApple OSS Distributions UInt pageInd = atop_32(offset);
3316*43a90889SApple OSS Distributions offset &= PAGE_MASK;
3317*43a90889SApple OSS Distributions
3318*43a90889SApple OSS Distributions // Compute the starting address of this segment
3319*43a90889SApple OSS Distributions IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3320*43a90889SApple OSS Distributions if (!pageAddr) {
3321*43a90889SApple OSS Distributions panic("!pageList phys_addr");
3322*43a90889SApple OSS Distributions }
3323*43a90889SApple OSS Distributions
3324*43a90889SApple OSS Distributions address = ptoa_64(pageAddr) + offset;
3325*43a90889SApple OSS Distributions
3326*43a90889SApple OSS Distributions // length is currently set to the length of the remainider of the iopl.
3327*43a90889SApple OSS Distributions // We need to check that the remainder of the iopl is contiguous.
3328*43a90889SApple OSS Distributions // This is indicated by pageList[ind].phys_addr being sequential.
3329*43a90889SApple OSS Distributions IOByteCount contigLength = PAGE_SIZE - offset;
3330*43a90889SApple OSS Distributions while (contigLength < length
3331*43a90889SApple OSS Distributions && ++pageAddr == pageList[++pageInd].phys_addr) {
3332*43a90889SApple OSS Distributions contigLength += PAGE_SIZE;
3333*43a90889SApple OSS Distributions }
3334*43a90889SApple OSS Distributions
3335*43a90889SApple OSS Distributions if (contigLength < length) {
3336*43a90889SApple OSS Distributions length = contigLength;
3337*43a90889SApple OSS Distributions }
3338*43a90889SApple OSS Distributions
3339*43a90889SApple OSS Distributions assert(address);
3340*43a90889SApple OSS Distributions assert(length);
3341*43a90889SApple OSS Distributions } while (false);
3342*43a90889SApple OSS Distributions }
3343*43a90889SApple OSS Distributions
3344*43a90889SApple OSS Distributions // Update return values and state
3345*43a90889SApple OSS Distributions isP->fIO.fIOVMAddr = address;
3346*43a90889SApple OSS Distributions isP->fIO.fLength = length;
3347*43a90889SApple OSS Distributions isP->fIndex = ind;
3348*43a90889SApple OSS Distributions isP->fOffset2Index = off2Ind;
3349*43a90889SApple OSS Distributions isP->fNextOffset = isP->fIO.fOffset + length;
3350*43a90889SApple OSS Distributions
3351*43a90889SApple OSS Distributions return kIOReturnSuccess;
3352*43a90889SApple OSS Distributions }
3353*43a90889SApple OSS Distributions
3354*43a90889SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3355*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3356*43a90889SApple OSS Distributions {
3357*43a90889SApple OSS Distributions IOReturn ret;
3358*43a90889SApple OSS Distributions mach_vm_address_t address = 0;
3359*43a90889SApple OSS Distributions mach_vm_size_t length = 0;
3360*43a90889SApple OSS Distributions IOMapper * mapper = gIOSystemMapper;
3361*43a90889SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
3362*43a90889SApple OSS Distributions
3363*43a90889SApple OSS Distributions if (lengthOfSegment) {
3364*43a90889SApple OSS Distributions *lengthOfSegment = 0;
3365*43a90889SApple OSS Distributions }
3366*43a90889SApple OSS Distributions
3367*43a90889SApple OSS Distributions if (offset >= _length) {
3368*43a90889SApple OSS Distributions return 0;
3369*43a90889SApple OSS Distributions }
3370*43a90889SApple OSS Distributions
3371*43a90889SApple OSS Distributions // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3372*43a90889SApple OSS Distributions // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3373*43a90889SApple OSS Distributions // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3374*43a90889SApple OSS Distributions // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3375*43a90889SApple OSS Distributions
3376*43a90889SApple OSS Distributions if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3377*43a90889SApple OSS Distributions unsigned rangesIndex = 0;
3378*43a90889SApple OSS Distributions Ranges vec = _ranges;
3379*43a90889SApple OSS Distributions mach_vm_address_t addr;
3380*43a90889SApple OSS Distributions
3381*43a90889SApple OSS Distributions // Find starting address within the vector of ranges
3382*43a90889SApple OSS Distributions for (;;) {
3383*43a90889SApple OSS Distributions getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
3384*43a90889SApple OSS Distributions if (offset < length) {
3385*43a90889SApple OSS Distributions break;
3386*43a90889SApple OSS Distributions }
3387*43a90889SApple OSS Distributions offset -= length; // (make offset relative)
3388*43a90889SApple OSS Distributions rangesIndex++;
3389*43a90889SApple OSS Distributions }
3390*43a90889SApple OSS Distributions
3391*43a90889SApple OSS Distributions // Now that we have the starting range,
3392*43a90889SApple OSS Distributions // lets find the last contiguous range
3393*43a90889SApple OSS Distributions addr += offset;
3394*43a90889SApple OSS Distributions length -= offset;
3395*43a90889SApple OSS Distributions
3396*43a90889SApple OSS Distributions for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3397*43a90889SApple OSS Distributions mach_vm_address_t newAddr;
3398*43a90889SApple OSS Distributions mach_vm_size_t newLen;
3399*43a90889SApple OSS Distributions
3400*43a90889SApple OSS Distributions getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
3401*43a90889SApple OSS Distributions if (addr + length != newAddr) {
3402*43a90889SApple OSS Distributions break;
3403*43a90889SApple OSS Distributions }
3404*43a90889SApple OSS Distributions length += newLen;
3405*43a90889SApple OSS Distributions }
3406*43a90889SApple OSS Distributions if (addr) {
3407*43a90889SApple OSS Distributions address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3408*43a90889SApple OSS Distributions }
3409*43a90889SApple OSS Distributions } else {
3410*43a90889SApple OSS Distributions IOMDDMAWalkSegmentState _state;
3411*43a90889SApple OSS Distributions IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3412*43a90889SApple OSS Distributions
3413*43a90889SApple OSS Distributions state->fOffset = offset;
3414*43a90889SApple OSS Distributions state->fLength = _length - offset;
3415*43a90889SApple OSS Distributions state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3416*43a90889SApple OSS Distributions
3417*43a90889SApple OSS Distributions ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3418*43a90889SApple OSS Distributions
3419*43a90889SApple OSS Distributions if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3420*43a90889SApple OSS Distributions DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3421*43a90889SApple OSS Distributions ret, this, state->fOffset,
3422*43a90889SApple OSS Distributions state->fIOVMAddr, state->fLength);
3423*43a90889SApple OSS Distributions }
3424*43a90889SApple OSS Distributions if (kIOReturnSuccess == ret) {
3425*43a90889SApple OSS Distributions address = state->fIOVMAddr;
3426*43a90889SApple OSS Distributions length = state->fLength;
3427*43a90889SApple OSS Distributions }
3428*43a90889SApple OSS Distributions
3429*43a90889SApple OSS Distributions // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3430*43a90889SApple OSS Distributions // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3431*43a90889SApple OSS Distributions
3432*43a90889SApple OSS Distributions if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3433*43a90889SApple OSS Distributions if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3434*43a90889SApple OSS Distributions addr64_t origAddr = address;
3435*43a90889SApple OSS Distributions IOByteCount origLen = length;
3436*43a90889SApple OSS Distributions
3437*43a90889SApple OSS Distributions address = mapper->mapToPhysicalAddress(origAddr);
3438*43a90889SApple OSS Distributions length = page_size - (address & (page_size - 1));
3439*43a90889SApple OSS Distributions while ((length < origLen)
3440*43a90889SApple OSS Distributions && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3441*43a90889SApple OSS Distributions length += page_size;
3442*43a90889SApple OSS Distributions }
3443*43a90889SApple OSS Distributions if (length > origLen) {
3444*43a90889SApple OSS Distributions length = origLen;
3445*43a90889SApple OSS Distributions }
3446*43a90889SApple OSS Distributions }
3447*43a90889SApple OSS Distributions }
3448*43a90889SApple OSS Distributions }
3449*43a90889SApple OSS Distributions
3450*43a90889SApple OSS Distributions if (!address) {
3451*43a90889SApple OSS Distributions length = 0;
3452*43a90889SApple OSS Distributions }
3453*43a90889SApple OSS Distributions
3454*43a90889SApple OSS Distributions if (lengthOfSegment) {
3455*43a90889SApple OSS Distributions *lengthOfSegment = length;
3456*43a90889SApple OSS Distributions }
3457*43a90889SApple OSS Distributions
3458*43a90889SApple OSS Distributions return address;
3459*43a90889SApple OSS Distributions }
3460*43a90889SApple OSS Distributions
3461*43a90889SApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)3462*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::readBytes
3463*43a90889SApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
3464*43a90889SApple OSS Distributions {
3465*43a90889SApple OSS Distributions IOByteCount count = super::readBytes(offset, bytes, length);
3466*43a90889SApple OSS Distributions return count;
3467*43a90889SApple OSS Distributions }
3468*43a90889SApple OSS Distributions
3469*43a90889SApple OSS Distributions IOByteCount
writeBytes(IOByteCount offset,const void * bytes,IOByteCount withLength)3470*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::writeBytes
3471*43a90889SApple OSS Distributions (IOByteCount offset, const void* bytes, IOByteCount withLength)
3472*43a90889SApple OSS Distributions {
3473*43a90889SApple OSS Distributions IOByteCount count = super::writeBytes(offset, bytes, withLength);
3474*43a90889SApple OSS Distributions return count;
3475*43a90889SApple OSS Distributions }
3476*43a90889SApple OSS Distributions
3477*43a90889SApple OSS Distributions #ifndef __LP64__
3478*43a90889SApple OSS Distributions #pragma clang diagnostic push
3479*43a90889SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3480*43a90889SApple OSS Distributions
3481*43a90889SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3482*43a90889SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3483*43a90889SApple OSS Distributions {
3484*43a90889SApple OSS Distributions addr64_t address = 0;
3485*43a90889SApple OSS Distributions
3486*43a90889SApple OSS Distributions if (options & _kIOMemorySourceSegment) {
3487*43a90889SApple OSS Distributions address = getSourceSegment(offset, lengthOfSegment);
3488*43a90889SApple OSS Distributions } else if (options & kIOMemoryMapperNone) {
3489*43a90889SApple OSS Distributions address = getPhysicalSegment64(offset, lengthOfSegment);
3490*43a90889SApple OSS Distributions } else {
3491*43a90889SApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment);
3492*43a90889SApple OSS Distributions }
3493*43a90889SApple OSS Distributions
3494*43a90889SApple OSS Distributions return address;
3495*43a90889SApple OSS Distributions }
3496*43a90889SApple OSS Distributions #pragma clang diagnostic pop
3497*43a90889SApple OSS Distributions
3498*43a90889SApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3499*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3500*43a90889SApple OSS Distributions {
3501*43a90889SApple OSS Distributions return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3502*43a90889SApple OSS Distributions }
3503*43a90889SApple OSS Distributions
3504*43a90889SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3505*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3506*43a90889SApple OSS Distributions {
3507*43a90889SApple OSS Distributions addr64_t address = 0;
3508*43a90889SApple OSS Distributions IOByteCount length = 0;
3509*43a90889SApple OSS Distributions
3510*43a90889SApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment, 0);
3511*43a90889SApple OSS Distributions
3512*43a90889SApple OSS Distributions if (lengthOfSegment) {
3513*43a90889SApple OSS Distributions length = *lengthOfSegment;
3514*43a90889SApple OSS Distributions }
3515*43a90889SApple OSS Distributions
3516*43a90889SApple OSS Distributions if ((address + length) > 0x100000000ULL) {
3517*43a90889SApple OSS Distributions panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3518*43a90889SApple OSS Distributions address, (long) length, (getMetaClass())->getClassName());
3519*43a90889SApple OSS Distributions }
3520*43a90889SApple OSS Distributions
3521*43a90889SApple OSS Distributions return (IOPhysicalAddress) address;
3522*43a90889SApple OSS Distributions }
3523*43a90889SApple OSS Distributions
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// Pre-LP64 compatibility: return the 64-bit system physical address of
	// the segment at 'offset', translating through the system IOMapper when
	// one is present.
	IOPhysicalAddress phys32;
	IOByteCount length;
	addr64_t phys64;
	IOMapper * mapper = NULL;

	phys32 = getPhysicalSegment(offset, lengthOfSegment);
	if (!phys32) {
		return 0;
	}

	if (gIOSystemMapper) {
		mapper = gIOSystemMapper;
	}

	if (mapper) {
		IOByteCount origLen;

		// NOTE(review): this branch dereferences lengthOfSegment without a
		// NULL check (the !mapper branch never touches it) — presumably
		// callers on mapped systems always pass a valid pointer; confirm.
		phys64 = mapper->mapToPhysicalAddress(phys32);
		origLen = *lengthOfSegment;
		// Start with the run to the end of the translated page, then extend
		// the segment while successive pages translate contiguously.
		length = page_size - (phys64 & (page_size - 1));
		while ((length < origLen)
		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
			length += page_size;
		}
		// Never report more than the caller-visible segment length.
		if (length > origLen) {
			length = origLen;
		}

		*lengthOfSegment = length;
	} else {
		// No system mapper: the 32-bit address is already physical.
		phys64 = (addr64_t) phys32;
	}

	return phys64;
}
3562*43a90889SApple OSS Distributions
3563*43a90889SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3564*43a90889SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3565*43a90889SApple OSS Distributions {
3566*43a90889SApple OSS Distributions return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3567*43a90889SApple OSS Distributions }
3568*43a90889SApple OSS Distributions
3569*43a90889SApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3570*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3571*43a90889SApple OSS Distributions {
3572*43a90889SApple OSS Distributions return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3573*43a90889SApple OSS Distributions }
3574*43a90889SApple OSS Distributions
3575*43a90889SApple OSS Distributions #pragma clang diagnostic push
3576*43a90889SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3577*43a90889SApple OSS Distributions
void *
IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	// Deprecated pre-LP64 interface. Only kernel_task-backed descriptors are
	// supported; calling on any other descriptor panics.
	if (_task == kernel_task) {
		// NOTE(review): the source segment address is cast straight to a
		// pointer — for kernel_task descriptors the source ranges are
		// presumably kernel virtual addresses; confirm before reuse.
		return (void *) getSourceSegment(offset, lengthOfSegment);
	} else {
		panic("IOGMD::getVirtualSegment deprecated");
	}

	return NULL;
}
3590*43a90889SApple OSS Distributions #pragma clang diagnostic pop
3591*43a90889SApple OSS Distributions #endif /* !__LP64__ */
3592*43a90889SApple OSS Distributions
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// IODMACommand back-end entry point: decode the operation selector from
	// 'op' and interpret vData as the matching argument structure, whose
	// size is validated against dataSize before any field is touched.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Separate any flag bits from the operation selector.
	// NOTE(review): 'params' is computed but never read in this method.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t addr, nextAddr;

		// This generic implementation only walks unmapped (bypass) segments.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Coalesce physically contiguous segments into one reported segment.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): 'err' from dmaUnmap is discarded here and success is
		// returned unconditionally — confirm this is intentional.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3664*43a90889SApple OSS Distributions
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	// Change the purgeable state of the memory backing this descriptor,
	// optionally returning the previous state via 'oldState'.
	IOReturn err = kIOReturnSuccess;

	vm_purgable_t control;
	int state;

	// Remote descriptors cannot be manipulated from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		// A memory reference exists: defer to the generic implementation,
		// which operates on the memory entry.
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable kernel buffers need the _memRef path above,
				// which is not set up yet here.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				// Physical-address descriptors have no task map to act on.
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate the IOKit purgeable state into VM control/state values.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Convert the VM's returned state back into IOKit bits.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3728*43a90889SApple OSS Distributions
3729*43a90889SApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3730*43a90889SApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3731*43a90889SApple OSS Distributions IOOptionBits * oldState )
3732*43a90889SApple OSS Distributions {
3733*43a90889SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3734*43a90889SApple OSS Distributions
3735*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3736*43a90889SApple OSS Distributions LOCK;
3737*43a90889SApple OSS Distributions }
3738*43a90889SApple OSS Distributions if (_memRef) {
3739*43a90889SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3740*43a90889SApple OSS Distributions }
3741*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3742*43a90889SApple OSS Distributions UNLOCK;
3743*43a90889SApple OSS Distributions }
3744*43a90889SApple OSS Distributions
3745*43a90889SApple OSS Distributions return err;
3746*43a90889SApple OSS Distributions }
3747*43a90889SApple OSS Distributions
3748*43a90889SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3749*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3750*43a90889SApple OSS Distributions int newLedgerTag,
3751*43a90889SApple OSS Distributions IOOptionBits newLedgerOptions )
3752*43a90889SApple OSS Distributions {
3753*43a90889SApple OSS Distributions IOReturn err = kIOReturnSuccess;
3754*43a90889SApple OSS Distributions
3755*43a90889SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3756*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3757*43a90889SApple OSS Distributions return kIOReturnNotAttached;
3758*43a90889SApple OSS Distributions }
3759*43a90889SApple OSS Distributions
3760*43a90889SApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3761*43a90889SApple OSS Distributions return kIOReturnUnsupported;
3762*43a90889SApple OSS Distributions }
3763*43a90889SApple OSS Distributions
3764*43a90889SApple OSS Distributions if (_memRef) {
3765*43a90889SApple OSS Distributions err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3766*43a90889SApple OSS Distributions } else {
3767*43a90889SApple OSS Distributions err = kIOReturnUnsupported;
3768*43a90889SApple OSS Distributions }
3769*43a90889SApple OSS Distributions
3770*43a90889SApple OSS Distributions return err;
3771*43a90889SApple OSS Distributions }
3772*43a90889SApple OSS Distributions
3773*43a90889SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3774*43a90889SApple OSS Distributions IOMemoryDescriptor::setOwnership( task_t newOwner,
3775*43a90889SApple OSS Distributions int newLedgerTag,
3776*43a90889SApple OSS Distributions IOOptionBits newLedgerOptions )
3777*43a90889SApple OSS Distributions {
3778*43a90889SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3779*43a90889SApple OSS Distributions
3780*43a90889SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3781*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3782*43a90889SApple OSS Distributions return kIOReturnNotAttached;
3783*43a90889SApple OSS Distributions }
3784*43a90889SApple OSS Distributions
3785*43a90889SApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3786*43a90889SApple OSS Distributions return kIOReturnUnsupported;
3787*43a90889SApple OSS Distributions }
3788*43a90889SApple OSS Distributions
3789*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3790*43a90889SApple OSS Distributions LOCK;
3791*43a90889SApple OSS Distributions }
3792*43a90889SApple OSS Distributions if (_memRef) {
3793*43a90889SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3794*43a90889SApple OSS Distributions } else {
3795*43a90889SApple OSS Distributions IOMultiMemoryDescriptor * mmd;
3796*43a90889SApple OSS Distributions IOSubMemoryDescriptor * smd;
3797*43a90889SApple OSS Distributions if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3798*43a90889SApple OSS Distributions err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3799*43a90889SApple OSS Distributions } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3800*43a90889SApple OSS Distributions err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3801*43a90889SApple OSS Distributions }
3802*43a90889SApple OSS Distributions }
3803*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3804*43a90889SApple OSS Distributions UNLOCK;
3805*43a90889SApple OSS Distributions }
3806*43a90889SApple OSS Distributions
3807*43a90889SApple OSS Distributions return err;
3808*43a90889SApple OSS Distributions }
3809*43a90889SApple OSS Distributions
3810*43a90889SApple OSS Distributions
uint64_t
IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
{
	// Return the page-rounded length this descriptor would occupy in a DMA
	// mapping; optionally report (via 'offset') the sub-page offset of the
	// first segment.
	uint64_t length;

	if (_memRef) {
		// Reference-backed memory: the memory entry knows its map length.
		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
	} else {
		IOByteCount iterate, segLen;
		IOPhysicalAddress sourceAddr, sourceAlign;

		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		length = 0;
		iterate = 0;
		// Walk the source segments, adding each segment's page-rounded span.
		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
			sourceAlign = (sourceAddr & page_mask);
			// In-page alignment of the first segment is the map offset.
			if (offset && !iterate) {
				*offset = sourceAlign;
			}
			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
			iterate += segLen;
		}
		// No segments at all: fall back to the descriptor's raw length.
		if (!iterate) {
			length = getLength();
			if (offset) {
				*offset = 0;
			}
		}
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return length;
}
3848*43a90889SApple OSS Distributions
3849*43a90889SApple OSS Distributions
3850*43a90889SApple OSS Distributions IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3851*43a90889SApple OSS Distributions IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3852*43a90889SApple OSS Distributions IOByteCount * dirtyPageCount )
3853*43a90889SApple OSS Distributions {
3854*43a90889SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3855*43a90889SApple OSS Distributions
3856*43a90889SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3857*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3858*43a90889SApple OSS Distributions return kIOReturnNotAttached;
3859*43a90889SApple OSS Distributions }
3860*43a90889SApple OSS Distributions
3861*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3862*43a90889SApple OSS Distributions LOCK;
3863*43a90889SApple OSS Distributions }
3864*43a90889SApple OSS Distributions if (_memRef) {
3865*43a90889SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3866*43a90889SApple OSS Distributions } else {
3867*43a90889SApple OSS Distributions IOMultiMemoryDescriptor * mmd;
3868*43a90889SApple OSS Distributions IOSubMemoryDescriptor * smd;
3869*43a90889SApple OSS Distributions if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3870*43a90889SApple OSS Distributions err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3871*43a90889SApple OSS Distributions } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3872*43a90889SApple OSS Distributions err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3873*43a90889SApple OSS Distributions }
3874*43a90889SApple OSS Distributions }
3875*43a90889SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3876*43a90889SApple OSS Distributions UNLOCK;
3877*43a90889SApple OSS Distributions }
3878*43a90889SApple OSS Distributions
3879*43a90889SApple OSS Distributions return err;
3880*43a90889SApple OSS Distributions }
3881*43a90889SApple OSS Distributions
3882*43a90889SApple OSS Distributions
3883*43a90889SApple OSS Distributions #if defined(__arm64__)
3884*43a90889SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3885*43a90889SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3886*43a90889SApple OSS Distributions #else /* defined(__arm64__) */
3887*43a90889SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3888*43a90889SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3889*43a90889SApple OSS Distributions #endif /* defined(__arm64__) */
3890*43a90889SApple OSS Distributions
3891*43a90889SApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3892*43a90889SApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3893*43a90889SApple OSS Distributions {
3894*43a90889SApple OSS Distributions ppnum_t page, end;
3895*43a90889SApple OSS Distributions
3896*43a90889SApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3897*43a90889SApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3898*43a90889SApple OSS Distributions for (; page < end; page++) {
3899*43a90889SApple OSS Distributions pmap_clear_noencrypt(page);
3900*43a90889SApple OSS Distributions }
3901*43a90889SApple OSS Distributions }
3902*43a90889SApple OSS Distributions
3903*43a90889SApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3904*43a90889SApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3905*43a90889SApple OSS Distributions {
3906*43a90889SApple OSS Distributions ppnum_t page, end;
3907*43a90889SApple OSS Distributions
3908*43a90889SApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3909*43a90889SApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3910*43a90889SApple OSS Distributions for (; page < end; page++) {
3911*43a90889SApple OSS Distributions pmap_set_noencrypt(page);
3912*43a90889SApple OSS Distributions }
3913*43a90889SApple OSS Distributions }
3914*43a90889SApple OSS Distributions
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	// Apply a cache-maintenance or encryption operation over
	// [offset, offset + length) of this descriptor, one physical segment at
	// a time. Returns kIOReturnUnderrun if the walk stops early.
	IOByteCount remaining;
	unsigned int res;
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	// arm64 uses the extended flush/store entry points, which also receive
	// the remaining byte count and can report a result.
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Select the per-segment operation for the requested option.
	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Coherent-I/O hardware: a single zero-length call satisfies the
		// request without walking any segments.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	// Unknown option: neither function pointer was set.
#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the requested range to the descriptor's actual length.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// The operation takes 'unsigned int' counts: clamp the segment
		// length to the largest page-multiple that fits, and the remaining
		// hint to UINT_MAX.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// A nonzero result means the operation completed the whole
			// range itself; stop walking.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4029*43a90889SApple OSS Distributions
4030*43a90889SApple OSS Distributions /*
4031*43a90889SApple OSS Distributions *
4032*43a90889SApple OSS Distributions */
4033*43a90889SApple OSS Distributions
4034*43a90889SApple OSS Distributions #if defined(__i386__) || defined(__x86_64__)
4035*43a90889SApple OSS Distributions
4036*43a90889SApple OSS Distributions extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4037*43a90889SApple OSS Distributions
4038*43a90889SApple OSS Distributions /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4039*43a90889SApple OSS Distributions * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4040*43a90889SApple OSS Distributions * kernel non-text data -- should we just add another range instead?
4041*43a90889SApple OSS Distributions */
4042*43a90889SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4043*43a90889SApple OSS Distributions #define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4044*43a90889SApple OSS Distributions
4045*43a90889SApple OSS Distributions #elif defined(__arm64__)
4046*43a90889SApple OSS Distributions
4047*43a90889SApple OSS Distributions extern vm_offset_t static_memory_end;
4048*43a90889SApple OSS Distributions
4049*43a90889SApple OSS Distributions #if defined(__arm64__)
4050*43a90889SApple OSS Distributions #define io_kernel_static_start vm_kext_base
4051*43a90889SApple OSS Distributions #else /* defined(__arm64__) */
4052*43a90889SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4053*43a90889SApple OSS Distributions #endif /* defined(__arm64__) */
4054*43a90889SApple OSS Distributions
4055*43a90889SApple OSS Distributions #define io_kernel_static_end static_memory_end
4056*43a90889SApple OSS Distributions
4057*43a90889SApple OSS Distributions #else
4058*43a90889SApple OSS Distributions #error io_kernel_static_end is undefined for this architecture
4059*43a90889SApple OSS Distributions #endif
4060*43a90889SApple OSS Distributions
4061*43a90889SApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4062*43a90889SApple OSS Distributions io_get_kernel_static_upl(
4063*43a90889SApple OSS Distributions vm_map_t /* map */,
4064*43a90889SApple OSS Distributions uintptr_t offset,
4065*43a90889SApple OSS Distributions upl_size_t *upl_size,
4066*43a90889SApple OSS Distributions unsigned int *page_offset,
4067*43a90889SApple OSS Distributions upl_t *upl,
4068*43a90889SApple OSS Distributions upl_page_info_array_t page_list,
4069*43a90889SApple OSS Distributions unsigned int *count,
4070*43a90889SApple OSS Distributions ppnum_t *highest_page)
4071*43a90889SApple OSS Distributions {
4072*43a90889SApple OSS Distributions unsigned int pageCount, page;
4073*43a90889SApple OSS Distributions ppnum_t phys;
4074*43a90889SApple OSS Distributions ppnum_t highestPage = 0;
4075*43a90889SApple OSS Distributions
4076*43a90889SApple OSS Distributions pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4077*43a90889SApple OSS Distributions if (pageCount > *count) {
4078*43a90889SApple OSS Distributions pageCount = *count;
4079*43a90889SApple OSS Distributions }
4080*43a90889SApple OSS Distributions *upl_size = (upl_size_t) ptoa_64(pageCount);
4081*43a90889SApple OSS Distributions
4082*43a90889SApple OSS Distributions *upl = NULL;
4083*43a90889SApple OSS Distributions *page_offset = ((unsigned int) page_mask & offset);
4084*43a90889SApple OSS Distributions
4085*43a90889SApple OSS Distributions for (page = 0; page < pageCount; page++) {
4086*43a90889SApple OSS Distributions phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4087*43a90889SApple OSS Distributions if (!phys) {
4088*43a90889SApple OSS Distributions break;
4089*43a90889SApple OSS Distributions }
4090*43a90889SApple OSS Distributions page_list[page].phys_addr = phys;
4091*43a90889SApple OSS Distributions page_list[page].free_when_done = 0;
4092*43a90889SApple OSS Distributions page_list[page].absent = 0;
4093*43a90889SApple OSS Distributions page_list[page].dirty = 0;
4094*43a90889SApple OSS Distributions page_list[page].precious = 0;
4095*43a90889SApple OSS Distributions page_list[page].device = 0;
4096*43a90889SApple OSS Distributions if (phys > highestPage) {
4097*43a90889SApple OSS Distributions highestPage = phys;
4098*43a90889SApple OSS Distributions }
4099*43a90889SApple OSS Distributions }
4100*43a90889SApple OSS Distributions
4101*43a90889SApple OSS Distributions *highest_page = highestPage;
4102*43a90889SApple OSS Distributions
4103*43a90889SApple OSS Distributions return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4104*43a90889SApple OSS Distributions }
4105*43a90889SApple OSS Distributions
/*
 * Wire down (prepare) the physical pages backing a virtual-range memory
 * descriptor for I/O.
 *
 * For each virtual range (or each _memRef entry when a memory reference
 * exists) this creates one or more UPLs and appends an ioPLBlock record plus
 * its page info to _memoryEntries.  On success _highestPage is updated and,
 * for read-only (kIODirectionOut) preparations, kIOMemoryPreparedReadOnly is
 * set in _flags.  On any failure all UPLs created so far are aborted and
 * _memoryEntries is rolled back to the unprepared state.
 *
 * Only valid for the virtual descriptor types asserted below.
 */
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOReturn error = kIOReturnSuccess;
	ioGMDData *dataP;
	upl_page_info_array_t pageInfo;
	ppnum_t mapBase;
	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	mach_vm_size_t numBytesWired = 0;

	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

	// No explicit direction requested: fall back to the descriptor's own.
	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
		forDirection = (IODirection) (forDirection | getDirection());
	}

	dataP = getDataP(_memoryEntries);
	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
	switch (kIODirectionOutIn & forDirection) {
	case kIODirectionOut:
		// Pages do not need to be marked as dirty on commit
		uplFlags = UPL_COPYOUT_FROM;
		dataP->fDMAAccess = kIODMAMapReadAccess;
		break;

	case kIODirectionIn:
		dataP->fDMAAccess = kIODMAMapWriteAccess;
		uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
		break;

	default:
		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
		uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
		break;
	}

	if (_wireCount) {
		// Already wired: the only illegal case is asking for write access
		// to a preparation that was made read-only.
		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
			    (size_t)VM_KERNEL_ADDRPERM(this));
			error = kIOReturnNotWritable;
		}
	} else {
		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
		IOMapper *mapper;

		mapper = dataP->fMapper;
		dataP->fMappedBaseValid = dataP->fMappedBase = 0;

		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
		tag = _kernelTag;
		if (VM_KERN_MEMORY_NONE == tag) {
			tag = IOMemoryTag(kernel_map);
		}

		if (kIODirectionPrepareToPhys32 & forDirection) {
			// Caller needs physical addresses below 4GB.
			if (!mapper) {
				uplFlags |= UPL_NEED_32BIT_ADDR;
			}
			if (dataP->fDMAMapNumAddressBits > 32) {
				dataP->fDMAMapNumAddressBits = 32;
			}
		}
		if (kIODirectionPrepareNoFault & forDirection) {
			uplFlags |= UPL_REQUEST_NO_FAULT;
		}
		if (kIODirectionPrepareNoZeroFill & forDirection) {
			uplFlags |= UPL_NOZEROFILLIO;
		}
		if (kIODirectionPrepareNonCoherent & forDirection) {
			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
		}

		mapBase = 0;

		// Note that appendBytes(NULL) zeros the data up to the desired length
		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
		// Reject page-list sizes that would truncate when narrowed to
		// unsigned int.
		if (uplPageSize > ((unsigned int)uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		// dataP may be stale after appendBytes; it is re-fetched via
		// getDataP() before each use below.
		dataP = NULL;

		// Find the appropriate vm_map for the given task
		vm_map_t curMap;
		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
			curMap = NULL;
		} else {
			curMap = get_task_map(_task);
		}

		// Iterate over the vector of virtual ranges
		Ranges vec = _ranges;
		unsigned int pageIndex = 0;
		IOByteCount mdOffset = 0;
		ppnum_t highestPage = 0;
		bool byteAlignUPL;

		IOMemoryEntry * memRefEntry = NULL;
		if (_memRef) {
			memRefEntry = &_memRef->entries[0];
			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
		} else {
			byteAlignUPL = true;
		}

		for (UInt range = 0; mdOffset < _length; range++) {
			ioPLBlock iopl;
			mach_vm_address_t startPage, startPageOffset;
			mach_vm_size_t numBytes;
			ppnum_t highPage = 0;

			if (_memRef) {
				if (range >= _memRef->count) {
					panic("memRefEntry");
				}
				memRefEntry = &_memRef->entries[range];
				numBytes = memRefEntry->size;
				startPage = -1ULL;   // sentinel: no VA for memRef-backed ranges
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = (memRefEntry->start & PAGE_MASK);
				}
			} else {
				// Get the startPage address and length of vec[range]
				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = startPage & PAGE_MASK;
					startPage = trunc_page_64(startPage);
				}
			}
			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
			numBytes += startPageOffset;

			if (mapper) {
				iopl.fMappedPage = mapBase + pageIndex;
			} else {
				iopl.fMappedPage = 0;
			}

			// Iterate over the current range, creating UPLs
			while (numBytes) {
				vm_address_t kernelStart = (vm_address_t) startPage;
				vm_map_t theMap;
				if (curMap) {
					theMap = curMap;
				} else if (_memRef) {
					theMap = NULL;
				} else {
					assert(_task == kernel_task);
					theMap = IOPageableMapForAddress(kernelStart);
				}

				// ioplFlags is an in/out parameter
				upl_control_flags_t ioplFlags = uplFlags;
				dataP = getDataP(_memoryEntries);
				pageInfo = getPageList(dataP);
				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

				mach_vm_size_t ioplPhysSize;
				upl_size_t ioplSize;
				unsigned int numPageInfo;

				// Ask the VM how much physical space this range occupies, to
				// size the UPL request.
				if (_memRef) {
					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
				} else {
					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
				}
				if (error != KERN_SUCCESS) {
					if (_memRef) {
						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
					} else {
						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
					}
					printf("entry size error %d\n", error);
					goto abortExit;
				}
				// Cap each UPL at MAX_UPL_SIZE_BYTES; larger spans take more
				// trips through this loop.
				ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
				numPageInfo = atop_32(ioplPhysSize);
				if (byteAlignUPL) {
					if (numBytes > ioplPhysSize) {
						ioplSize = ((typeof(ioplSize))ioplPhysSize);
					} else {
						ioplSize = ((typeof(ioplSize))numBytes);
					}
				} else {
					ioplSize = ((typeof(ioplSize))ioplPhysSize);
				}

				// Three ways to obtain the pages: via the memory entry, via
				// the static kernel range, or via a map-based UPL.
				if (_memRef) {
					memory_object_offset_t entryOffset;

					entryOffset = mdOffset;
					if (byteAlignUPL) {
						entryOffset = (entryOffset - memRefEntry->offset);
					} else {
						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
					}
					// Clip to the remainder of this memory entry.
					if (ioplSize > (memRefEntry->size - entryOffset)) {
						ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
					}
					error = memory_object_iopl_request(memRefEntry->entry,
					    entryOffset,
					    &ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				} else if ((theMap == kernel_map)
				    && (kernelStart >= io_kernel_static_start)
				    && (kernelStart < io_kernel_static_end)) {
					// Statically mapped kernel memory: read the pmap directly,
					// no UPL object is created.
					error = io_get_kernel_static_upl(theMap,
					    kernelStart,
					    &ioplSize,
					    &iopl.fPageOffset,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &highPage);
				} else {
					assert(theMap);
					error = vm_map_create_upl(theMap,
					    startPage,
					    (upl_size_t*)&ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				}

				if (error != KERN_SUCCESS) {
					traceInterval.setEndArg2(error);
					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
					goto abortExit;
				}

				assert(ioplSize);

				if (iopl.fIOPL) {
					highPage = upl_get_highest_page(iopl.fIOPL);
				}
				if (highPage > highestPage) {
					highestPage = highPage;
				}

				if (baseInfo->device) {
					// Device memory: a single page-info entry describes it.
					numPageInfo = 1;
					iopl.fFlags = kIOPLOnDevice;
				} else {
					iopl.fFlags = 0;
				}

				if (byteAlignUPL) {
					if (iopl.fIOPL) {
						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
					}
					if (startPage != (mach_vm_address_t)-1) {
						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
						startPage -= iopl.fPageOffset;
					}
					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
					numBytes += iopl.fPageOffset;
				}

				iopl.fIOMDOffset = mdOffset;
				iopl.fPageInfo = pageIndex;

				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
					// Clean up partial created and unsaved iopl
					if (iopl.fIOPL) {
						upl_abort(iopl.fIOPL, 0);
						upl_deallocate(iopl.fIOPL);
					}
					error = kIOReturnNoMemory;
					traceInterval.setEndArg2(error);
					goto abortExit;
				}
				dataP = NULL;

				// Check for a multiple iopl's in one virtual range
				pageIndex += numPageInfo;
				mdOffset -= iopl.fPageOffset;
				numBytesWired += ioplSize;
				if (ioplSize < numBytes) {
					// More of this range remains; advance and loop for the
					// next UPL.
					numBytes -= ioplSize;
					if (startPage != (mach_vm_address_t)-1) {
						startPage += ioplSize;
					}
					mdOffset += ioplSize;
					iopl.fPageOffset = 0;
					if (mapper) {
						iopl.fMappedPage = mapBase + pageIndex;
					}
				} else {
					mdOffset += numBytes;
					break;
				}
			}
		}

		_highestPage = highestPage;
		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);

		if (UPL_COPYOUT_FROM & uplFlags) {
			// Read-only preparation; later write preparations will be
			// rejected above while _wireCount is non-zero.
			_flags |= kIOMemoryPreparedReadOnly;
		}
		traceInterval.setEndCodes(numBytesWired, error);
	}

#if IOTRACKING
	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
		dataP = getDataP(_memoryEntries);
		if (!dataP->fWireTracking.link.next) {
			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
		}
	}
#endif /* IOTRACKING */

	return error;

abortExit:
	{
		// Undo every UPL recorded before the failure and shrink the data
		// blob back to its empty (unprepared) size.
		dataP = getDataP(_memoryEntries);
		UInt done = getNumIOPL(_memoryEntries, dataP);
		ioPLBlock *ioplList = getIOPLList(dataP);

		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
			if (ioplList[ioplIdx].fIOPL) {
				upl_abort(ioplList[ioplIdx].fIOPL, 0);
				upl_deallocate(ioplList[ioplIdx].fIOPL);
			}
		}
		_memoryEntries->setLength(computeDataSize(0, 0));
	}

	// Translate Mach VM error codes into IOKit return codes.
	if (error == KERN_FAILURE) {
		error = kIOReturnCannotWire;
	} else if (error == KERN_MEMORY_ERROR) {
		error = kIOReturnNoResources;
	}

	return error;
}
4464*43a90889SApple OSS Distributions
4465*43a90889SApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4466*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4467*43a90889SApple OSS Distributions {
4468*43a90889SApple OSS Distributions ioGMDData * dataP;
4469*43a90889SApple OSS Distributions
4470*43a90889SApple OSS Distributions if (size > UINT_MAX) {
4471*43a90889SApple OSS Distributions return false;
4472*43a90889SApple OSS Distributions }
4473*43a90889SApple OSS Distributions if (!_memoryEntries) {
4474*43a90889SApple OSS Distributions _memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4475*43a90889SApple OSS Distributions if (!_memoryEntries) {
4476*43a90889SApple OSS Distributions return false;
4477*43a90889SApple OSS Distributions }
4478*43a90889SApple OSS Distributions } else if (!_memoryEntries->initWithCapacity(size)) {
4479*43a90889SApple OSS Distributions return false;
4480*43a90889SApple OSS Distributions }
4481*43a90889SApple OSS Distributions
4482*43a90889SApple OSS Distributions _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4483*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
4484*43a90889SApple OSS Distributions
4485*43a90889SApple OSS Distributions if (mapper == kIOMapperWaitSystem) {
4486*43a90889SApple OSS Distributions IOMapper::checkForSystemMapper();
4487*43a90889SApple OSS Distributions mapper = IOMapper::gSystem;
4488*43a90889SApple OSS Distributions }
4489*43a90889SApple OSS Distributions dataP->fMapper = mapper;
4490*43a90889SApple OSS Distributions dataP->fPageCnt = 0;
4491*43a90889SApple OSS Distributions dataP->fMappedBase = 0;
4492*43a90889SApple OSS Distributions dataP->fDMAMapNumAddressBits = 64;
4493*43a90889SApple OSS Distributions dataP->fDMAMapAlignment = 0;
4494*43a90889SApple OSS Distributions dataP->fPreparationID = kIOPreparationIDUnprepared;
4495*43a90889SApple OSS Distributions dataP->fCompletionError = false;
4496*43a90889SApple OSS Distributions dataP->fMappedBaseValid = false;
4497*43a90889SApple OSS Distributions
4498*43a90889SApple OSS Distributions return true;
4499*43a90889SApple OSS Distributions }
4500*43a90889SApple OSS Distributions
4501*43a90889SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4502*43a90889SApple OSS Distributions IOMemoryDescriptor::dmaMap(
4503*43a90889SApple OSS Distributions IOMapper * mapper,
4504*43a90889SApple OSS Distributions IOMemoryDescriptor * memory,
4505*43a90889SApple OSS Distributions IODMACommand * command,
4506*43a90889SApple OSS Distributions const IODMAMapSpecification * mapSpec,
4507*43a90889SApple OSS Distributions uint64_t offset,
4508*43a90889SApple OSS Distributions uint64_t length,
4509*43a90889SApple OSS Distributions uint64_t * mapAddress,
4510*43a90889SApple OSS Distributions uint64_t * mapLength)
4511*43a90889SApple OSS Distributions {
4512*43a90889SApple OSS Distributions IOReturn err;
4513*43a90889SApple OSS Distributions uint32_t mapOptions;
4514*43a90889SApple OSS Distributions
4515*43a90889SApple OSS Distributions mapOptions = 0;
4516*43a90889SApple OSS Distributions mapOptions |= kIODMAMapReadAccess;
4517*43a90889SApple OSS Distributions if (!(kIOMemoryPreparedReadOnly & _flags)) {
4518*43a90889SApple OSS Distributions mapOptions |= kIODMAMapWriteAccess;
4519*43a90889SApple OSS Distributions }
4520*43a90889SApple OSS Distributions
4521*43a90889SApple OSS Distributions err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4522*43a90889SApple OSS Distributions mapSpec, command, NULL, mapAddress, mapLength);
4523*43a90889SApple OSS Distributions
4524*43a90889SApple OSS Distributions if (kIOReturnSuccess == err) {
4525*43a90889SApple OSS Distributions dmaMapRecord(mapper, command, *mapLength);
4526*43a90889SApple OSS Distributions }
4527*43a90889SApple OSS Distributions
4528*43a90889SApple OSS Distributions return err;
4529*43a90889SApple OSS Distributions }
4530*43a90889SApple OSS Distributions
4531*43a90889SApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4532*43a90889SApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4533*43a90889SApple OSS Distributions IOMapper * mapper,
4534*43a90889SApple OSS Distributions IODMACommand * command,
4535*43a90889SApple OSS Distributions uint64_t mapLength)
4536*43a90889SApple OSS Distributions {
4537*43a90889SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4538*43a90889SApple OSS Distributions kern_allocation_name_t alloc;
4539*43a90889SApple OSS Distributions int16_t prior;
4540*43a90889SApple OSS Distributions
4541*43a90889SApple OSS Distributions if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4542*43a90889SApple OSS Distributions kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4543*43a90889SApple OSS Distributions }
4544*43a90889SApple OSS Distributions
4545*43a90889SApple OSS Distributions if (!command) {
4546*43a90889SApple OSS Distributions return;
4547*43a90889SApple OSS Distributions }
4548*43a90889SApple OSS Distributions prior = OSAddAtomic16(1, &_dmaReferences);
4549*43a90889SApple OSS Distributions if (!prior) {
4550*43a90889SApple OSS Distributions if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4551*43a90889SApple OSS Distributions _mapName = alloc;
4552*43a90889SApple OSS Distributions mapLength = _length;
4553*43a90889SApple OSS Distributions kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4554*43a90889SApple OSS Distributions } else {
4555*43a90889SApple OSS Distributions _mapName = NULL;
4556*43a90889SApple OSS Distributions }
4557*43a90889SApple OSS Distributions }
4558*43a90889SApple OSS Distributions }
4559*43a90889SApple OSS Distributions
4560*43a90889SApple OSS Distributions IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4561*43a90889SApple OSS Distributions IOMemoryDescriptor::dmaUnmap(
4562*43a90889SApple OSS Distributions IOMapper * mapper,
4563*43a90889SApple OSS Distributions IODMACommand * command,
4564*43a90889SApple OSS Distributions uint64_t offset,
4565*43a90889SApple OSS Distributions uint64_t mapAddress,
4566*43a90889SApple OSS Distributions uint64_t mapLength)
4567*43a90889SApple OSS Distributions {
4568*43a90889SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4569*43a90889SApple OSS Distributions IOReturn ret;
4570*43a90889SApple OSS Distributions kern_allocation_name_t alloc;
4571*43a90889SApple OSS Distributions kern_allocation_name_t mapName;
4572*43a90889SApple OSS Distributions int16_t prior;
4573*43a90889SApple OSS Distributions
4574*43a90889SApple OSS Distributions mapName = NULL;
4575*43a90889SApple OSS Distributions prior = 0;
4576*43a90889SApple OSS Distributions if (command) {
4577*43a90889SApple OSS Distributions mapName = _mapName;
4578*43a90889SApple OSS Distributions if (_dmaReferences) {
4579*43a90889SApple OSS Distributions prior = OSAddAtomic16(-1, &_dmaReferences);
4580*43a90889SApple OSS Distributions } else {
4581*43a90889SApple OSS Distributions panic("_dmaReferences underflow");
4582*43a90889SApple OSS Distributions }
4583*43a90889SApple OSS Distributions }
4584*43a90889SApple OSS Distributions
4585*43a90889SApple OSS Distributions if (!mapLength) {
4586*43a90889SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4587*43a90889SApple OSS Distributions return kIOReturnSuccess;
4588*43a90889SApple OSS Distributions }
4589*43a90889SApple OSS Distributions
4590*43a90889SApple OSS Distributions ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4591*43a90889SApple OSS Distributions
4592*43a90889SApple OSS Distributions if ((alloc = mapper->fAllocName)) {
4593*43a90889SApple OSS Distributions kern_allocation_update_size(alloc, -mapLength, NULL);
4594*43a90889SApple OSS Distributions if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4595*43a90889SApple OSS Distributions mapLength = _length;
4596*43a90889SApple OSS Distributions kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4597*43a90889SApple OSS Distributions }
4598*43a90889SApple OSS Distributions }
4599*43a90889SApple OSS Distributions
4600*43a90889SApple OSS Distributions traceInterval.setEndArg1(ret);
4601*43a90889SApple OSS Distributions return ret;
4602*43a90889SApple OSS Distributions }
4603*43a90889SApple OSS Distributions
4604*43a90889SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4605*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::dmaMap(
4606*43a90889SApple OSS Distributions IOMapper * mapper,
4607*43a90889SApple OSS Distributions IOMemoryDescriptor * memory,
4608*43a90889SApple OSS Distributions IODMACommand * command,
4609*43a90889SApple OSS Distributions const IODMAMapSpecification * mapSpec,
4610*43a90889SApple OSS Distributions uint64_t offset,
4611*43a90889SApple OSS Distributions uint64_t length,
4612*43a90889SApple OSS Distributions uint64_t * mapAddress,
4613*43a90889SApple OSS Distributions uint64_t * mapLength)
4614*43a90889SApple OSS Distributions {
4615*43a90889SApple OSS Distributions IOReturn err = kIOReturnSuccess;
4616*43a90889SApple OSS Distributions ioGMDData * dataP;
4617*43a90889SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4618*43a90889SApple OSS Distributions
4619*43a90889SApple OSS Distributions *mapAddress = 0;
4620*43a90889SApple OSS Distributions if (kIOMemoryHostOnly & _flags) {
4621*43a90889SApple OSS Distributions return kIOReturnSuccess;
4622*43a90889SApple OSS Distributions }
4623*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4624*43a90889SApple OSS Distributions return kIOReturnNotAttached;
4625*43a90889SApple OSS Distributions }
4626*43a90889SApple OSS Distributions
4627*43a90889SApple OSS Distributions if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4628*43a90889SApple OSS Distributions || offset || (length != _length)) {
4629*43a90889SApple OSS Distributions err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4630*43a90889SApple OSS Distributions } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4631*43a90889SApple OSS Distributions const ioPLBlock * ioplList = getIOPLList(dataP);
4632*43a90889SApple OSS Distributions upl_page_info_t * pageList;
4633*43a90889SApple OSS Distributions uint32_t mapOptions = 0;
4634*43a90889SApple OSS Distributions
4635*43a90889SApple OSS Distributions IODMAMapSpecification mapSpec;
4636*43a90889SApple OSS Distributions bzero(&mapSpec, sizeof(mapSpec));
4637*43a90889SApple OSS Distributions mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4638*43a90889SApple OSS Distributions mapSpec.alignment = dataP->fDMAMapAlignment;
4639*43a90889SApple OSS Distributions
4640*43a90889SApple OSS Distributions // For external UPLs the fPageInfo field points directly to
4641*43a90889SApple OSS Distributions // the upl's upl_page_info_t array.
4642*43a90889SApple OSS Distributions if (ioplList->fFlags & kIOPLExternUPL) {
4643*43a90889SApple OSS Distributions pageList = (upl_page_info_t *) ioplList->fPageInfo;
4644*43a90889SApple OSS Distributions mapOptions |= kIODMAMapPagingPath;
4645*43a90889SApple OSS Distributions } else {
4646*43a90889SApple OSS Distributions pageList = getPageList(dataP);
4647*43a90889SApple OSS Distributions }
4648*43a90889SApple OSS Distributions
4649*43a90889SApple OSS Distributions if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4650*43a90889SApple OSS Distributions mapOptions |= kIODMAMapPageListFullyOccupied;
4651*43a90889SApple OSS Distributions }
4652*43a90889SApple OSS Distributions
4653*43a90889SApple OSS Distributions assert(dataP->fDMAAccess);
4654*43a90889SApple OSS Distributions mapOptions |= dataP->fDMAAccess;
4655*43a90889SApple OSS Distributions
4656*43a90889SApple OSS Distributions // Check for direct device non-paged memory
4657*43a90889SApple OSS Distributions if (ioplList->fFlags & kIOPLOnDevice) {
4658*43a90889SApple OSS Distributions mapOptions |= kIODMAMapPhysicallyContiguous;
4659*43a90889SApple OSS Distributions }
4660*43a90889SApple OSS Distributions
4661*43a90889SApple OSS Distributions IODMAMapPageList dmaPageList =
4662*43a90889SApple OSS Distributions {
4663*43a90889SApple OSS Distributions .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
4664*43a90889SApple OSS Distributions .pageListCount = _pages,
4665*43a90889SApple OSS Distributions .pageList = &pageList[0]
4666*43a90889SApple OSS Distributions };
4667*43a90889SApple OSS Distributions err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4668*43a90889SApple OSS Distributions command, &dmaPageList, mapAddress, mapLength);
4669*43a90889SApple OSS Distributions
4670*43a90889SApple OSS Distributions if (kIOReturnSuccess == err) {
4671*43a90889SApple OSS Distributions dmaMapRecord(mapper, command, *mapLength);
4672*43a90889SApple OSS Distributions }
4673*43a90889SApple OSS Distributions }
4674*43a90889SApple OSS Distributions
4675*43a90889SApple OSS Distributions return err;
4676*43a90889SApple OSS Distributions }
4677*43a90889SApple OSS Distributions
4678*43a90889SApple OSS Distributions /*
4679*43a90889SApple OSS Distributions * prepare
4680*43a90889SApple OSS Distributions *
4681*43a90889SApple OSS Distributions * Prepare the memory for an I/O transfer. This involves paging in
4682*43a90889SApple OSS Distributions * the memory, if necessary, and wiring it down for the duration of
4683*43a90889SApple OSS Distributions * the transfer. The complete() method completes the processing of
4684*43a90889SApple OSS Distributions * the memory after the I/O transfer finishes. This method needn't
4685*43a90889SApple OSS Distributions * called for non-pageable memory.
4686*43a90889SApple OSS Distributions */
4687*43a90889SApple OSS Distributions
4688*43a90889SApple OSS Distributions IOReturn
prepare(IODirection forDirection)4689*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4690*43a90889SApple OSS Distributions {
4691*43a90889SApple OSS Distributions IOReturn error = kIOReturnSuccess;
4692*43a90889SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4693*43a90889SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4694*43a90889SApple OSS Distributions
4695*43a90889SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4696*43a90889SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4697*43a90889SApple OSS Distributions return kIOReturnSuccess;
4698*43a90889SApple OSS Distributions }
4699*43a90889SApple OSS Distributions
4700*43a90889SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4701*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4702*43a90889SApple OSS Distributions traceInterval.setEndArg1(kIOReturnNotAttached);
4703*43a90889SApple OSS Distributions return kIOReturnNotAttached;
4704*43a90889SApple OSS Distributions }
4705*43a90889SApple OSS Distributions
4706*43a90889SApple OSS Distributions if (_prepareLock) {
4707*43a90889SApple OSS Distributions IOLockLock(_prepareLock);
4708*43a90889SApple OSS Distributions }
4709*43a90889SApple OSS Distributions
4710*43a90889SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4711*43a90889SApple OSS Distributions if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4712*43a90889SApple OSS Distributions error = kIOReturnNotReady;
4713*43a90889SApple OSS Distributions goto finish;
4714*43a90889SApple OSS Distributions }
4715*43a90889SApple OSS Distributions error = wireVirtual(forDirection);
4716*43a90889SApple OSS Distributions }
4717*43a90889SApple OSS Distributions
4718*43a90889SApple OSS Distributions if (kIOReturnSuccess == error) {
4719*43a90889SApple OSS Distributions if (1 == ++_wireCount) {
4720*43a90889SApple OSS Distributions if (kIOMemoryClearEncrypt & _flags) {
4721*43a90889SApple OSS Distributions performOperation(kIOMemoryClearEncrypted, 0, _length);
4722*43a90889SApple OSS Distributions }
4723*43a90889SApple OSS Distributions
4724*43a90889SApple OSS Distributions ktraceEmitPhysicalSegments();
4725*43a90889SApple OSS Distributions }
4726*43a90889SApple OSS Distributions }
4727*43a90889SApple OSS Distributions
4728*43a90889SApple OSS Distributions finish:
4729*43a90889SApple OSS Distributions
4730*43a90889SApple OSS Distributions if (_prepareLock) {
4731*43a90889SApple OSS Distributions IOLockUnlock(_prepareLock);
4732*43a90889SApple OSS Distributions }
4733*43a90889SApple OSS Distributions traceInterval.setEndArg1(error);
4734*43a90889SApple OSS Distributions
4735*43a90889SApple OSS Distributions return error;
4736*43a90889SApple OSS Distributions }
4737*43a90889SApple OSS Distributions
4738*43a90889SApple OSS Distributions /*
4739*43a90889SApple OSS Distributions * complete
4740*43a90889SApple OSS Distributions *
4741*43a90889SApple OSS Distributions * Complete processing of the memory after an I/O transfer finishes.
4742*43a90889SApple OSS Distributions * This method should not be called unless a prepare was previously
4743*43a90889SApple OSS Distributions * issued; the prepare() and complete() must occur in pairs, before
4744*43a90889SApple OSS Distributions * before and after an I/O transfer involving pageable memory.
4745*43a90889SApple OSS Distributions */
4746*43a90889SApple OSS Distributions
4747*43a90889SApple OSS Distributions IOReturn
complete(IODirection forDirection)4748*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4749*43a90889SApple OSS Distributions {
4750*43a90889SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4751*43a90889SApple OSS Distributions ioGMDData * dataP;
4752*43a90889SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4753*43a90889SApple OSS Distributions
4754*43a90889SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4755*43a90889SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4756*43a90889SApple OSS Distributions return kIOReturnSuccess;
4757*43a90889SApple OSS Distributions }
4758*43a90889SApple OSS Distributions
4759*43a90889SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4760*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4761*43a90889SApple OSS Distributions traceInterval.setEndArg1(kIOReturnNotAttached);
4762*43a90889SApple OSS Distributions return kIOReturnNotAttached;
4763*43a90889SApple OSS Distributions }
4764*43a90889SApple OSS Distributions
4765*43a90889SApple OSS Distributions if (_prepareLock) {
4766*43a90889SApple OSS Distributions IOLockLock(_prepareLock);
4767*43a90889SApple OSS Distributions }
4768*43a90889SApple OSS Distributions do{
4769*43a90889SApple OSS Distributions assert(_wireCount);
4770*43a90889SApple OSS Distributions if (!_wireCount) {
4771*43a90889SApple OSS Distributions break;
4772*43a90889SApple OSS Distributions }
4773*43a90889SApple OSS Distributions dataP = getDataP(_memoryEntries);
4774*43a90889SApple OSS Distributions if (!dataP) {
4775*43a90889SApple OSS Distributions break;
4776*43a90889SApple OSS Distributions }
4777*43a90889SApple OSS Distributions
4778*43a90889SApple OSS Distributions if (kIODirectionCompleteWithError & forDirection) {
4779*43a90889SApple OSS Distributions dataP->fCompletionError = true;
4780*43a90889SApple OSS Distributions }
4781*43a90889SApple OSS Distributions
4782*43a90889SApple OSS Distributions if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4783*43a90889SApple OSS Distributions performOperation(kIOMemorySetEncrypted, 0, _length);
4784*43a90889SApple OSS Distributions }
4785*43a90889SApple OSS Distributions
4786*43a90889SApple OSS Distributions _wireCount--;
4787*43a90889SApple OSS Distributions if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4788*43a90889SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
4789*43a90889SApple OSS Distributions UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4790*43a90889SApple OSS Distributions
4791*43a90889SApple OSS Distributions if (_wireCount) {
4792*43a90889SApple OSS Distributions // kIODirectionCompleteWithDataValid & forDirection
4793*43a90889SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4794*43a90889SApple OSS Distributions vm_tag_t tag;
4795*43a90889SApple OSS Distributions tag = (typeof(tag))getVMTag(kernel_map);
4796*43a90889SApple OSS Distributions for (ind = 0; ind < count; ind++) {
4797*43a90889SApple OSS Distributions if (ioplList[ind].fIOPL) {
4798*43a90889SApple OSS Distributions iopl_valid_data(ioplList[ind].fIOPL, tag);
4799*43a90889SApple OSS Distributions }
4800*43a90889SApple OSS Distributions }
4801*43a90889SApple OSS Distributions }
4802*43a90889SApple OSS Distributions } else {
4803*43a90889SApple OSS Distributions if (_dmaReferences) {
4804*43a90889SApple OSS Distributions panic("complete() while dma active");
4805*43a90889SApple OSS Distributions }
4806*43a90889SApple OSS Distributions
4807*43a90889SApple OSS Distributions if (dataP->fMappedBaseValid) {
4808*43a90889SApple OSS Distributions dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4809*43a90889SApple OSS Distributions dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4810*43a90889SApple OSS Distributions }
4811*43a90889SApple OSS Distributions #if IOTRACKING
4812*43a90889SApple OSS Distributions if (dataP->fWireTracking.link.next) {
4813*43a90889SApple OSS Distributions IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4814*43a90889SApple OSS Distributions }
4815*43a90889SApple OSS Distributions #endif /* IOTRACKING */
4816*43a90889SApple OSS Distributions // Only complete iopls that we created which are for TypeVirtual
4817*43a90889SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4818*43a90889SApple OSS Distributions for (ind = 0; ind < count; ind++) {
4819*43a90889SApple OSS Distributions if (ioplList[ind].fIOPL) {
4820*43a90889SApple OSS Distributions if (dataP->fCompletionError) {
4821*43a90889SApple OSS Distributions upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4822*43a90889SApple OSS Distributions } else {
4823*43a90889SApple OSS Distributions upl_commit(ioplList[ind].fIOPL, NULL, 0);
4824*43a90889SApple OSS Distributions }
4825*43a90889SApple OSS Distributions upl_deallocate(ioplList[ind].fIOPL);
4826*43a90889SApple OSS Distributions }
4827*43a90889SApple OSS Distributions }
4828*43a90889SApple OSS Distributions } else if (kIOMemoryTypeUPL == type) {
4829*43a90889SApple OSS Distributions upl_set_referenced(ioplList[0].fIOPL, false);
4830*43a90889SApple OSS Distributions }
4831*43a90889SApple OSS Distributions
4832*43a90889SApple OSS Distributions _memoryEntries->setLength(computeDataSize(0, 0));
4833*43a90889SApple OSS Distributions
4834*43a90889SApple OSS Distributions dataP->fPreparationID = kIOPreparationIDUnprepared;
4835*43a90889SApple OSS Distributions _flags &= ~kIOMemoryPreparedReadOnly;
4836*43a90889SApple OSS Distributions
4837*43a90889SApple OSS Distributions if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4838*43a90889SApple OSS Distributions IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4839*43a90889SApple OSS Distributions }
4840*43a90889SApple OSS Distributions }
4841*43a90889SApple OSS Distributions }
4842*43a90889SApple OSS Distributions }while (false);
4843*43a90889SApple OSS Distributions
4844*43a90889SApple OSS Distributions if (_prepareLock) {
4845*43a90889SApple OSS Distributions IOLockUnlock(_prepareLock);
4846*43a90889SApple OSS Distributions }
4847*43a90889SApple OSS Distributions
4848*43a90889SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4849*43a90889SApple OSS Distributions return kIOReturnSuccess;
4850*43a90889SApple OSS Distributions }
4851*43a90889SApple OSS Distributions
4852*43a90889SApple OSS Distributions IOOptionBits
memoryReferenceCreateOptions(IOOptionBits options,IOMemoryMap * mapping)4853*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4854*43a90889SApple OSS Distributions {
4855*43a90889SApple OSS Distributions IOOptionBits createOptions = 0;
4856*43a90889SApple OSS Distributions
4857*43a90889SApple OSS Distributions if (!(kIOMap64Bit & options)) {
4858*43a90889SApple OSS Distributions panic("IOMemoryDescriptor::makeMapping !64bit");
4859*43a90889SApple OSS Distributions }
4860*43a90889SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
4861*43a90889SApple OSS Distributions createOptions |= kIOMemoryReferenceWrite;
4862*43a90889SApple OSS Distributions #if DEVELOPMENT || DEBUG
4863*43a90889SApple OSS Distributions if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4864*43a90889SApple OSS Distributions && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4865*43a90889SApple OSS Distributions OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4866*43a90889SApple OSS Distributions }
4867*43a90889SApple OSS Distributions #endif
4868*43a90889SApple OSS Distributions }
4869*43a90889SApple OSS Distributions return createOptions;
4870*43a90889SApple OSS Distributions }
4871*43a90889SApple OSS Distributions
4872*43a90889SApple OSS Distributions /*
4873*43a90889SApple OSS Distributions * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
4874*43a90889SApple OSS Distributions * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
4875*43a90889SApple OSS Distributions * creation.
4876*43a90889SApple OSS Distributions */
4877*43a90889SApple OSS Distributions
4878*43a90889SApple OSS Distributions IOMemoryMap *
makeMapping(IOMemoryDescriptor * owner,task_t __intoTask,IOVirtualAddress __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4879*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::makeMapping(
4880*43a90889SApple OSS Distributions IOMemoryDescriptor * owner,
4881*43a90889SApple OSS Distributions task_t __intoTask,
4882*43a90889SApple OSS Distributions IOVirtualAddress __address,
4883*43a90889SApple OSS Distributions IOOptionBits options,
4884*43a90889SApple OSS Distributions IOByteCount __offset,
4885*43a90889SApple OSS Distributions IOByteCount __length )
4886*43a90889SApple OSS Distributions {
4887*43a90889SApple OSS Distributions IOReturn err = kIOReturnSuccess;
4888*43a90889SApple OSS Distributions IOMemoryMap * mapping;
4889*43a90889SApple OSS Distributions
4890*43a90889SApple OSS Distributions if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
4891*43a90889SApple OSS Distributions struct IOMemoryReference * newRef;
4892*43a90889SApple OSS Distributions err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
4893*43a90889SApple OSS Distributions if (kIOReturnSuccess == err) {
4894*43a90889SApple OSS Distributions if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
4895*43a90889SApple OSS Distributions memoryReferenceFree(newRef);
4896*43a90889SApple OSS Distributions }
4897*43a90889SApple OSS Distributions }
4898*43a90889SApple OSS Distributions }
4899*43a90889SApple OSS Distributions if (kIOReturnSuccess != err) {
4900*43a90889SApple OSS Distributions return NULL;
4901*43a90889SApple OSS Distributions }
4902*43a90889SApple OSS Distributions mapping = IOMemoryDescriptor::makeMapping(
4903*43a90889SApple OSS Distributions owner, __intoTask, __address, options, __offset, __length);
4904*43a90889SApple OSS Distributions
4905*43a90889SApple OSS Distributions #if IOTRACKING
4906*43a90889SApple OSS Distributions if ((mapping == (IOMemoryMap *) __address)
4907*43a90889SApple OSS Distributions && (0 == (kIOMapStatic & mapping->fOptions))
4908*43a90889SApple OSS Distributions && (NULL == mapping->fSuperMap)
4909*43a90889SApple OSS Distributions && ((kIOTracking & gIOKitDebug) || _task)) {
4910*43a90889SApple OSS Distributions // only dram maps in the default on development case
4911*43a90889SApple OSS Distributions IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4912*43a90889SApple OSS Distributions }
4913*43a90889SApple OSS Distributions #endif /* IOTRACKING */
4914*43a90889SApple OSS Distributions
4915*43a90889SApple OSS Distributions return mapping;
4916*43a90889SApple OSS Distributions }
4917*43a90889SApple OSS Distributions
4918*43a90889SApple OSS Distributions IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4919*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::doMap(
4920*43a90889SApple OSS Distributions vm_map_t __addressMap,
4921*43a90889SApple OSS Distributions IOVirtualAddress * __address,
4922*43a90889SApple OSS Distributions IOOptionBits options,
4923*43a90889SApple OSS Distributions IOByteCount __offset,
4924*43a90889SApple OSS Distributions IOByteCount __length )
4925*43a90889SApple OSS Distributions {
4926*43a90889SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4927*43a90889SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4928*43a90889SApple OSS Distributions #ifndef __LP64__
4929*43a90889SApple OSS Distributions if (!(kIOMap64Bit & options)) {
4930*43a90889SApple OSS Distributions panic("IOGeneralMemoryDescriptor::doMap !64bit");
4931*43a90889SApple OSS Distributions }
4932*43a90889SApple OSS Distributions #endif /* !__LP64__ */
4933*43a90889SApple OSS Distributions
4934*43a90889SApple OSS Distributions kern_return_t err;
4935*43a90889SApple OSS Distributions
4936*43a90889SApple OSS Distributions IOMemoryMap * mapping = (IOMemoryMap *) *__address;
4937*43a90889SApple OSS Distributions mach_vm_size_t offset = mapping->fOffset + __offset;
4938*43a90889SApple OSS Distributions mach_vm_size_t length = mapping->fLength;
4939*43a90889SApple OSS Distributions
4940*43a90889SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4941*43a90889SApple OSS Distributions Ranges vec = _ranges;
4942*43a90889SApple OSS Distributions
4943*43a90889SApple OSS Distributions mach_vm_address_t range0Addr = 0;
4944*43a90889SApple OSS Distributions mach_vm_size_t range0Len = 0;
4945*43a90889SApple OSS Distributions
4946*43a90889SApple OSS Distributions if ((offset >= _length) || ((offset + length) > _length)) {
4947*43a90889SApple OSS Distributions traceInterval.setEndArg1(kIOReturnBadArgument);
4948*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4949*43a90889SApple OSS Distributions // assert(offset == 0 && _length == 0 && length == 0);
4950*43a90889SApple OSS Distributions return kIOReturnBadArgument;
4951*43a90889SApple OSS Distributions }
4952*43a90889SApple OSS Distributions
4953*43a90889SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4954*43a90889SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4955*43a90889SApple OSS Distributions return 0;
4956*43a90889SApple OSS Distributions }
4957*43a90889SApple OSS Distributions
4958*43a90889SApple OSS Distributions if (vec.v) {
4959*43a90889SApple OSS Distributions getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
4960*43a90889SApple OSS Distributions }
4961*43a90889SApple OSS Distributions
4962*43a90889SApple OSS Distributions // mapping source == dest? (could be much better)
4963*43a90889SApple OSS Distributions if (_task
4964*43a90889SApple OSS Distributions && (mapping->fAddressTask == _task)
4965*43a90889SApple OSS Distributions && (mapping->fAddressMap == get_task_map(_task))
4966*43a90889SApple OSS Distributions && (options & kIOMapAnywhere)
4967*43a90889SApple OSS Distributions && (!(kIOMapUnique & options))
4968*43a90889SApple OSS Distributions && (!(kIOMapGuardedMask & options))
4969*43a90889SApple OSS Distributions && (1 == _rangesCount)
4970*43a90889SApple OSS Distributions && (0 == offset)
4971*43a90889SApple OSS Distributions && range0Addr
4972*43a90889SApple OSS Distributions && (length <= range0Len)) {
4973*43a90889SApple OSS Distributions mapping->fAddress = range0Addr;
4974*43a90889SApple OSS Distributions mapping->fOptions |= kIOMapStatic;
4975*43a90889SApple OSS Distributions
4976*43a90889SApple OSS Distributions return kIOReturnSuccess;
4977*43a90889SApple OSS Distributions }
4978*43a90889SApple OSS Distributions
4979*43a90889SApple OSS Distributions if (!_memRef) {
4980*43a90889SApple OSS Distributions err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
4981*43a90889SApple OSS Distributions if (kIOReturnSuccess != err) {
4982*43a90889SApple OSS Distributions traceInterval.setEndArg1(err);
4983*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4984*43a90889SApple OSS Distributions return err;
4985*43a90889SApple OSS Distributions }
4986*43a90889SApple OSS Distributions }
4987*43a90889SApple OSS Distributions
4988*43a90889SApple OSS Distributions memory_object_t pager;
4989*43a90889SApple OSS Distributions pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4990*43a90889SApple OSS Distributions
4991*43a90889SApple OSS Distributions // <upl_transpose //
4992*43a90889SApple OSS Distributions if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4993*43a90889SApple OSS Distributions do{
4994*43a90889SApple OSS Distributions upl_t redirUPL2;
4995*43a90889SApple OSS Distributions upl_size_t size;
4996*43a90889SApple OSS Distributions upl_control_flags_t flags;
4997*43a90889SApple OSS Distributions unsigned int lock_count;
4998*43a90889SApple OSS Distributions
4999*43a90889SApple OSS Distributions if (!_memRef || (1 != _memRef->count)) {
5000*43a90889SApple OSS Distributions err = kIOReturnNotReadable;
5001*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5002*43a90889SApple OSS Distributions break;
5003*43a90889SApple OSS Distributions }
5004*43a90889SApple OSS Distributions
5005*43a90889SApple OSS Distributions size = (upl_size_t) round_page(mapping->fLength);
5006*43a90889SApple OSS Distributions flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5007*43a90889SApple OSS Distributions | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5008*43a90889SApple OSS Distributions
5009*43a90889SApple OSS Distributions if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
5010*43a90889SApple OSS Distributions NULL, NULL,
5011*43a90889SApple OSS Distributions &flags, (vm_tag_t) getVMTag(kernel_map))) {
5012*43a90889SApple OSS Distributions redirUPL2 = NULL;
5013*43a90889SApple OSS Distributions }
5014*43a90889SApple OSS Distributions
5015*43a90889SApple OSS Distributions for (lock_count = 0;
5016*43a90889SApple OSS Distributions IORecursiveLockHaveLock(gIOMemoryLock);
5017*43a90889SApple OSS Distributions lock_count++) {
5018*43a90889SApple OSS Distributions UNLOCK;
5019*43a90889SApple OSS Distributions }
5020*43a90889SApple OSS Distributions err = upl_transpose(redirUPL2, mapping->fRedirUPL);
5021*43a90889SApple OSS Distributions for (;
5022*43a90889SApple OSS Distributions lock_count;
5023*43a90889SApple OSS Distributions lock_count--) {
5024*43a90889SApple OSS Distributions LOCK;
5025*43a90889SApple OSS Distributions }
5026*43a90889SApple OSS Distributions
5027*43a90889SApple OSS Distributions if (kIOReturnSuccess != err) {
5028*43a90889SApple OSS Distributions IOLog("upl_transpose(%x)\n", err);
5029*43a90889SApple OSS Distributions err = kIOReturnSuccess;
5030*43a90889SApple OSS Distributions }
5031*43a90889SApple OSS Distributions
5032*43a90889SApple OSS Distributions if (redirUPL2) {
5033*43a90889SApple OSS Distributions upl_commit(redirUPL2, NULL, 0);
5034*43a90889SApple OSS Distributions upl_deallocate(redirUPL2);
5035*43a90889SApple OSS Distributions redirUPL2 = NULL;
5036*43a90889SApple OSS Distributions }
5037*43a90889SApple OSS Distributions {
5038*43a90889SApple OSS Distributions // swap the memEntries since they now refer to different vm_objects
5039*43a90889SApple OSS Distributions IOMemoryReference * me = _memRef;
5040*43a90889SApple OSS Distributions _memRef = mapping->fMemory->_memRef;
5041*43a90889SApple OSS Distributions mapping->fMemory->_memRef = me;
5042*43a90889SApple OSS Distributions }
5043*43a90889SApple OSS Distributions if (pager) {
5044*43a90889SApple OSS Distributions err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
5045*43a90889SApple OSS Distributions }
5046*43a90889SApple OSS Distributions }while (false);
5047*43a90889SApple OSS Distributions }
5048*43a90889SApple OSS Distributions // upl_transpose> //
5049*43a90889SApple OSS Distributions else {
5050*43a90889SApple OSS Distributions err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
5051*43a90889SApple OSS Distributions if (err) {
5052*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
5053*43a90889SApple OSS Distributions }
5054*43a90889SApple OSS Distributions if ((err == KERN_SUCCESS) && pager) {
5055*43a90889SApple OSS Distributions err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
5056*43a90889SApple OSS Distributions
5057*43a90889SApple OSS Distributions if (err != KERN_SUCCESS) {
5058*43a90889SApple OSS Distributions doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
5059*43a90889SApple OSS Distributions } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
5060*43a90889SApple OSS Distributions mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
5061*43a90889SApple OSS Distributions }
5062*43a90889SApple OSS Distributions }
5063*43a90889SApple OSS Distributions }
5064*43a90889SApple OSS Distributions
5065*43a90889SApple OSS Distributions traceInterval.setEndArg1(err);
5066*43a90889SApple OSS Distributions if (err) {
5067*43a90889SApple OSS Distributions DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5068*43a90889SApple OSS Distributions }
5069*43a90889SApple OSS Distributions return err;
5070*43a90889SApple OSS Distributions }
5071*43a90889SApple OSS Distributions
#if IOTRACKING
/*
 * Resolve an IOTrackingUser record back to the IOMemoryMap that embeds it,
 * and report that mapping's owning task, address and length.
 * Returns kIOReturnNotReady if the mapping has been torn down or its task's
 * VM map has been replaced (i.e. the tracking entry is stale).
 */
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
	// container_of-style recovery: fTracking is embedded in IOMemoryMap,
	// so subtracting its offset yields the enclosing object.
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	// The mapping must still be live, and its cached VM map must still be
	// the task's current map; otherwise refuse to report stale data.
	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task = map->fAddressTask;
	*address = map->fAddress;
	*size = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5092*43a90889SApple OSS Distributions
5093*43a90889SApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5094*43a90889SApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5095*43a90889SApple OSS Distributions vm_map_t addressMap,
5096*43a90889SApple OSS Distributions IOVirtualAddress __address,
5097*43a90889SApple OSS Distributions IOByteCount __length )
5098*43a90889SApple OSS Distributions {
5099*43a90889SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5100*43a90889SApple OSS Distributions IOReturn ret;
5101*43a90889SApple OSS Distributions ret = super::doUnmap(addressMap, __address, __length);
5102*43a90889SApple OSS Distributions traceInterval.setEndArg1(ret);
5103*43a90889SApple OSS Distributions return ret;
5104*43a90889SApple OSS Distributions }
5105*43a90889SApple OSS Distributions
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Reserved vtable slots kept for future binary-compatible expansion.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5121*43a90889SApple OSS Distributions
5122*43a90889SApple OSS Distributions /* ex-inline function implementation */
5123*43a90889SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()5124*43a90889SApple OSS Distributions IOMemoryMap::getPhysicalAddress()
5125*43a90889SApple OSS Distributions {
5126*43a90889SApple OSS Distributions return getPhysicalSegment( 0, NULL );
5127*43a90889SApple OSS Distributions }
5128*43a90889SApple OSS Distributions
5129*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5130*43a90889SApple OSS Distributions
5131*43a90889SApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5132*43a90889SApple OSS Distributions IOMemoryMap::init(
5133*43a90889SApple OSS Distributions task_t intoTask,
5134*43a90889SApple OSS Distributions mach_vm_address_t toAddress,
5135*43a90889SApple OSS Distributions IOOptionBits _options,
5136*43a90889SApple OSS Distributions mach_vm_size_t _offset,
5137*43a90889SApple OSS Distributions mach_vm_size_t _length )
5138*43a90889SApple OSS Distributions {
5139*43a90889SApple OSS Distributions if (!intoTask) {
5140*43a90889SApple OSS Distributions return false;
5141*43a90889SApple OSS Distributions }
5142*43a90889SApple OSS Distributions
5143*43a90889SApple OSS Distributions if (!super::init()) {
5144*43a90889SApple OSS Distributions return false;
5145*43a90889SApple OSS Distributions }
5146*43a90889SApple OSS Distributions
5147*43a90889SApple OSS Distributions fAddressMap = get_task_map(intoTask);
5148*43a90889SApple OSS Distributions if (!fAddressMap) {
5149*43a90889SApple OSS Distributions return false;
5150*43a90889SApple OSS Distributions }
5151*43a90889SApple OSS Distributions vm_map_reference(fAddressMap);
5152*43a90889SApple OSS Distributions
5153*43a90889SApple OSS Distributions fAddressTask = intoTask;
5154*43a90889SApple OSS Distributions fOptions = _options;
5155*43a90889SApple OSS Distributions fLength = _length;
5156*43a90889SApple OSS Distributions fOffset = _offset;
5157*43a90889SApple OSS Distributions fAddress = toAddress;
5158*43a90889SApple OSS Distributions
5159*43a90889SApple OSS Distributions return true;
5160*43a90889SApple OSS Distributions }
5161*43a90889SApple OSS Distributions
5162*43a90889SApple OSS Distributions bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5163*43a90889SApple OSS Distributions IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5164*43a90889SApple OSS Distributions {
5165*43a90889SApple OSS Distributions if (!_memory) {
5166*43a90889SApple OSS Distributions return false;
5167*43a90889SApple OSS Distributions }
5168*43a90889SApple OSS Distributions
5169*43a90889SApple OSS Distributions if (!fSuperMap) {
5170*43a90889SApple OSS Distributions if ((_offset + fLength) > _memory->getLength()) {
5171*43a90889SApple OSS Distributions return false;
5172*43a90889SApple OSS Distributions }
5173*43a90889SApple OSS Distributions fOffset = _offset;
5174*43a90889SApple OSS Distributions }
5175*43a90889SApple OSS Distributions
5176*43a90889SApple OSS Distributions
5177*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5178*43a90889SApple OSS Distributions if (fMemory) {
5179*43a90889SApple OSS Distributions if (fMemory != _memory) {
5180*43a90889SApple OSS Distributions fMemory->removeMapping(this);
5181*43a90889SApple OSS Distributions }
5182*43a90889SApple OSS Distributions }
5183*43a90889SApple OSS Distributions fMemory = os::move(tempval);
5184*43a90889SApple OSS Distributions
5185*43a90889SApple OSS Distributions return true;
5186*43a90889SApple OSS Distributions }
5187*43a90889SApple OSS Distributions
/*
 * Base-class mapping hook. IOMemoryDescriptor itself cannot create
 * mappings; concrete subclasses (e.g. IOGeneralMemoryDescriptor)
 * override this.
 */
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t __addressMap,
	IOVirtualAddress * __address,
	IOOptionBits options,
	IOByteCount __offset,
	IOByteCount __length )
{
	return kIOReturnUnsupported;
}
5198*43a90889SApple OSS Distributions
/*
 * Fault-time gate for redirected descriptors: while kIOMemoryRedirected
 * is set, block the faulting thread until redirect(..., false) clears
 * the flag and issues WAKEUP.
 */
IOReturn
IOMemoryDescriptor::handleFault(
	void * _pager,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Re-check the flag after each wakeup in case redirection was
		// re-asserted before this thread ran.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5215*43a90889SApple OSS Distributions
/*
 * Feed the physical segments backing [sourceOffset, sourceOffset+length)
 * into the device pager, so faults on the corresponding mapping resolve
 * to device memory. For kernel mappings the pages are also pre-faulted,
 * since faulting cannot be serviced later from interrupt level.
 */
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void * _pager,
	vm_map_t addressMap,
	mach_vm_address_t address,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length,
	IOOptionBits options )
{
	IOReturn err = kIOReturnSuccess;
	memory_object_t pager = (memory_object_t) _pager;
	mach_vm_size_t size;
	mach_vm_size_t bytes;
	mach_vm_size_t page;
	mach_vm_size_t pageOffset;
	mach_vm_size_t pagerOffset;
	IOPhysicalLength segLen, chunk;
	addr64_t physAddr;
	IOOptionBits type;

	type = _flags & kIOMemoryTypeMask;

	// A contiguous pager is always populated from the start of the object.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Align the start down to a page boundary; pageOffset tracks the
	// sub-page remainder so the first iteration covers a whole page.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Device-pager ranges should not overlap pmap-managed pages;
		// report (with backtrace) if they do.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Hand the segment to the pager chunk by chunk: the whole
		// (rounded) segment at once for a contiguous pager, else page
		// by page.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance by the bytes actually consumed; only the first
		// iteration can carry a non-zero sub-page offset.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Bytes left over means the descriptor ran out of segments before
	// covering the requested range.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5318*43a90889SApple OSS Distributions
/*
 * Tear down a mapping's VM state. NOTE: despite its type, __address
 * actually carries the IOMemoryMap * being destroyed (see
 * IOMemoryMap::unmap), and __length must be zero.
 */
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t addressMap,
	IOVirtualAddress __address,
	IOByteCount __length )
{
	IOReturn err;
	IOMemoryMap * mapping;
	mach_vm_address_t address;
	mach_vm_size_t length;

	// A non-zero length indicates a caller using the obsolete calling
	// convention; that is a programming error.
	if (__length) {
		panic("doUnmap");
	}

	// The "address" argument is really the mapping object itself; pull
	// the real map/address/length out of it.
	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address = mapping->fAddress;
	length = mapping->fLength;

	// Overwrite mappings share their target range and must not
	// deallocate it.
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffers actually live in a pageable submap rather
		// than directly in kernel_map.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5363*43a90889SApple OSS Distributions
5364*43a90889SApple OSS Distributions IOReturn
redirect(task_t safeTask,bool doRedirect)5365*43a90889SApple OSS Distributions IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
5366*43a90889SApple OSS Distributions {
5367*43a90889SApple OSS Distributions IOReturn err = kIOReturnSuccess;
5368*43a90889SApple OSS Distributions IOMemoryMap * mapping = NULL;
5369*43a90889SApple OSS Distributions OSSharedPtr<OSIterator> iter;
5370*43a90889SApple OSS Distributions
5371*43a90889SApple OSS Distributions LOCK;
5372*43a90889SApple OSS Distributions
5373*43a90889SApple OSS Distributions if (doRedirect) {
5374*43a90889SApple OSS Distributions _flags |= kIOMemoryRedirected;
5375*43a90889SApple OSS Distributions } else {
5376*43a90889SApple OSS Distributions _flags &= ~kIOMemoryRedirected;
5377*43a90889SApple OSS Distributions }
5378*43a90889SApple OSS Distributions
5379*43a90889SApple OSS Distributions do {
5380*43a90889SApple OSS Distributions if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
5381*43a90889SApple OSS Distributions memory_object_t pager;
5382*43a90889SApple OSS Distributions
5383*43a90889SApple OSS Distributions if (reserved) {
5384*43a90889SApple OSS Distributions pager = (memory_object_t) reserved->dp.devicePager;
5385*43a90889SApple OSS Distributions } else {
5386*43a90889SApple OSS Distributions pager = MACH_PORT_NULL;
5387*43a90889SApple OSS Distributions }
5388*43a90889SApple OSS Distributions
5389*43a90889SApple OSS Distributions while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
5390*43a90889SApple OSS Distributions mapping->redirect( safeTask, doRedirect );
5391*43a90889SApple OSS Distributions if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
5392*43a90889SApple OSS Distributions err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
5393*43a90889SApple OSS Distributions }
5394*43a90889SApple OSS Distributions }
5395*43a90889SApple OSS Distributions
5396*43a90889SApple OSS Distributions iter.reset();
5397*43a90889SApple OSS Distributions }
5398*43a90889SApple OSS Distributions } while (false);
5399*43a90889SApple OSS Distributions
5400*43a90889SApple OSS Distributions if (!doRedirect) {
5401*43a90889SApple OSS Distributions WAKEUP;
5402*43a90889SApple OSS Distributions }
5403*43a90889SApple OSS Distributions
5404*43a90889SApple OSS Distributions UNLOCK;
5405*43a90889SApple OSS Distributions
5406*43a90889SApple OSS Distributions #ifndef __LP64__
5407*43a90889SApple OSS Distributions // temporary binary compatibility
5408*43a90889SApple OSS Distributions IOSubMemoryDescriptor * subMem;
5409*43a90889SApple OSS Distributions if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
5410*43a90889SApple OSS Distributions err = subMem->redirect( safeTask, doRedirect );
5411*43a90889SApple OSS Distributions } else {
5412*43a90889SApple OSS Distributions err = kIOReturnSuccess;
5413*43a90889SApple OSS Distributions }
5414*43a90889SApple OSS Distributions #endif /* !__LP64__ */
5415*43a90889SApple OSS Distributions
5416*43a90889SApple OSS Distributions return err;
5417*43a90889SApple OSS Distributions }
5418*43a90889SApple OSS Distributions
/*
 * Apply or lift redirection on this single mapping: pull its page table
 * entries so accesses fault (and block in handleFault), or adjust its
 * cache mode, depending on the mapping's options and the "safe" task.
 */
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// Submaps are handled via their parent mapping.
//		err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing to do for a mapping that was never established
			// or has already been torn down.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Unless this mapping belongs to the "safe" task or is a
			// static mapping, remove its translations so further
			// accesses fault.
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// Write-combined mappings are demoted to
				// cache-inhibited while redirected, and restored when
				// redirection is lifted.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// For physical-range descriptors mapped on behalf of a safe task,
	// propagate the state change to the descriptor itself (only when it
	// would actually change the descriptor's redirected flag).
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5462*43a90889SApple OSS Distributions
5463*43a90889SApple OSS Distributions IOReturn
unmap(void)5464*43a90889SApple OSS Distributions IOMemoryMap::unmap( void )
5465*43a90889SApple OSS Distributions {
5466*43a90889SApple OSS Distributions IOReturn err;
5467*43a90889SApple OSS Distributions
5468*43a90889SApple OSS Distributions LOCK;
5469*43a90889SApple OSS Distributions
5470*43a90889SApple OSS Distributions if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5471*43a90889SApple OSS Distributions && (0 == (kIOMapStatic & fOptions))) {
5472*43a90889SApple OSS Distributions err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5473*43a90889SApple OSS Distributions } else {
5474*43a90889SApple OSS Distributions err = kIOReturnSuccess;
5475*43a90889SApple OSS Distributions }
5476*43a90889SApple OSS Distributions
5477*43a90889SApple OSS Distributions if (fAddressMap) {
5478*43a90889SApple OSS Distributions vm_map_deallocate(fAddressMap);
5479*43a90889SApple OSS Distributions fAddressMap = NULL;
5480*43a90889SApple OSS Distributions }
5481*43a90889SApple OSS Distributions
5482*43a90889SApple OSS Distributions fAddress = 0;
5483*43a90889SApple OSS Distributions
5484*43a90889SApple OSS Distributions UNLOCK;
5485*43a90889SApple OSS Distributions
5486*43a90889SApple OSS Distributions return err;
5487*43a90889SApple OSS Distributions }
5488*43a90889SApple OSS Distributions
/*
 * Called when the owning task terminates: optionally unmap on behalf of
 * the user client, then release the task's VM map reference and clear
 * the now-meaningless address state.
 */
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		// User client requested unmap-on-death (see userClientUnmap());
		// unmap() also removes the IOTracking user record.
		unmap();
	}
#if IOTRACKING
	else {
		// Not unmapping here, so drop the tracking record directly.
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	// Release the VM map reference taken in init(); unmap() may already
	// have done this, in which case fAddressMap is NULL.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress = 0;
	UNLOCK;
}
5510*43a90889SApple OSS Distributions
/*
 * Arrange for this mapping to be unmapped automatically when its owning
 * task dies (consumed by taskDied()).
 */
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5517*43a90889SApple OSS Distributions
5518*43a90889SApple OSS Distributions // Overload the release mechanism. All mappings must be a member
5519*43a90889SApple OSS Distributions // of a memory descriptors _mappings set. This means that we
5520*43a90889SApple OSS Distributions // always have 2 references on a mapping. When either of these mappings
5521*43a90889SApple OSS Distributions // are released we need to free ourselves.
5522*43a90889SApple OSS Distributions void
taggedRelease(const void * tag) const5523*43a90889SApple OSS Distributions IOMemoryMap::taggedRelease(const void *tag) const
5524*43a90889SApple OSS Distributions {
5525*43a90889SApple OSS Distributions LOCK;
5526*43a90889SApple OSS Distributions super::taggedRelease(tag, 2);
5527*43a90889SApple OSS Distributions UNLOCK;
5528*43a90889SApple OSS Distributions }
5529*43a90889SApple OSS Distributions
5530*43a90889SApple OSS Distributions void
free()5531*43a90889SApple OSS Distributions IOMemoryMap::free()
5532*43a90889SApple OSS Distributions {
5533*43a90889SApple OSS Distributions unmap();
5534*43a90889SApple OSS Distributions
5535*43a90889SApple OSS Distributions if (fMemory) {
5536*43a90889SApple OSS Distributions LOCK;
5537*43a90889SApple OSS Distributions fMemory->removeMapping(this);
5538*43a90889SApple OSS Distributions UNLOCK;
5539*43a90889SApple OSS Distributions fMemory.reset();
5540*43a90889SApple OSS Distributions }
5541*43a90889SApple OSS Distributions
5542*43a90889SApple OSS Distributions if (fSuperMap) {
5543*43a90889SApple OSS Distributions fSuperMap.reset();
5544*43a90889SApple OSS Distributions }
5545*43a90889SApple OSS Distributions
5546*43a90889SApple OSS Distributions if (fRedirUPL) {
5547*43a90889SApple OSS Distributions upl_commit(fRedirUPL, NULL, 0);
5548*43a90889SApple OSS Distributions upl_deallocate(fRedirUPL);
5549*43a90889SApple OSS Distributions }
5550*43a90889SApple OSS Distributions
5551*43a90889SApple OSS Distributions super::free();
5552*43a90889SApple OSS Distributions }
5553*43a90889SApple OSS Distributions
5554*43a90889SApple OSS Distributions IOByteCount
getLength()5555*43a90889SApple OSS Distributions IOMemoryMap::getLength()
5556*43a90889SApple OSS Distributions {
5557*43a90889SApple OSS Distributions return fLength;
5558*43a90889SApple OSS Distributions }
5559*43a90889SApple OSS Distributions
5560*43a90889SApple OSS Distributions IOVirtualAddress
getVirtualAddress()5561*43a90889SApple OSS Distributions IOMemoryMap::getVirtualAddress()
5562*43a90889SApple OSS Distributions {
5563*43a90889SApple OSS Distributions #ifndef __LP64__
5564*43a90889SApple OSS Distributions if (fSuperMap) {
5565*43a90889SApple OSS Distributions fSuperMap->getVirtualAddress();
5566*43a90889SApple OSS Distributions } else if (fAddressMap
5567*43a90889SApple OSS Distributions && vm_map_is_64bit(fAddressMap)
5568*43a90889SApple OSS Distributions && (sizeof(IOVirtualAddress) < 8)) {
5569*43a90889SApple OSS Distributions OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
5570*43a90889SApple OSS Distributions }
5571*43a90889SApple OSS Distributions #endif /* !__LP64__ */
5572*43a90889SApple OSS Distributions
5573*43a90889SApple OSS Distributions return fAddress;
5574*43a90889SApple OSS Distributions }
5575*43a90889SApple OSS Distributions
5576*43a90889SApple OSS Distributions #ifndef __LP64__
5577*43a90889SApple OSS Distributions mach_vm_address_t
getAddress()5578*43a90889SApple OSS Distributions IOMemoryMap::getAddress()
5579*43a90889SApple OSS Distributions {
5580*43a90889SApple OSS Distributions return fAddress;
5581*43a90889SApple OSS Distributions }
5582*43a90889SApple OSS Distributions
5583*43a90889SApple OSS Distributions mach_vm_size_t
getSize()5584*43a90889SApple OSS Distributions IOMemoryMap::getSize()
5585*43a90889SApple OSS Distributions {
5586*43a90889SApple OSS Distributions return fLength;
5587*43a90889SApple OSS Distributions }
5588*43a90889SApple OSS Distributions #endif /* !__LP64__ */
5589*43a90889SApple OSS Distributions
5590*43a90889SApple OSS Distributions
5591*43a90889SApple OSS Distributions task_t
getAddressTask()5592*43a90889SApple OSS Distributions IOMemoryMap::getAddressTask()
5593*43a90889SApple OSS Distributions {
5594*43a90889SApple OSS Distributions if (fSuperMap) {
5595*43a90889SApple OSS Distributions return fSuperMap->getAddressTask();
5596*43a90889SApple OSS Distributions } else {
5597*43a90889SApple OSS Distributions return fAddressTask;
5598*43a90889SApple OSS Distributions }
5599*43a90889SApple OSS Distributions }
5600*43a90889SApple OSS Distributions
5601*43a90889SApple OSS Distributions IOOptionBits
getMapOptions()5602*43a90889SApple OSS Distributions IOMemoryMap::getMapOptions()
5603*43a90889SApple OSS Distributions {
5604*43a90889SApple OSS Distributions return fOptions;
5605*43a90889SApple OSS Distributions }
5606*43a90889SApple OSS Distributions
5607*43a90889SApple OSS Distributions IOMemoryDescriptor *
getMemoryDescriptor()5608*43a90889SApple OSS Distributions IOMemoryMap::getMemoryDescriptor()
5609*43a90889SApple OSS Distributions {
5610*43a90889SApple OSS Distributions return fMemory.get();
5611*43a90889SApple OSS Distributions }
5612*43a90889SApple OSS Distributions
5613*43a90889SApple OSS Distributions IOMemoryMap *
copyCompatible(IOMemoryMap * newMapping)5614*43a90889SApple OSS Distributions IOMemoryMap::copyCompatible(
5615*43a90889SApple OSS Distributions IOMemoryMap * newMapping )
5616*43a90889SApple OSS Distributions {
5617*43a90889SApple OSS Distributions task_t task = newMapping->getAddressTask();
5618*43a90889SApple OSS Distributions mach_vm_address_t toAddress = newMapping->fAddress;
5619*43a90889SApple OSS Distributions IOOptionBits _options = newMapping->fOptions;
5620*43a90889SApple OSS Distributions mach_vm_size_t _offset = newMapping->fOffset;
5621*43a90889SApple OSS Distributions mach_vm_size_t _length = newMapping->fLength;
5622*43a90889SApple OSS Distributions
5623*43a90889SApple OSS Distributions if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5624*43a90889SApple OSS Distributions return NULL;
5625*43a90889SApple OSS Distributions }
5626*43a90889SApple OSS Distributions if ((fOptions ^ _options) & kIOMapReadOnly) {
5627*43a90889SApple OSS Distributions return NULL;
5628*43a90889SApple OSS Distributions }
5629*43a90889SApple OSS Distributions if ((fOptions ^ _options) & kIOMapGuardedMask) {
5630*43a90889SApple OSS Distributions return NULL;
5631*43a90889SApple OSS Distributions }
5632*43a90889SApple OSS Distributions if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5633*43a90889SApple OSS Distributions && ((fOptions ^ _options) & kIOMapCacheMask)) {
5634*43a90889SApple OSS Distributions return NULL;
5635*43a90889SApple OSS Distributions }
5636*43a90889SApple OSS Distributions
5637*43a90889SApple OSS Distributions if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5638*43a90889SApple OSS Distributions return NULL;
5639*43a90889SApple OSS Distributions }
5640*43a90889SApple OSS Distributions
5641*43a90889SApple OSS Distributions if (_offset < fOffset) {
5642*43a90889SApple OSS Distributions return NULL;
5643*43a90889SApple OSS Distributions }
5644*43a90889SApple OSS Distributions
5645*43a90889SApple OSS Distributions _offset -= fOffset;
5646*43a90889SApple OSS Distributions
5647*43a90889SApple OSS Distributions if ((_offset + _length) > fLength) {
5648*43a90889SApple OSS Distributions return NULL;
5649*43a90889SApple OSS Distributions }
5650*43a90889SApple OSS Distributions
5651*43a90889SApple OSS Distributions if ((fLength == _length) && (!_offset)) {
5652*43a90889SApple OSS Distributions retain();
5653*43a90889SApple OSS Distributions newMapping = this;
5654*43a90889SApple OSS Distributions } else {
5655*43a90889SApple OSS Distributions newMapping->fSuperMap.reset(this, OSRetain);
5656*43a90889SApple OSS Distributions newMapping->fOffset = fOffset + _offset;
5657*43a90889SApple OSS Distributions newMapping->fAddress = fAddress + _offset;
5658*43a90889SApple OSS Distributions }
5659*43a90889SApple OSS Distributions
5660*43a90889SApple OSS Distributions return newMapping;
5661*43a90889SApple OSS Distributions }
5662*43a90889SApple OSS Distributions
5663*43a90889SApple OSS Distributions IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5664*43a90889SApple OSS Distributions IOMemoryMap::wireRange(
5665*43a90889SApple OSS Distributions uint32_t options,
5666*43a90889SApple OSS Distributions mach_vm_size_t offset,
5667*43a90889SApple OSS Distributions mach_vm_size_t length)
5668*43a90889SApple OSS Distributions {
5669*43a90889SApple OSS Distributions IOReturn kr;
5670*43a90889SApple OSS Distributions mach_vm_address_t start = trunc_page_64(fAddress + offset);
5671*43a90889SApple OSS Distributions mach_vm_address_t end = round_page_64(fAddress + offset + length);
5672*43a90889SApple OSS Distributions vm_prot_t prot;
5673*43a90889SApple OSS Distributions
5674*43a90889SApple OSS Distributions prot = (kIODirectionOutIn & options);
5675*43a90889SApple OSS Distributions if (prot) {
5676*43a90889SApple OSS Distributions kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5677*43a90889SApple OSS Distributions } else {
5678*43a90889SApple OSS Distributions kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5679*43a90889SApple OSS Distributions }
5680*43a90889SApple OSS Distributions
5681*43a90889SApple OSS Distributions return kr;
5682*43a90889SApple OSS Distributions }
5683*43a90889SApple OSS Distributions
5684*43a90889SApple OSS Distributions
5685*43a90889SApple OSS Distributions IOPhysicalAddress
5686*43a90889SApple OSS Distributions #ifdef __LP64__
getPhysicalSegment(IOByteCount _offset,IOPhysicalLength * _length,IOOptionBits _options)5687*43a90889SApple OSS Distributions IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
5688*43a90889SApple OSS Distributions #else /* !__LP64__ */
5689*43a90889SApple OSS Distributions IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
5690*43a90889SApple OSS Distributions #endif /* !__LP64__ */
5691*43a90889SApple OSS Distributions {
5692*43a90889SApple OSS Distributions IOPhysicalAddress address;
5693*43a90889SApple OSS Distributions
5694*43a90889SApple OSS Distributions LOCK;
5695*43a90889SApple OSS Distributions #ifdef __LP64__
5696*43a90889SApple OSS Distributions address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
5697*43a90889SApple OSS Distributions #else /* !__LP64__ */
5698*43a90889SApple OSS Distributions address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
5699*43a90889SApple OSS Distributions #endif /* !__LP64__ */
5700*43a90889SApple OSS Distributions UNLOCK;
5701*43a90889SApple OSS Distributions
5702*43a90889SApple OSS Distributions return address;
5703*43a90889SApple OSS Distributions }
5704*43a90889SApple OSS Distributions
5705*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5706*43a90889SApple OSS Distributions
5707*43a90889SApple OSS Distributions #undef super
5708*43a90889SApple OSS Distributions #define super OSObject
5709*43a90889SApple OSS Distributions
5710*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5711*43a90889SApple OSS Distributions
5712*43a90889SApple OSS Distributions void
initialize(void)5713*43a90889SApple OSS Distributions IOMemoryDescriptor::initialize( void )
5714*43a90889SApple OSS Distributions {
5715*43a90889SApple OSS Distributions if (NULL == gIOMemoryLock) {
5716*43a90889SApple OSS Distributions gIOMemoryLock = IORecursiveLockAlloc();
5717*43a90889SApple OSS Distributions }
5718*43a90889SApple OSS Distributions
5719*43a90889SApple OSS Distributions gIOLastPage = IOGetLastPageNumber();
5720*43a90889SApple OSS Distributions }
5721*43a90889SApple OSS Distributions
5722*43a90889SApple OSS Distributions void
free(void)5723*43a90889SApple OSS Distributions IOMemoryDescriptor::free( void )
5724*43a90889SApple OSS Distributions {
5725*43a90889SApple OSS Distributions if (_mappings) {
5726*43a90889SApple OSS Distributions _mappings.reset();
5727*43a90889SApple OSS Distributions }
5728*43a90889SApple OSS Distributions
5729*43a90889SApple OSS Distributions if (reserved) {
5730*43a90889SApple OSS Distributions cleanKernelReserved(reserved);
5731*43a90889SApple OSS Distributions IOFreeType(reserved, IOMemoryDescriptorReserved);
5732*43a90889SApple OSS Distributions reserved = NULL;
5733*43a90889SApple OSS Distributions }
5734*43a90889SApple OSS Distributions super::free();
5735*43a90889SApple OSS Distributions }
5736*43a90889SApple OSS Distributions
5737*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5738*43a90889SApple OSS Distributions IOMemoryDescriptor::setMapping(
5739*43a90889SApple OSS Distributions task_t intoTask,
5740*43a90889SApple OSS Distributions IOVirtualAddress mapAddress,
5741*43a90889SApple OSS Distributions IOOptionBits options )
5742*43a90889SApple OSS Distributions {
5743*43a90889SApple OSS Distributions return createMappingInTask( intoTask, mapAddress,
5744*43a90889SApple OSS Distributions options | kIOMapStatic,
5745*43a90889SApple OSS Distributions 0, getLength());
5746*43a90889SApple OSS Distributions }
5747*43a90889SApple OSS Distributions
5748*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5749*43a90889SApple OSS Distributions IOMemoryDescriptor::map(
5750*43a90889SApple OSS Distributions IOOptionBits options )
5751*43a90889SApple OSS Distributions {
5752*43a90889SApple OSS Distributions return createMappingInTask( kernel_task, 0,
5753*43a90889SApple OSS Distributions options | kIOMapAnywhere,
5754*43a90889SApple OSS Distributions 0, getLength());
5755*43a90889SApple OSS Distributions }
5756*43a90889SApple OSS Distributions
5757*43a90889SApple OSS Distributions #ifndef __LP64__
5758*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryMap>
map(task_t intoTask,IOVirtualAddress atAddress,IOOptionBits options,IOByteCount offset,IOByteCount length)5759*43a90889SApple OSS Distributions IOMemoryDescriptor::map(
5760*43a90889SApple OSS Distributions task_t intoTask,
5761*43a90889SApple OSS Distributions IOVirtualAddress atAddress,
5762*43a90889SApple OSS Distributions IOOptionBits options,
5763*43a90889SApple OSS Distributions IOByteCount offset,
5764*43a90889SApple OSS Distributions IOByteCount length )
5765*43a90889SApple OSS Distributions {
5766*43a90889SApple OSS Distributions if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
5767*43a90889SApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
5768*43a90889SApple OSS Distributions return NULL;
5769*43a90889SApple OSS Distributions }
5770*43a90889SApple OSS Distributions
5771*43a90889SApple OSS Distributions return createMappingInTask(intoTask, atAddress,
5772*43a90889SApple OSS Distributions options, offset, length);
5773*43a90889SApple OSS Distributions }
5774*43a90889SApple OSS Distributions #endif /* !__LP64__ */
5775*43a90889SApple OSS Distributions
5776*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5777*43a90889SApple OSS Distributions IOMemoryDescriptor::createMappingInTask(
5778*43a90889SApple OSS Distributions task_t intoTask,
5779*43a90889SApple OSS Distributions mach_vm_address_t atAddress,
5780*43a90889SApple OSS Distributions IOOptionBits options,
5781*43a90889SApple OSS Distributions mach_vm_size_t offset,
5782*43a90889SApple OSS Distributions mach_vm_size_t length)
5783*43a90889SApple OSS Distributions {
5784*43a90889SApple OSS Distributions IOMemoryMap * result;
5785*43a90889SApple OSS Distributions IOMemoryMap * mapping;
5786*43a90889SApple OSS Distributions
5787*43a90889SApple OSS Distributions if (0 == length) {
5788*43a90889SApple OSS Distributions length = getLength();
5789*43a90889SApple OSS Distributions }
5790*43a90889SApple OSS Distributions
5791*43a90889SApple OSS Distributions mapping = new IOMemoryMap;
5792*43a90889SApple OSS Distributions
5793*43a90889SApple OSS Distributions #if 136275805
5794*43a90889SApple OSS Distributions /*
5795*43a90889SApple OSS Distributions * XXX: Redundantly check the mapping size here so that failure stack traces
5796*43a90889SApple OSS Distributions * are more useful. This has no functional value but is helpful because
5797*43a90889SApple OSS Distributions * telemetry traps can currently only capture the last five calls and
5798*43a90889SApple OSS Distributions * so we want to trap as shallow as possible in a select few cases
5799*43a90889SApple OSS Distributions * where we anticipate issues.
5800*43a90889SApple OSS Distributions *
5801*43a90889SApple OSS Distributions * When telemetry collection is complete, this will be removed.
5802*43a90889SApple OSS Distributions */
5803*43a90889SApple OSS Distributions if (__improbable(mapping && !vm_map_is_map_size_valid(
5804*43a90889SApple OSS Distributions get_task_map(intoTask), length, /* no_soft_limit */ false))) {
5805*43a90889SApple OSS Distributions mapping->release();
5806*43a90889SApple OSS Distributions mapping = NULL;
5807*43a90889SApple OSS Distributions }
5808*43a90889SApple OSS Distributions #endif /* 136275805 */
5809*43a90889SApple OSS Distributions
5810*43a90889SApple OSS Distributions if (mapping
5811*43a90889SApple OSS Distributions && !mapping->init( intoTask, atAddress,
5812*43a90889SApple OSS Distributions options, offset, length )) {
5813*43a90889SApple OSS Distributions mapping->release();
5814*43a90889SApple OSS Distributions mapping = NULL;
5815*43a90889SApple OSS Distributions }
5816*43a90889SApple OSS Distributions
5817*43a90889SApple OSS Distributions if (mapping) {
5818*43a90889SApple OSS Distributions result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5819*43a90889SApple OSS Distributions } else {
5820*43a90889SApple OSS Distributions result = nullptr;
5821*43a90889SApple OSS Distributions }
5822*43a90889SApple OSS Distributions
5823*43a90889SApple OSS Distributions #if DEBUG
5824*43a90889SApple OSS Distributions if (!result) {
5825*43a90889SApple OSS Distributions IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5826*43a90889SApple OSS Distributions this, atAddress, (uint32_t) options, offset, length);
5827*43a90889SApple OSS Distributions }
5828*43a90889SApple OSS Distributions #endif
5829*43a90889SApple OSS Distributions
5830*43a90889SApple OSS Distributions // already retained through makeMapping
5831*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5832*43a90889SApple OSS Distributions
5833*43a90889SApple OSS Distributions return retval;
5834*43a90889SApple OSS Distributions }
5835*43a90889SApple OSS Distributions
5836*43a90889SApple OSS Distributions #ifndef __LP64__ // there is only a 64 bit version for LP64
5837*43a90889SApple OSS Distributions IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,IOByteCount offset)5838*43a90889SApple OSS Distributions IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5839*43a90889SApple OSS Distributions IOOptionBits options,
5840*43a90889SApple OSS Distributions IOByteCount offset)
5841*43a90889SApple OSS Distributions {
5842*43a90889SApple OSS Distributions return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
5843*43a90889SApple OSS Distributions }
5844*43a90889SApple OSS Distributions #endif
5845*43a90889SApple OSS Distributions
5846*43a90889SApple OSS Distributions IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,mach_vm_size_t offset)5847*43a90889SApple OSS Distributions IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5848*43a90889SApple OSS Distributions IOOptionBits options,
5849*43a90889SApple OSS Distributions mach_vm_size_t offset)
5850*43a90889SApple OSS Distributions {
5851*43a90889SApple OSS Distributions IOReturn err = kIOReturnSuccess;
5852*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> physMem;
5853*43a90889SApple OSS Distributions
5854*43a90889SApple OSS Distributions LOCK;
5855*43a90889SApple OSS Distributions
5856*43a90889SApple OSS Distributions if (fAddress && fAddressMap) {
5857*43a90889SApple OSS Distributions do{
5858*43a90889SApple OSS Distributions if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5859*43a90889SApple OSS Distributions || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5860*43a90889SApple OSS Distributions physMem = fMemory;
5861*43a90889SApple OSS Distributions }
5862*43a90889SApple OSS Distributions
5863*43a90889SApple OSS Distributions if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
5864*43a90889SApple OSS Distributions upl_size_t size = (typeof(size))round_page(fLength);
5865*43a90889SApple OSS Distributions upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5866*43a90889SApple OSS Distributions | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5867*43a90889SApple OSS Distributions if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
5868*43a90889SApple OSS Distributions NULL, NULL,
5869*43a90889SApple OSS Distributions &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
5870*43a90889SApple OSS Distributions fRedirUPL = NULL;
5871*43a90889SApple OSS Distributions }
5872*43a90889SApple OSS Distributions
5873*43a90889SApple OSS Distributions if (physMem) {
5874*43a90889SApple OSS Distributions IOUnmapPages( fAddressMap, fAddress, fLength );
5875*43a90889SApple OSS Distributions if ((false)) {
5876*43a90889SApple OSS Distributions physMem->redirect(NULL, true);
5877*43a90889SApple OSS Distributions }
5878*43a90889SApple OSS Distributions }
5879*43a90889SApple OSS Distributions }
5880*43a90889SApple OSS Distributions
5881*43a90889SApple OSS Distributions if (newBackingMemory) {
5882*43a90889SApple OSS Distributions if (newBackingMemory != fMemory) {
5883*43a90889SApple OSS Distributions fOffset = 0;
5884*43a90889SApple OSS Distributions if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
5885*43a90889SApple OSS Distributions options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
5886*43a90889SApple OSS Distributions offset, fLength)) {
5887*43a90889SApple OSS Distributions err = kIOReturnError;
5888*43a90889SApple OSS Distributions }
5889*43a90889SApple OSS Distributions }
5890*43a90889SApple OSS Distributions if (fRedirUPL) {
5891*43a90889SApple OSS Distributions upl_commit(fRedirUPL, NULL, 0);
5892*43a90889SApple OSS Distributions upl_deallocate(fRedirUPL);
5893*43a90889SApple OSS Distributions fRedirUPL = NULL;
5894*43a90889SApple OSS Distributions }
5895*43a90889SApple OSS Distributions if ((false) && physMem) {
5896*43a90889SApple OSS Distributions physMem->redirect(NULL, false);
5897*43a90889SApple OSS Distributions }
5898*43a90889SApple OSS Distributions }
5899*43a90889SApple OSS Distributions }while (false);
5900*43a90889SApple OSS Distributions }
5901*43a90889SApple OSS Distributions
5902*43a90889SApple OSS Distributions UNLOCK;
5903*43a90889SApple OSS Distributions
5904*43a90889SApple OSS Distributions return err;
5905*43a90889SApple OSS Distributions }
5906*43a90889SApple OSS Distributions
5907*43a90889SApple OSS Distributions IOMemoryMap *
makeMapping(IOMemoryDescriptor * owner,task_t __intoTask,IOVirtualAddress __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5908*43a90889SApple OSS Distributions IOMemoryDescriptor::makeMapping(
5909*43a90889SApple OSS Distributions IOMemoryDescriptor * owner,
5910*43a90889SApple OSS Distributions task_t __intoTask,
5911*43a90889SApple OSS Distributions IOVirtualAddress __address,
5912*43a90889SApple OSS Distributions IOOptionBits options,
5913*43a90889SApple OSS Distributions IOByteCount __offset,
5914*43a90889SApple OSS Distributions IOByteCount __length )
5915*43a90889SApple OSS Distributions {
5916*43a90889SApple OSS Distributions #ifndef __LP64__
5917*43a90889SApple OSS Distributions if (!(kIOMap64Bit & options)) {
5918*43a90889SApple OSS Distributions panic("IOMemoryDescriptor::makeMapping !64bit");
5919*43a90889SApple OSS Distributions }
5920*43a90889SApple OSS Distributions #endif /* !__LP64__ */
5921*43a90889SApple OSS Distributions
5922*43a90889SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> mapDesc;
5923*43a90889SApple OSS Distributions __block IOMemoryMap * result = NULL;
5924*43a90889SApple OSS Distributions
5925*43a90889SApple OSS Distributions IOMemoryMap * mapping = (IOMemoryMap *) __address;
5926*43a90889SApple OSS Distributions mach_vm_size_t offset = mapping->fOffset + __offset;
5927*43a90889SApple OSS Distributions mach_vm_size_t length = mapping->fLength;
5928*43a90889SApple OSS Distributions
5929*43a90889SApple OSS Distributions mapping->fOffset = offset;
5930*43a90889SApple OSS Distributions
5931*43a90889SApple OSS Distributions LOCK;
5932*43a90889SApple OSS Distributions
5933*43a90889SApple OSS Distributions do{
5934*43a90889SApple OSS Distributions if (kIOMapStatic & options) {
5935*43a90889SApple OSS Distributions result = mapping;
5936*43a90889SApple OSS Distributions addMapping(mapping);
5937*43a90889SApple OSS Distributions mapping->setMemoryDescriptor(this, 0);
5938*43a90889SApple OSS Distributions continue;
5939*43a90889SApple OSS Distributions }
5940*43a90889SApple OSS Distributions
5941*43a90889SApple OSS Distributions if (kIOMapUnique & options) {
5942*43a90889SApple OSS Distributions addr64_t phys;
5943*43a90889SApple OSS Distributions IOByteCount physLen;
5944*43a90889SApple OSS Distributions
5945*43a90889SApple OSS Distributions // if (owner != this) continue;
5946*43a90889SApple OSS Distributions
5947*43a90889SApple OSS Distributions if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5948*43a90889SApple OSS Distributions || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5949*43a90889SApple OSS Distributions phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
5950*43a90889SApple OSS Distributions if (!phys || (physLen < length)) {
5951*43a90889SApple OSS Distributions continue;
5952*43a90889SApple OSS Distributions }
5953*43a90889SApple OSS Distributions
5954*43a90889SApple OSS Distributions mapDesc = IOMemoryDescriptor::withAddressRange(
5955*43a90889SApple OSS Distributions phys, length, getDirection() | kIOMemoryMapperNone, NULL);
5956*43a90889SApple OSS Distributions if (!mapDesc) {
5957*43a90889SApple OSS Distributions continue;
5958*43a90889SApple OSS Distributions }
5959*43a90889SApple OSS Distributions offset = 0;
5960*43a90889SApple OSS Distributions mapping->fOffset = offset;
5961*43a90889SApple OSS Distributions }
5962*43a90889SApple OSS Distributions } else {
5963*43a90889SApple OSS Distributions // look for a compatible existing mapping
5964*43a90889SApple OSS Distributions if (_mappings) {
5965*43a90889SApple OSS Distributions _mappings->iterateObjects(^(OSObject * object)
5966*43a90889SApple OSS Distributions {
5967*43a90889SApple OSS Distributions IOMemoryMap * lookMapping = (IOMemoryMap *) object;
5968*43a90889SApple OSS Distributions if ((result = lookMapping->copyCompatible(mapping))) {
5969*43a90889SApple OSS Distributions addMapping(result);
5970*43a90889SApple OSS Distributions result->setMemoryDescriptor(this, offset);
5971*43a90889SApple OSS Distributions return true;
5972*43a90889SApple OSS Distributions }
5973*43a90889SApple OSS Distributions return false;
5974*43a90889SApple OSS Distributions });
5975*43a90889SApple OSS Distributions }
5976*43a90889SApple OSS Distributions if (result || (options & kIOMapReference)) {
5977*43a90889SApple OSS Distributions if (result != mapping) {
5978*43a90889SApple OSS Distributions mapping->release();
5979*43a90889SApple OSS Distributions mapping = NULL;
5980*43a90889SApple OSS Distributions }
5981*43a90889SApple OSS Distributions continue;
5982*43a90889SApple OSS Distributions }
5983*43a90889SApple OSS Distributions }
5984*43a90889SApple OSS Distributions
5985*43a90889SApple OSS Distributions if (!mapDesc) {
5986*43a90889SApple OSS Distributions mapDesc.reset(this, OSRetain);
5987*43a90889SApple OSS Distributions }
5988*43a90889SApple OSS Distributions IOReturn
5989*43a90889SApple OSS Distributions kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
5990*43a90889SApple OSS Distributions if (kIOReturnSuccess == kr) {
5991*43a90889SApple OSS Distributions result = mapping;
5992*43a90889SApple OSS Distributions mapDesc->addMapping(result);
5993*43a90889SApple OSS Distributions result->setMemoryDescriptor(mapDesc.get(), offset);
5994*43a90889SApple OSS Distributions } else {
5995*43a90889SApple OSS Distributions mapping->release();
5996*43a90889SApple OSS Distributions mapping = NULL;
5997*43a90889SApple OSS Distributions }
5998*43a90889SApple OSS Distributions }while (false);
5999*43a90889SApple OSS Distributions
6000*43a90889SApple OSS Distributions UNLOCK;
6001*43a90889SApple OSS Distributions
6002*43a90889SApple OSS Distributions return result;
6003*43a90889SApple OSS Distributions }
6004*43a90889SApple OSS Distributions
6005*43a90889SApple OSS Distributions void
addMapping(IOMemoryMap * mapping)6006*43a90889SApple OSS Distributions IOMemoryDescriptor::addMapping(
6007*43a90889SApple OSS Distributions IOMemoryMap * mapping )
6008*43a90889SApple OSS Distributions {
6009*43a90889SApple OSS Distributions if (mapping) {
6010*43a90889SApple OSS Distributions if (NULL == _mappings) {
6011*43a90889SApple OSS Distributions _mappings = OSSet::withCapacity(1);
6012*43a90889SApple OSS Distributions }
6013*43a90889SApple OSS Distributions if (_mappings) {
6014*43a90889SApple OSS Distributions _mappings->setObject( mapping );
6015*43a90889SApple OSS Distributions }
6016*43a90889SApple OSS Distributions }
6017*43a90889SApple OSS Distributions }
6018*43a90889SApple OSS Distributions
6019*43a90889SApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)6020*43a90889SApple OSS Distributions IOMemoryDescriptor::removeMapping(
6021*43a90889SApple OSS Distributions IOMemoryMap * mapping )
6022*43a90889SApple OSS Distributions {
6023*43a90889SApple OSS Distributions if (_mappings) {
6024*43a90889SApple OSS Distributions _mappings->removeObject( mapping);
6025*43a90889SApple OSS Distributions }
6026*43a90889SApple OSS Distributions }
6027*43a90889SApple OSS Distributions
6028*43a90889SApple OSS Distributions void
setMapperOptions(uint16_t options)6029*43a90889SApple OSS Distributions IOMemoryDescriptor::setMapperOptions( uint16_t options)
6030*43a90889SApple OSS Distributions {
6031*43a90889SApple OSS Distributions _iomapperOptions = options;
6032*43a90889SApple OSS Distributions }
6033*43a90889SApple OSS Distributions
6034*43a90889SApple OSS Distributions uint16_t
getMapperOptions(void)6035*43a90889SApple OSS Distributions IOMemoryDescriptor::getMapperOptions( void )
6036*43a90889SApple OSS Distributions {
6037*43a90889SApple OSS Distributions return _iomapperOptions;
6038*43a90889SApple OSS Distributions }
6039*43a90889SApple OSS Distributions
6040*43a90889SApple OSS Distributions #ifndef __LP64__
6041*43a90889SApple OSS Distributions // obsolete initializers
6042*43a90889SApple OSS Distributions // - initWithOptions is the designated initializer
6043*43a90889SApple OSS Distributions bool
initWithAddress(void * address,IOByteCount length,IODirection direction)6044*43a90889SApple OSS Distributions IOMemoryDescriptor::initWithAddress(void * address,
6045*43a90889SApple OSS Distributions IOByteCount length,
6046*43a90889SApple OSS Distributions IODirection direction)
6047*43a90889SApple OSS Distributions {
6048*43a90889SApple OSS Distributions return false;
6049*43a90889SApple OSS Distributions }
6050*43a90889SApple OSS Distributions
// Obsolete initializer (task-qualified variant): kept only for the
// 32-bit ABI; always fails.  Use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t task)
{
	return false;
}
6059*43a90889SApple OSS Distributions
// Obsolete initializer: kept only for the 32-bit ABI; always fails.
// Use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount length,
	IODirection direction )
{
	return false;
}
6068*43a90889SApple OSS Distributions
// Obsolete initializer: kept only for the 32-bit ABI; always fails.
// Use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32 withCount,
	IODirection direction,
	task_t task,
	bool asReference)
{
	return false;
}
6079*43a90889SApple OSS Distributions
// Obsolete initializer: kept only for the 32-bit ABI; always fails.
// Use initWithOptions() instead.
bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
    UInt32 withCount,
    IODirection direction,
    bool asReference)
{
	return false;
}
6088*43a90889SApple OSS Distributions
// Obsolete accessor: no longer implemented on 32-bit; always returns NULL.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
6095*43a90889SApple OSS Distributions #endif /* !__LP64__ */
6096*43a90889SApple OSS Distributions
6097*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6098*43a90889SApple OSS Distributions
/*
 * Serialize this descriptor's range list as an OSArray of
 * { "address", "length" } dictionaries written to 's'.
 *
 * All allocation (the array, the per-range snapshot buffer, the key
 * symbols) happens before the descriptor lock is taken; the ranges are
 * then copied into the snapshot under LOCK and the OSNumber/OSDictionary
 * objects are built after UNLOCK.  Returns false on NULL serializer, on
 * any allocation failure, or if the range count changed between the
 * snapshot allocation and taking the lock.
 */
bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	OSSharedPtr<OSSymbol const> keys[2] = {NULL};
	OSSharedPtr<OSObject> values[2] = {NULL};
	OSSharedPtr<OSArray> array;

	// Plain copy of one range; filled under the lock, consumed after.
	struct SerData {
		user_addr_t address;
		user_size_t length;
	};

	unsigned int index;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	// One SerData slot per range, allocated before taking the lock.
	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
	if (!vcopy) {
		return false;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (vcopy.size() == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < vcopy.size(); index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index, _task);
			vcopy[index].address = addr;
			vcopy[index].length = len;
		}
	} else {
		// The descriptor changed out from under us. Give up.
		UNLOCK;
		return false;
	}
	UNLOCK;

	// Build the serializable representation outside the lock.
	// OSSharedPtr releases any partially-built objects on early return.
	for (index = 0; index < vcopy.size(); index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			return false;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			return false;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			return false;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	return array->serialize(s);
}
6173*43a90889SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6174*43a90889SApple OSS Distributions
/*
 * Reserved vtable slot definitions for IOMemoryDescriptor binary
 * compatibility.  Slot 0 is in use on all architectures; slots 1-7 are
 * in use only on 32-bit (!__LP64__) builds, reserved otherwise.
 */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
    struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6205*43a90889SApple OSS Distributions
6206*43a90889SApple OSS Distributions /* ex-inline function implementation */
6207*43a90889SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()6208*43a90889SApple OSS Distributions IOMemoryDescriptor::getPhysicalAddress()
6209*43a90889SApple OSS Distributions {
6210*43a90889SApple OSS Distributions return getPhysicalSegment( 0, NULL );
6211*43a90889SApple OSS Distributions }
6212*43a90889SApple OSS Distributions
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData,OSObject)6213*43a90889SApple OSS Distributions OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6214*43a90889SApple OSS Distributions
6215*43a90889SApple OSS Distributions OSPtr<_IOMemoryDescriptorMixedData>
6216*43a90889SApple OSS Distributions _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6217*43a90889SApple OSS Distributions {
6218*43a90889SApple OSS Distributions OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6219*43a90889SApple OSS Distributions if (me && !me->initWithCapacity(capacity)) {
6220*43a90889SApple OSS Distributions return nullptr;
6221*43a90889SApple OSS Distributions }
6222*43a90889SApple OSS Distributions return me;
6223*43a90889SApple OSS Distributions }
6224*43a90889SApple OSS Distributions
/*
 * Initialize (or re-initialize) with at least 'capacity' bytes of
 * backing storage.  An existing buffer is released first if the caller
 * asked for zero capacity or more than is currently allocated; an
 * existing buffer that is already large enough is reused.  The logical
 * length is reset to 0 in all cases.  Returns false on init or
 * allocation failure.
 */
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	// Re-init path: drop a prior buffer that cannot satisfy the request.
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	if (!_data && capacity) {
		// Variable-size allocation from the mixed-data kalloc zone;
		// Z_WAITOK_ZERO requests zero-filled memory.
		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6249*43a90889SApple OSS Distributions
// Destructor hook: release the backing buffer, then run OSObject teardown.
void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}
6256*43a90889SApple OSS Distributions
// Release the backing buffer (sized by _capacity) and reset all
// bookkeeping fields so the object can be re-initialized.
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6264*43a90889SApple OSS Distributions
6265*43a90889SApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6266*43a90889SApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6267*43a90889SApple OSS Distributions {
6268*43a90889SApple OSS Distributions const auto oldLength = getLength();
6269*43a90889SApple OSS Distributions size_t newLength;
6270*43a90889SApple OSS Distributions if (os_add_overflow(oldLength, length, &newLength)) {
6271*43a90889SApple OSS Distributions return false;
6272*43a90889SApple OSS Distributions }
6273*43a90889SApple OSS Distributions
6274*43a90889SApple OSS Distributions if (!setLength(newLength)) {
6275*43a90889SApple OSS Distributions return false;
6276*43a90889SApple OSS Distributions }
6277*43a90889SApple OSS Distributions
6278*43a90889SApple OSS Distributions unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6279*43a90889SApple OSS Distributions if (bytes) {
6280*43a90889SApple OSS Distributions bcopy(bytes, dest, length);
6281*43a90889SApple OSS Distributions }
6282*43a90889SApple OSS Distributions
6283*43a90889SApple OSS Distributions return true;
6284*43a90889SApple OSS Distributions }
6285*43a90889SApple OSS Distributions
/*
 * Set the logical length of the buffer.  Growing past the current
 * capacity reallocates through __krealloc_type (Z_WAITOK_ZERO requests
 * the grown region be zeroed); shrinking only lowers _length and keeps
 * the allocation at its current size.  Returns false if reallocation
 * fails, in which case the old buffer and fields are untouched.
 */
bool
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void *newData;

		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
		    NULL);
		if (!newData) {
			return false;
		}

		_data = newData;
		_capacity = length;
	}

	_length = length;
	return true;
}
6306*43a90889SApple OSS Distributions
6307*43a90889SApple OSS Distributions const void *
getBytes() const6308*43a90889SApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6309*43a90889SApple OSS Distributions {
6310*43a90889SApple OSS Distributions return _length ? _data : nullptr;
6311*43a90889SApple OSS Distributions }
6312*43a90889SApple OSS Distributions
6313*43a90889SApple OSS Distributions size_t
getLength() const6314*43a90889SApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6315*43a90889SApple OSS Distributions {
6316*43a90889SApple OSS Distributions return _data ? _length : 0;
6317*43a90889SApple OSS Distributions }
6318