1*699cd480SApple OSS Distributions /*
2*699cd480SApple OSS Distributions * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3*699cd480SApple OSS Distributions *
4*699cd480SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*699cd480SApple OSS Distributions *
6*699cd480SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*699cd480SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*699cd480SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*699cd480SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*699cd480SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*699cd480SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*699cd480SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*699cd480SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*699cd480SApple OSS Distributions *
15*699cd480SApple OSS Distributions * Please obtain a copy of the License at
16*699cd480SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*699cd480SApple OSS Distributions *
18*699cd480SApple OSS Distributions * The Original Code and all software distributed under the License are
19*699cd480SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*699cd480SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*699cd480SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*699cd480SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*699cd480SApple OSS Distributions * Please see the License for the specific language governing rights and
24*699cd480SApple OSS Distributions * limitations under the License.
25*699cd480SApple OSS Distributions *
26*699cd480SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*699cd480SApple OSS Distributions */
28*699cd480SApple OSS Distributions #define IOKIT_ENABLE_SHARED_PTR
29*699cd480SApple OSS Distributions
30*699cd480SApple OSS Distributions #include <sys/cdefs.h>
31*699cd480SApple OSS Distributions
32*699cd480SApple OSS Distributions #include <IOKit/assert.h>
33*699cd480SApple OSS Distributions #include <IOKit/system.h>
34*699cd480SApple OSS Distributions #include <IOKit/IOLib.h>
35*699cd480SApple OSS Distributions #include <IOKit/IOMemoryDescriptor.h>
36*699cd480SApple OSS Distributions #include <IOKit/IOMapper.h>
37*699cd480SApple OSS Distributions #include <IOKit/IODMACommand.h>
38*699cd480SApple OSS Distributions #include <IOKit/IOKitKeysPrivate.h>
39*699cd480SApple OSS Distributions
40*699cd480SApple OSS Distributions #include <IOKit/IOSubMemoryDescriptor.h>
41*699cd480SApple OSS Distributions #include <IOKit/IOMultiMemoryDescriptor.h>
42*699cd480SApple OSS Distributions #include <IOKit/IOBufferMemoryDescriptor.h>
43*699cd480SApple OSS Distributions
44*699cd480SApple OSS Distributions #include <IOKit/IOKitDebug.h>
45*699cd480SApple OSS Distributions #include <IOKit/IOTimeStamp.h>
46*699cd480SApple OSS Distributions #include <libkern/OSDebug.h>
47*699cd480SApple OSS Distributions #include <libkern/OSKextLibPrivate.h>
48*699cd480SApple OSS Distributions
49*699cd480SApple OSS Distributions #include "IOKitKernelInternal.h"
50*699cd480SApple OSS Distributions
51*699cd480SApple OSS Distributions #include <libkern/c++/OSAllocation.h>
52*699cd480SApple OSS Distributions #include <libkern/c++/OSContainers.h>
53*699cd480SApple OSS Distributions #include <libkern/c++/OSDictionary.h>
54*699cd480SApple OSS Distributions #include <libkern/c++/OSArray.h>
55*699cd480SApple OSS Distributions #include <libkern/c++/OSSymbol.h>
56*699cd480SApple OSS Distributions #include <libkern/c++/OSNumber.h>
57*699cd480SApple OSS Distributions #include <os/overflow.h>
58*699cd480SApple OSS Distributions #include <os/cpp_util.h>
59*699cd480SApple OSS Distributions #include <os/base_private.h>
60*699cd480SApple OSS Distributions
61*699cd480SApple OSS Distributions #include <sys/uio.h>
62*699cd480SApple OSS Distributions
63*699cd480SApple OSS Distributions __BEGIN_DECLS
64*699cd480SApple OSS Distributions #include <vm/pmap.h>
65*699cd480SApple OSS Distributions #include <vm/vm_pageout.h>
66*699cd480SApple OSS Distributions #include <mach/memory_object_types.h>
67*699cd480SApple OSS Distributions #include <device/device_port.h>
68*699cd480SApple OSS Distributions
69*699cd480SApple OSS Distributions #include <mach/vm_prot.h>
70*699cd480SApple OSS Distributions #include <mach/mach_vm.h>
71*699cd480SApple OSS Distributions #include <mach/memory_entry.h>
72*699cd480SApple OSS Distributions #include <vm/vm_fault.h>
73*699cd480SApple OSS Distributions #include <vm/vm_protos.h>
74*699cd480SApple OSS Distributions
75*699cd480SApple OSS Distributions extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76*699cd480SApple OSS Distributions extern void ipc_port_release_send(ipc_port_t port);
77*699cd480SApple OSS Distributions
78*699cd480SApple OSS Distributions extern kern_return_t
79*699cd480SApple OSS Distributions mach_memory_entry_ownership(
80*699cd480SApple OSS Distributions ipc_port_t entry_port,
81*699cd480SApple OSS Distributions task_t owner,
82*699cd480SApple OSS Distributions int ledger_tag,
83*699cd480SApple OSS Distributions int ledger_flags);
84*699cd480SApple OSS Distributions
85*699cd480SApple OSS Distributions __END_DECLS
86*699cd480SApple OSS Distributions
// Sentinel IOMapper* meaning "use / wait for the system mapper" rather than
// a real mapper instance (compared by value, never dereferenced).
#define kIOMapperWaitSystem     ((IOMapper *) 1)

// Cached pointer to the system-wide IOMapper; NULL until resolved.
static IOMapper * gIOSystemMapper = NULL;

// NOTE(review): presumably the highest valid physical page number on this
// system — set outside this chunk; confirm before relying on it.
ppnum_t gIOLastPage;

enum {
	kIOMapGuardSizeLarge = 65536    // size of the "large" guard region used for guarded mappings
};
96*699cd480SApple OSS Distributions
97*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
98*699cd480SApple OSS Distributions
// IOMemoryDescriptor is abstract: no direct instances, only subclasses.
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

// "super" used by the IOGeneralMemoryDescriptor implementation below.
#define super IOMemoryDescriptor

// IOGeneralMemoryDescriptor instances come from their own zone and are
// zeroed on free (ZC_ZFREE_CLEARMEM).
OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
105*699cd480SApple OSS Distributions
106*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
107*699cd480SApple OSS Distributions
// Single recursive lock serialising IOMemoryDescriptor global state
// (e.g. the reserved/device-pager linkage used in device_data_action).
static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
// Sleep/wakeup are keyed on the descriptor ("this"), so only waiters on the
// same object are woken.
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Compile-time debug logging: flip the #if to 1 to route DEBG to kprintf.
#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif
121*699cd480SApple OSS Distributions
122*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123*699cd480SApple OSS Distributions
124*699cd480SApple OSS Distributions // Some data structures and accessor macros used by the initWithOptions
125*699cd480SApple OSS Distributions // Function
126*699cd480SApple OSS Distributions
// Flag bits stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice = 0x00000001,     // NOTE(review): appears to mark device-backed pages — confirm
	kIOPLExternUPL = 0x00000002,    // NOTE(review): presumably the UPL came from the caller — confirm
};

// Initialisation payload for building a descriptor from a persistent
// memory reference: the source descriptor plus its memory reference.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference * fMemRef;
};

// Book-keeping for one UPL covering part of the descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo;         // Pointer to page list or index into it
	uint64_t fIOMDOffset;           // The offset of this iopl in descriptor
	ppnum_t fMappedPage;            // Page number of first page in this iopl
	unsigned int fPageOffset;       // Offset within first page of iopl
	unsigned int fFlags;            // Flags (ioPLBlockFlags)
};

enum { kMaxWireTags = 6 };

// Variable-length backing record for an IOGeneralMemoryDescriptor.
// Layout: this fixed header, then fPageCnt upl_page_info_t entries
// (fPageList), then an array of ioPLBlock records — see the getIOPLList()
// and getNumIOPL() accessor macros below.
struct ioGMDData {
	IOMapper * fMapper;
	uint64_t fDMAMapAlignment;
	uint64_t fMappedBase;           // NOTE(review): presumably valid only when fMappedBaseValid is set
	uint64_t fMappedLength;
	uint64_t fPreparationID;
#if IOTRACKING
	IOTracking fWireTracking;
#endif /* IOTRACKING */
	unsigned int fPageCnt;          // number of entries in fPageList
	uint8_t fDMAMapNumAddressBits;
	unsigned char fCompletionError:1;
	unsigned char fMappedBaseValid:1;
	unsigned char _resv:4;
	unsigned char fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
173*699cd480SApple OSS Distributions
174*699cd480SApple OSS Distributions #pragma GCC visibility push(hidden)
175*699cd480SApple OSS Distributions
// Growable byte container used to hold a descriptor's variable-sized
// internal data (an ioGMDData header plus its trailing arrays).
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create a container pre-sized for at least `capacity` bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append `length` bytes from `bytes`.
	bool appendBytes(const void * bytes, size_t length);
	// Set the logical length. NOTE(review): grow/shrink semantics live in the
	// out-of-view implementation — confirm there.
	bool setLength(size_t length);

	const void * getBytes() const;  // raw pointer to the stored bytes
	size_t getLength() const;       // current logical length in bytes

private:
	void freeMemory();              // release _data and reset the fields below

	void * _data = nullptr;         // heap storage
	size_t _length = 0;             // bytes in use
	size_t _capacity = 0;           // bytes allocated
};
198*699cd480SApple OSS Distributions
199*699cd480SApple OSS Distributions #pragma GCC visibility pop
200*699cd480SApple OSS Distributions
// Accessors over the ioGMDData stored in a _IOMemoryDescriptorMixedData
// container ("osd"). Layout: header, fPageCnt page-info entries, then the
// ioPLBlock array out to the container's length.
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
// First ioPLBlock, located immediately after the page list.
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
// Number of ioPLBlocks: bytes remaining after the page list / sizeof(ioPLBlock).
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
// Total bytes needed for a header carrying p page entries and u ioPLBlocks.
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

// Convenience mask: descriptor is host-only or remote memory.
enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
210*699cd480SApple OSS Distributions
211*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212*699cd480SApple OSS Distributions
213*699cd480SApple OSS Distributions extern "C" {
214*699cd480SApple OSS Distributions kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)215*699cd480SApple OSS Distributions device_data_action(
216*699cd480SApple OSS Distributions uintptr_t device_handle,
217*699cd480SApple OSS Distributions ipc_port_t device_pager,
218*699cd480SApple OSS Distributions vm_prot_t protection,
219*699cd480SApple OSS Distributions vm_object_offset_t offset,
220*699cd480SApple OSS Distributions vm_size_t size)
221*699cd480SApple OSS Distributions {
222*699cd480SApple OSS Distributions kern_return_t kr;
223*699cd480SApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
224*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> memDesc;
225*699cd480SApple OSS Distributions
226*699cd480SApple OSS Distributions LOCK;
227*699cd480SApple OSS Distributions if (ref->dp.memory) {
228*699cd480SApple OSS Distributions memDesc.reset(ref->dp.memory, OSRetain);
229*699cd480SApple OSS Distributions kr = memDesc->handleFault(device_pager, offset, size);
230*699cd480SApple OSS Distributions memDesc.reset();
231*699cd480SApple OSS Distributions } else {
232*699cd480SApple OSS Distributions kr = KERN_ABORTED;
233*699cd480SApple OSS Distributions }
234*699cd480SApple OSS Distributions UNLOCK;
235*699cd480SApple OSS Distributions
236*699cd480SApple OSS Distributions return kr;
237*699cd480SApple OSS Distributions }
238*699cd480SApple OSS Distributions
239*699cd480SApple OSS Distributions kern_return_t
device_close(uintptr_t device_handle)240*699cd480SApple OSS Distributions device_close(
241*699cd480SApple OSS Distributions uintptr_t device_handle)
242*699cd480SApple OSS Distributions {
243*699cd480SApple OSS Distributions IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
244*699cd480SApple OSS Distributions
245*699cd480SApple OSS Distributions IOFreeType( ref, IOMemoryDescriptorReserved );
246*699cd480SApple OSS Distributions
247*699cd480SApple OSS Distributions return kIOReturnSuccess;
248*699cd480SApple OSS Distributions }
249*699cd480SApple OSS Distributions }; // end extern "C"
250*699cd480SApple OSS Distributions
251*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
252*699cd480SApple OSS Distributions
253*699cd480SApple OSS Distributions // Note this inline function uses C++ reference arguments to return values
254*699cd480SApple OSS Distributions // This means that pointers are not passed and NULLs don't have to be
255*699cd480SApple OSS Distributions // checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(
	mach_vm_address_t &addr,
	mach_vm_size_t &len, // Output variables
	UInt32 type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32 ind,
	task_t task __unused)
{
	// Decode range `ind` from the Ranges union `r` into (addr, len),
	// according to the descriptor's range-type bits. Only UIO, virtual and
	// physical range types are legal here.
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// uio-backed descriptor: fetch the ind'th iovec.
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		// ILP32 only: 64-bit range variants use the wider IOAddressRange
		// layout. (On LP64 this branch is compiled out; presumably the v[]
		// layout below already carries 64-bit fields there — confirm.)
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len = cur.length;
	}
#if CONFIG_PROB_GZALLOC
	if (task == kernel_task) {
		// Probabilistic guard-zone allocator: translate a PGZ-aliased kernel
		// address back to the canonical one before it is used for DMA/wiring.
		addr = pgz_decode(addr, len);
	}
#endif /* CONFIG_PROB_GZALLOC */
}
291*699cd480SApple OSS Distributions
292*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
293*699cd480SApple OSS Distributions
294*699cd480SApple OSS Distributions static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)295*699cd480SApple OSS Distributions purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
296*699cd480SApple OSS Distributions {
297*699cd480SApple OSS Distributions IOReturn err = kIOReturnSuccess;
298*699cd480SApple OSS Distributions
299*699cd480SApple OSS Distributions *control = VM_PURGABLE_SET_STATE;
300*699cd480SApple OSS Distributions
301*699cd480SApple OSS Distributions enum { kIOMemoryPurgeableControlMask = 15 };
302*699cd480SApple OSS Distributions
303*699cd480SApple OSS Distributions switch (kIOMemoryPurgeableControlMask & newState) {
304*699cd480SApple OSS Distributions case kIOMemoryPurgeableKeepCurrent:
305*699cd480SApple OSS Distributions *control = VM_PURGABLE_GET_STATE;
306*699cd480SApple OSS Distributions break;
307*699cd480SApple OSS Distributions
308*699cd480SApple OSS Distributions case kIOMemoryPurgeableNonVolatile:
309*699cd480SApple OSS Distributions *state = VM_PURGABLE_NONVOLATILE;
310*699cd480SApple OSS Distributions break;
311*699cd480SApple OSS Distributions case kIOMemoryPurgeableVolatile:
312*699cd480SApple OSS Distributions *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
313*699cd480SApple OSS Distributions break;
314*699cd480SApple OSS Distributions case kIOMemoryPurgeableEmpty:
315*699cd480SApple OSS Distributions *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
316*699cd480SApple OSS Distributions break;
317*699cd480SApple OSS Distributions default:
318*699cd480SApple OSS Distributions err = kIOReturnBadArgument;
319*699cd480SApple OSS Distributions break;
320*699cd480SApple OSS Distributions }
321*699cd480SApple OSS Distributions
322*699cd480SApple OSS Distributions if (*control == VM_PURGABLE_SET_STATE) {
323*699cd480SApple OSS Distributions // let VM know this call is from the kernel and is allowed to alter
324*699cd480SApple OSS Distributions // the volatility of the memory entry even if it was created with
325*699cd480SApple OSS Distributions // MAP_MEM_PURGABLE_KERNEL_ONLY
326*699cd480SApple OSS Distributions *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
327*699cd480SApple OSS Distributions }
328*699cd480SApple OSS Distributions
329*699cd480SApple OSS Distributions return err;
330*699cd480SApple OSS Distributions }
331*699cd480SApple OSS Distributions
332*699cd480SApple OSS Distributions static IOReturn
purgeableStateBits(int * state)333*699cd480SApple OSS Distributions purgeableStateBits(int * state)
334*699cd480SApple OSS Distributions {
335*699cd480SApple OSS Distributions IOReturn err = kIOReturnSuccess;
336*699cd480SApple OSS Distributions
337*699cd480SApple OSS Distributions switch (VM_PURGABLE_STATE_MASK & *state) {
338*699cd480SApple OSS Distributions case VM_PURGABLE_NONVOLATILE:
339*699cd480SApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
340*699cd480SApple OSS Distributions break;
341*699cd480SApple OSS Distributions case VM_PURGABLE_VOLATILE:
342*699cd480SApple OSS Distributions *state = kIOMemoryPurgeableVolatile;
343*699cd480SApple OSS Distributions break;
344*699cd480SApple OSS Distributions case VM_PURGABLE_EMPTY:
345*699cd480SApple OSS Distributions *state = kIOMemoryPurgeableEmpty;
346*699cd480SApple OSS Distributions break;
347*699cd480SApple OSS Distributions default:
348*699cd480SApple OSS Distributions *state = kIOMemoryPurgeableNonVolatile;
349*699cd480SApple OSS Distributions err = kIOReturnNotReady;
350*699cd480SApple OSS Distributions break;
351*699cd480SApple OSS Distributions }
352*699cd480SApple OSS Distributions return err;
353*699cd480SApple OSS Distributions }
354*699cd480SApple OSS Distributions
// One cache mode's mapping onto the VM layer: the VM_WIMG_* pager bits and
// the MAP_MEM_* named-entry object type that implement it.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* value for pager/UPL flags
	unsigned int object_type;   // MAP_MEM_* value for named-entry creation
} iokit_memtype_entry;

// Table indexed by the kIO*Cache cache-mode constants; consumed by
// vmProtForCacheMode(), pagerFlagsForCacheMode() and cacheModeForPagerFlags().
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
372*699cd480SApple OSS Distributions
373*699cd480SApple OSS Distributions static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)374*699cd480SApple OSS Distributions vmProtForCacheMode(IOOptionBits cacheMode)
375*699cd480SApple OSS Distributions {
376*699cd480SApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
377*699cd480SApple OSS Distributions if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
378*699cd480SApple OSS Distributions cacheMode = kIODefaultCache;
379*699cd480SApple OSS Distributions }
380*699cd480SApple OSS Distributions vm_prot_t prot = 0;
381*699cd480SApple OSS Distributions SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
382*699cd480SApple OSS Distributions return prot;
383*699cd480SApple OSS Distributions }
384*699cd480SApple OSS Distributions
385*699cd480SApple OSS Distributions static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)386*699cd480SApple OSS Distributions pagerFlagsForCacheMode(IOOptionBits cacheMode)
387*699cd480SApple OSS Distributions {
388*699cd480SApple OSS Distributions assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
389*699cd480SApple OSS Distributions if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
390*699cd480SApple OSS Distributions cacheMode = kIODefaultCache;
391*699cd480SApple OSS Distributions }
392*699cd480SApple OSS Distributions if (cacheMode == kIODefaultCache) {
393*699cd480SApple OSS Distributions return -1U;
394*699cd480SApple OSS Distributions }
395*699cd480SApple OSS Distributions return iomd_mem_types[cacheMode].wimg;
396*699cd480SApple OSS Distributions }
397*699cd480SApple OSS Distributions
398*699cd480SApple OSS Distributions static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)399*699cd480SApple OSS Distributions cacheModeForPagerFlags(unsigned int pagerFlags)
400*699cd480SApple OSS Distributions {
401*699cd480SApple OSS Distributions pagerFlags &= VM_WIMG_MASK;
402*699cd480SApple OSS Distributions IOOptionBits cacheMode = kIODefaultCache;
403*699cd480SApple OSS Distributions for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
404*699cd480SApple OSS Distributions if (iomd_mem_types[i].wimg == pagerFlags) {
405*699cd480SApple OSS Distributions cacheMode = i;
406*699cd480SApple OSS Distributions break;
407*699cd480SApple OSS Distributions }
408*699cd480SApple OSS Distributions }
409*699cd480SApple OSS Distributions return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
410*699cd480SApple OSS Distributions }
411*699cd480SApple OSS Distributions
412*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
414*699cd480SApple OSS Distributions
// One named VM entry: a send right plus the portion of memory it covers.
struct IOMemoryEntry {
	ipc_port_t entry;   // send right to the mach named entry; released in memoryReferenceFree()
	int64_t offset;     // NOTE(review): offset semantics not visible in this chunk — confirm
	uint64_t size;      // bytes covered by this entry
	uint64_t start;     // NOTE(review): start address/offset semantics not visible here — confirm
};

// Reference-counted set of IOMemoryEntry records backing a descriptor.
// Allocated/grown by memoryReferenceAlloc(), dropped via
// memoryReferenceRelease(), destroyed by memoryReferenceFree().
struct IOMemoryReference {
	volatile SInt32 refCount;           // managed atomically by memoryReferenceRelease()
	vm_prot_t prot;                     // VM protection bits for this reference
	uint32_t capacity;                  // allocated slots in entries[]
	uint32_t count;                     // slots in use
	struct IOMemoryReference * mapRef;  // optional chained reference, freed recursively
	IOMemoryEntry entries[0];           // trailing variable-length array (capacity slots)
};

// Option bits used when creating/cloning memory references.
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Global count of live IOMemoryReference objects (diagnostic counter).
SInt32 gIOMemoryReferenceCount;
438*699cd480SApple OSS Distributions
439*699cd480SApple OSS Distributions IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)440*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
441*699cd480SApple OSS Distributions {
442*699cd480SApple OSS Distributions IOMemoryReference * ref;
443*699cd480SApple OSS Distributions size_t oldCapacity;
444*699cd480SApple OSS Distributions
445*699cd480SApple OSS Distributions if (realloc) {
446*699cd480SApple OSS Distributions oldCapacity = realloc->capacity;
447*699cd480SApple OSS Distributions } else {
448*699cd480SApple OSS Distributions oldCapacity = 0;
449*699cd480SApple OSS Distributions }
450*699cd480SApple OSS Distributions
451*699cd480SApple OSS Distributions // Use the kalloc API instead of manually handling the reallocation
452*699cd480SApple OSS Distributions ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
453*699cd480SApple OSS Distributions oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
454*699cd480SApple OSS Distributions if (ref) {
455*699cd480SApple OSS Distributions if (oldCapacity == 0) {
456*699cd480SApple OSS Distributions ref->refCount = 1;
457*699cd480SApple OSS Distributions OSIncrementAtomic(&gIOMemoryReferenceCount);
458*699cd480SApple OSS Distributions }
459*699cd480SApple OSS Distributions ref->capacity = capacity;
460*699cd480SApple OSS Distributions }
461*699cd480SApple OSS Distributions return ref;
462*699cd480SApple OSS Distributions }
463*699cd480SApple OSS Distributions
464*699cd480SApple OSS Distributions void
memoryReferenceFree(IOMemoryReference * ref)465*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
466*699cd480SApple OSS Distributions {
467*699cd480SApple OSS Distributions IOMemoryEntry * entries;
468*699cd480SApple OSS Distributions
469*699cd480SApple OSS Distributions if (ref->mapRef) {
470*699cd480SApple OSS Distributions memoryReferenceFree(ref->mapRef);
471*699cd480SApple OSS Distributions ref->mapRef = NULL;
472*699cd480SApple OSS Distributions }
473*699cd480SApple OSS Distributions
474*699cd480SApple OSS Distributions entries = ref->entries + ref->count;
475*699cd480SApple OSS Distributions while (entries > &ref->entries[0]) {
476*699cd480SApple OSS Distributions entries--;
477*699cd480SApple OSS Distributions ipc_port_release_send(entries->entry);
478*699cd480SApple OSS Distributions }
479*699cd480SApple OSS Distributions kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
480*699cd480SApple OSS Distributions
481*699cd480SApple OSS Distributions OSDecrementAtomic(&gIOMemoryReferenceCount);
482*699cd480SApple OSS Distributions }
483*699cd480SApple OSS Distributions
484*699cd480SApple OSS Distributions void
memoryReferenceRelease(IOMemoryReference * ref)485*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
486*699cd480SApple OSS Distributions {
487*699cd480SApple OSS Distributions if (1 == OSDecrementAtomic(&ref->refCount)) {
488*699cd480SApple OSS Distributions memoryReferenceFree(ref);
489*699cd480SApple OSS Distributions }
490*699cd480SApple OSS Distributions }
491*699cd480SApple OSS Distributions
492*699cd480SApple OSS Distributions
493*699cd480SApple OSS Distributions IOReturn
memoryReferenceCreate(IOOptionBits options,IOMemoryReference ** reference)494*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceCreate(
495*699cd480SApple OSS Distributions IOOptionBits options,
496*699cd480SApple OSS Distributions IOMemoryReference ** reference)
497*699cd480SApple OSS Distributions {
498*699cd480SApple OSS Distributions enum { kCapacity = 4, kCapacityInc = 4 };
499*699cd480SApple OSS Distributions
500*699cd480SApple OSS Distributions kern_return_t err;
501*699cd480SApple OSS Distributions IOMemoryReference * ref;
502*699cd480SApple OSS Distributions IOMemoryEntry * entries;
503*699cd480SApple OSS Distributions IOMemoryEntry * cloneEntries = NULL;
504*699cd480SApple OSS Distributions vm_map_t map;
505*699cd480SApple OSS Distributions ipc_port_t entry, cloneEntry;
506*699cd480SApple OSS Distributions vm_prot_t prot;
507*699cd480SApple OSS Distributions memory_object_size_t actualSize;
508*699cd480SApple OSS Distributions uint32_t rangeIdx;
509*699cd480SApple OSS Distributions uint32_t count;
510*699cd480SApple OSS Distributions mach_vm_address_t entryAddr, endAddr, entrySize;
511*699cd480SApple OSS Distributions mach_vm_size_t srcAddr, srcLen;
512*699cd480SApple OSS Distributions mach_vm_size_t nextAddr, nextLen;
513*699cd480SApple OSS Distributions mach_vm_size_t offset, remain;
514*699cd480SApple OSS Distributions vm_map_offset_t overmap_start = 0, overmap_end = 0;
515*699cd480SApple OSS Distributions int misaligned_start = 0, misaligned_end = 0;
516*699cd480SApple OSS Distributions IOByteCount physLen;
517*699cd480SApple OSS Distributions IOOptionBits type = (_flags & kIOMemoryTypeMask);
518*699cd480SApple OSS Distributions IOOptionBits cacheMode;
519*699cd480SApple OSS Distributions unsigned int pagerFlags;
520*699cd480SApple OSS Distributions vm_tag_t tag;
521*699cd480SApple OSS Distributions vm_named_entry_kernel_flags_t vmne_kflags;
522*699cd480SApple OSS Distributions
523*699cd480SApple OSS Distributions ref = memoryReferenceAlloc(kCapacity, NULL);
524*699cd480SApple OSS Distributions if (!ref) {
525*699cd480SApple OSS Distributions return kIOReturnNoMemory;
526*699cd480SApple OSS Distributions }
527*699cd480SApple OSS Distributions
528*699cd480SApple OSS Distributions tag = (vm_tag_t) getVMTag(kernel_map);
529*699cd480SApple OSS Distributions vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
530*699cd480SApple OSS Distributions entries = &ref->entries[0];
531*699cd480SApple OSS Distributions count = 0;
532*699cd480SApple OSS Distributions err = KERN_SUCCESS;
533*699cd480SApple OSS Distributions
534*699cd480SApple OSS Distributions offset = 0;
535*699cd480SApple OSS Distributions rangeIdx = 0;
536*699cd480SApple OSS Distributions remain = _length;
537*699cd480SApple OSS Distributions if (_task) {
538*699cd480SApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
539*699cd480SApple OSS Distributions
540*699cd480SApple OSS Distributions // account for IOBMD setLength(), use its capacity as length
541*699cd480SApple OSS Distributions IOBufferMemoryDescriptor * bmd;
542*699cd480SApple OSS Distributions if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
543*699cd480SApple OSS Distributions nextLen = bmd->getCapacity();
544*699cd480SApple OSS Distributions remain = nextLen;
545*699cd480SApple OSS Distributions }
546*699cd480SApple OSS Distributions } else {
547*699cd480SApple OSS Distributions nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
548*699cd480SApple OSS Distributions nextLen = physLen;
549*699cd480SApple OSS Distributions
550*699cd480SApple OSS Distributions // default cache mode for physical
551*699cd480SApple OSS Distributions if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
552*699cd480SApple OSS Distributions IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
553*699cd480SApple OSS Distributions _flags |= (mode << kIOMemoryBufferCacheShift);
554*699cd480SApple OSS Distributions }
555*699cd480SApple OSS Distributions }
556*699cd480SApple OSS Distributions
557*699cd480SApple OSS Distributions // cache mode & vm_prot
558*699cd480SApple OSS Distributions prot = VM_PROT_READ;
559*699cd480SApple OSS Distributions cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
560*699cd480SApple OSS Distributions prot |= vmProtForCacheMode(cacheMode);
561*699cd480SApple OSS Distributions // VM system requires write access to change cache mode
562*699cd480SApple OSS Distributions if (kIODefaultCache != cacheMode) {
563*699cd480SApple OSS Distributions prot |= VM_PROT_WRITE;
564*699cd480SApple OSS Distributions }
565*699cd480SApple OSS Distributions if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
566*699cd480SApple OSS Distributions prot |= VM_PROT_WRITE;
567*699cd480SApple OSS Distributions }
568*699cd480SApple OSS Distributions if (kIOMemoryReferenceWrite & options) {
569*699cd480SApple OSS Distributions prot |= VM_PROT_WRITE;
570*699cd480SApple OSS Distributions }
571*699cd480SApple OSS Distributions if (kIOMemoryReferenceCOW & options) {
572*699cd480SApple OSS Distributions prot |= MAP_MEM_VM_COPY;
573*699cd480SApple OSS Distributions }
574*699cd480SApple OSS Distributions
575*699cd480SApple OSS Distributions if (kIOMemoryUseReserve & _flags) {
576*699cd480SApple OSS Distributions prot |= MAP_MEM_GRAB_SECLUDED;
577*699cd480SApple OSS Distributions }
578*699cd480SApple OSS Distributions
579*699cd480SApple OSS Distributions if ((kIOMemoryReferenceReuse & options) && _memRef) {
580*699cd480SApple OSS Distributions cloneEntries = &_memRef->entries[0];
581*699cd480SApple OSS Distributions prot |= MAP_MEM_NAMED_REUSE;
582*699cd480SApple OSS Distributions }
583*699cd480SApple OSS Distributions
584*699cd480SApple OSS Distributions if (_task) {
585*699cd480SApple OSS Distributions // virtual ranges
586*699cd480SApple OSS Distributions
587*699cd480SApple OSS Distributions if (kIOMemoryBufferPageable & _flags) {
588*699cd480SApple OSS Distributions int ledger_tag, ledger_no_footprint;
589*699cd480SApple OSS Distributions
590*699cd480SApple OSS Distributions // IOBufferMemoryDescriptor alloc - set flags for entry + object create
591*699cd480SApple OSS Distributions prot |= MAP_MEM_NAMED_CREATE;
592*699cd480SApple OSS Distributions
593*699cd480SApple OSS Distributions // default accounting settings:
594*699cd480SApple OSS Distributions // + "none" ledger tag
595*699cd480SApple OSS Distributions // + include in footprint
596*699cd480SApple OSS Distributions // can be changed later with ::setOwnership()
597*699cd480SApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NONE;
598*699cd480SApple OSS Distributions ledger_no_footprint = 0;
599*699cd480SApple OSS Distributions
600*699cd480SApple OSS Distributions if (kIOMemoryBufferPurgeable & _flags) {
601*699cd480SApple OSS Distributions prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
602*699cd480SApple OSS Distributions if (VM_KERN_MEMORY_SKYWALK == tag) {
603*699cd480SApple OSS Distributions // Skywalk purgeable memory accounting:
604*699cd480SApple OSS Distributions // + "network" ledger tag
605*699cd480SApple OSS Distributions // + not included in footprint
606*699cd480SApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NETWORK;
607*699cd480SApple OSS Distributions ledger_no_footprint = 1;
608*699cd480SApple OSS Distributions } else {
609*699cd480SApple OSS Distributions // regular purgeable memory accounting:
610*699cd480SApple OSS Distributions // + no ledger tag
611*699cd480SApple OSS Distributions // + included in footprint
612*699cd480SApple OSS Distributions ledger_tag = VM_LEDGER_TAG_NONE;
613*699cd480SApple OSS Distributions ledger_no_footprint = 0;
614*699cd480SApple OSS Distributions }
615*699cd480SApple OSS Distributions }
616*699cd480SApple OSS Distributions vmne_kflags.vmnekf_ledger_tag = ledger_tag;
617*699cd480SApple OSS Distributions vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
618*699cd480SApple OSS Distributions if (kIOMemoryUseReserve & _flags) {
619*699cd480SApple OSS Distributions prot |= MAP_MEM_GRAB_SECLUDED;
620*699cd480SApple OSS Distributions }
621*699cd480SApple OSS Distributions
622*699cd480SApple OSS Distributions prot |= VM_PROT_WRITE;
623*699cd480SApple OSS Distributions map = NULL;
624*699cd480SApple OSS Distributions } else {
625*699cd480SApple OSS Distributions prot |= MAP_MEM_USE_DATA_ADDR;
626*699cd480SApple OSS Distributions map = get_task_map(_task);
627*699cd480SApple OSS Distributions }
628*699cd480SApple OSS Distributions DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
629*699cd480SApple OSS Distributions
630*699cd480SApple OSS Distributions while (remain) {
631*699cd480SApple OSS Distributions srcAddr = nextAddr;
632*699cd480SApple OSS Distributions srcLen = nextLen;
633*699cd480SApple OSS Distributions nextAddr = 0;
634*699cd480SApple OSS Distributions nextLen = 0;
635*699cd480SApple OSS Distributions // coalesce addr range
636*699cd480SApple OSS Distributions for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
637*699cd480SApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
638*699cd480SApple OSS Distributions if ((srcAddr + srcLen) != nextAddr) {
639*699cd480SApple OSS Distributions break;
640*699cd480SApple OSS Distributions }
641*699cd480SApple OSS Distributions srcLen += nextLen;
642*699cd480SApple OSS Distributions }
643*699cd480SApple OSS Distributions
644*699cd480SApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & prot) {
645*699cd480SApple OSS Distributions entryAddr = srcAddr;
646*699cd480SApple OSS Distributions endAddr = srcAddr + srcLen;
647*699cd480SApple OSS Distributions } else {
648*699cd480SApple OSS Distributions entryAddr = trunc_page_64(srcAddr);
649*699cd480SApple OSS Distributions endAddr = round_page_64(srcAddr + srcLen);
650*699cd480SApple OSS Distributions }
651*699cd480SApple OSS Distributions if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
652*699cd480SApple OSS Distributions DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
653*699cd480SApple OSS Distributions }
654*699cd480SApple OSS Distributions
655*699cd480SApple OSS Distributions do{
656*699cd480SApple OSS Distributions entrySize = (endAddr - entryAddr);
657*699cd480SApple OSS Distributions if (!entrySize) {
658*699cd480SApple OSS Distributions break;
659*699cd480SApple OSS Distributions }
660*699cd480SApple OSS Distributions actualSize = entrySize;
661*699cd480SApple OSS Distributions
662*699cd480SApple OSS Distributions cloneEntry = MACH_PORT_NULL;
663*699cd480SApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
664*699cd480SApple OSS Distributions if (cloneEntries < &_memRef->entries[_memRef->count]) {
665*699cd480SApple OSS Distributions cloneEntry = cloneEntries->entry;
666*699cd480SApple OSS Distributions } else {
667*699cd480SApple OSS Distributions prot &= ~MAP_MEM_NAMED_REUSE;
668*699cd480SApple OSS Distributions }
669*699cd480SApple OSS Distributions }
670*699cd480SApple OSS Distributions
671*699cd480SApple OSS Distributions err = mach_make_memory_entry_internal(map,
672*699cd480SApple OSS Distributions &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
673*699cd480SApple OSS Distributions
674*699cd480SApple OSS Distributions if (KERN_SUCCESS != err) {
675*699cd480SApple OSS Distributions DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
676*699cd480SApple OSS Distributions break;
677*699cd480SApple OSS Distributions }
678*699cd480SApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & prot) {
679*699cd480SApple OSS Distributions if (actualSize > entrySize) {
680*699cd480SApple OSS Distributions actualSize = entrySize;
681*699cd480SApple OSS Distributions }
682*699cd480SApple OSS Distributions } else if (actualSize > entrySize) {
683*699cd480SApple OSS Distributions panic("mach_make_memory_entry_64 actualSize");
684*699cd480SApple OSS Distributions }
685*699cd480SApple OSS Distributions
686*699cd480SApple OSS Distributions memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
687*699cd480SApple OSS Distributions
688*699cd480SApple OSS Distributions if (count && overmap_start) {
689*699cd480SApple OSS Distributions /*
690*699cd480SApple OSS Distributions * Track misaligned start for all
691*699cd480SApple OSS Distributions * except the first entry.
692*699cd480SApple OSS Distributions */
693*699cd480SApple OSS Distributions misaligned_start++;
694*699cd480SApple OSS Distributions }
695*699cd480SApple OSS Distributions
696*699cd480SApple OSS Distributions if (overmap_end) {
697*699cd480SApple OSS Distributions /*
698*699cd480SApple OSS Distributions * Ignore misaligned end for the
699*699cd480SApple OSS Distributions * last entry.
700*699cd480SApple OSS Distributions */
701*699cd480SApple OSS Distributions if ((entryAddr + actualSize) != endAddr) {
702*699cd480SApple OSS Distributions misaligned_end++;
703*699cd480SApple OSS Distributions }
704*699cd480SApple OSS Distributions }
705*699cd480SApple OSS Distributions
706*699cd480SApple OSS Distributions if (count) {
707*699cd480SApple OSS Distributions /* Middle entries */
708*699cd480SApple OSS Distributions if (misaligned_start || misaligned_end) {
709*699cd480SApple OSS Distributions DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
710*699cd480SApple OSS Distributions ipc_port_release_send(entry);
711*699cd480SApple OSS Distributions err = KERN_NOT_SUPPORTED;
712*699cd480SApple OSS Distributions break;
713*699cd480SApple OSS Distributions }
714*699cd480SApple OSS Distributions }
715*699cd480SApple OSS Distributions
716*699cd480SApple OSS Distributions if (count >= ref->capacity) {
717*699cd480SApple OSS Distributions ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
718*699cd480SApple OSS Distributions entries = &ref->entries[count];
719*699cd480SApple OSS Distributions }
720*699cd480SApple OSS Distributions entries->entry = entry;
721*699cd480SApple OSS Distributions entries->size = actualSize;
722*699cd480SApple OSS Distributions entries->offset = offset + (entryAddr - srcAddr);
723*699cd480SApple OSS Distributions entries->start = entryAddr;
724*699cd480SApple OSS Distributions entryAddr += actualSize;
725*699cd480SApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
726*699cd480SApple OSS Distributions if ((cloneEntries->entry == entries->entry)
727*699cd480SApple OSS Distributions && (cloneEntries->size == entries->size)
728*699cd480SApple OSS Distributions && (cloneEntries->offset == entries->offset)) {
729*699cd480SApple OSS Distributions cloneEntries++;
730*699cd480SApple OSS Distributions } else {
731*699cd480SApple OSS Distributions prot &= ~MAP_MEM_NAMED_REUSE;
732*699cd480SApple OSS Distributions }
733*699cd480SApple OSS Distributions }
734*699cd480SApple OSS Distributions entries++;
735*699cd480SApple OSS Distributions count++;
736*699cd480SApple OSS Distributions }while (true);
737*699cd480SApple OSS Distributions offset += srcLen;
738*699cd480SApple OSS Distributions remain -= srcLen;
739*699cd480SApple OSS Distributions }
740*699cd480SApple OSS Distributions } else {
741*699cd480SApple OSS Distributions // _task == 0, physical or kIOMemoryTypeUPL
742*699cd480SApple OSS Distributions memory_object_t pager;
743*699cd480SApple OSS Distributions vm_size_t size = ptoa_64(_pages);
744*699cd480SApple OSS Distributions
745*699cd480SApple OSS Distributions if (!getKernelReserved()) {
746*699cd480SApple OSS Distributions panic("getKernelReserved");
747*699cd480SApple OSS Distributions }
748*699cd480SApple OSS Distributions
749*699cd480SApple OSS Distributions reserved->dp.pagerContig = (1 == _rangesCount);
750*699cd480SApple OSS Distributions reserved->dp.memory = this;
751*699cd480SApple OSS Distributions
752*699cd480SApple OSS Distributions pagerFlags = pagerFlagsForCacheMode(cacheMode);
753*699cd480SApple OSS Distributions if (-1U == pagerFlags) {
754*699cd480SApple OSS Distributions panic("phys is kIODefaultCache");
755*699cd480SApple OSS Distributions }
756*699cd480SApple OSS Distributions if (reserved->dp.pagerContig) {
757*699cd480SApple OSS Distributions pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
758*699cd480SApple OSS Distributions }
759*699cd480SApple OSS Distributions
760*699cd480SApple OSS Distributions pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
761*699cd480SApple OSS Distributions size, pagerFlags);
762*699cd480SApple OSS Distributions assert(pager);
763*699cd480SApple OSS Distributions if (!pager) {
764*699cd480SApple OSS Distributions DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
765*699cd480SApple OSS Distributions err = kIOReturnVMError;
766*699cd480SApple OSS Distributions } else {
767*699cd480SApple OSS Distributions srcAddr = nextAddr;
768*699cd480SApple OSS Distributions entryAddr = trunc_page_64(srcAddr);
769*699cd480SApple OSS Distributions err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
770*699cd480SApple OSS Distributions size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
771*699cd480SApple OSS Distributions assert(KERN_SUCCESS == err);
772*699cd480SApple OSS Distributions if (KERN_SUCCESS != err) {
773*699cd480SApple OSS Distributions device_pager_deallocate(pager);
774*699cd480SApple OSS Distributions } else {
775*699cd480SApple OSS Distributions reserved->dp.devicePager = pager;
776*699cd480SApple OSS Distributions entries->entry = entry;
777*699cd480SApple OSS Distributions entries->size = size;
778*699cd480SApple OSS Distributions entries->offset = offset + (entryAddr - srcAddr);
779*699cd480SApple OSS Distributions entries++;
780*699cd480SApple OSS Distributions count++;
781*699cd480SApple OSS Distributions }
782*699cd480SApple OSS Distributions }
783*699cd480SApple OSS Distributions }
784*699cd480SApple OSS Distributions
785*699cd480SApple OSS Distributions ref->count = count;
786*699cd480SApple OSS Distributions ref->prot = prot;
787*699cd480SApple OSS Distributions
788*699cd480SApple OSS Distributions if (_task && (KERN_SUCCESS == err)
789*699cd480SApple OSS Distributions && (kIOMemoryMapCopyOnWrite & _flags)
790*699cd480SApple OSS Distributions && !(kIOMemoryReferenceCOW & options)) {
791*699cd480SApple OSS Distributions err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
792*699cd480SApple OSS Distributions if (KERN_SUCCESS != err) {
793*699cd480SApple OSS Distributions DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
794*699cd480SApple OSS Distributions }
795*699cd480SApple OSS Distributions }
796*699cd480SApple OSS Distributions
797*699cd480SApple OSS Distributions if (KERN_SUCCESS == err) {
798*699cd480SApple OSS Distributions if (MAP_MEM_NAMED_REUSE & prot) {
799*699cd480SApple OSS Distributions memoryReferenceFree(ref);
800*699cd480SApple OSS Distributions OSIncrementAtomic(&_memRef->refCount);
801*699cd480SApple OSS Distributions ref = _memRef;
802*699cd480SApple OSS Distributions }
803*699cd480SApple OSS Distributions } else {
804*699cd480SApple OSS Distributions DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
805*699cd480SApple OSS Distributions memoryReferenceFree(ref);
806*699cd480SApple OSS Distributions ref = NULL;
807*699cd480SApple OSS Distributions }
808*699cd480SApple OSS Distributions
809*699cd480SApple OSS Distributions *reference = ref;
810*699cd480SApple OSS Distributions
811*699cd480SApple OSS Distributions return err;
812*699cd480SApple OSS Distributions }
813*699cd480SApple OSS Distributions
814*699cd480SApple OSS Distributions static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)815*699cd480SApple OSS Distributions IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
816*699cd480SApple OSS Distributions {
817*699cd480SApple OSS Distributions switch (kIOMapGuardedMask & options) {
818*699cd480SApple OSS Distributions default:
819*699cd480SApple OSS Distributions case kIOMapGuardedSmall:
820*699cd480SApple OSS Distributions return vm_map_page_size(map);
821*699cd480SApple OSS Distributions case kIOMapGuardedLarge:
822*699cd480SApple OSS Distributions assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
823*699cd480SApple OSS Distributions return kIOMapGuardSizeLarge;
824*699cd480SApple OSS Distributions }
825*699cd480SApple OSS Distributions ;
826*699cd480SApple OSS Distributions }
827*699cd480SApple OSS Distributions
828*699cd480SApple OSS Distributions static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)829*699cd480SApple OSS Distributions IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
830*699cd480SApple OSS Distributions vm_map_offset_t addr, mach_vm_size_t size)
831*699cd480SApple OSS Distributions {
832*699cd480SApple OSS Distributions kern_return_t kr;
833*699cd480SApple OSS Distributions vm_map_offset_t actualAddr;
834*699cd480SApple OSS Distributions mach_vm_size_t actualSize;
835*699cd480SApple OSS Distributions
836*699cd480SApple OSS Distributions actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
837*699cd480SApple OSS Distributions actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
838*699cd480SApple OSS Distributions
839*699cd480SApple OSS Distributions if (kIOMapGuardedMask & options) {
840*699cd480SApple OSS Distributions mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
841*699cd480SApple OSS Distributions actualAddr -= guardSize;
842*699cd480SApple OSS Distributions actualSize += 2 * guardSize;
843*699cd480SApple OSS Distributions }
844*699cd480SApple OSS Distributions kr = mach_vm_deallocate(map, actualAddr, actualSize);
845*699cd480SApple OSS Distributions
846*699cd480SApple OSS Distributions return kr;
847*699cd480SApple OSS Distributions }
848*699cd480SApple OSS Distributions
849*699cd480SApple OSS Distributions kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map,void * _ref)850*699cd480SApple OSS Distributions IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
851*699cd480SApple OSS Distributions {
852*699cd480SApple OSS Distributions IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
853*699cd480SApple OSS Distributions IOReturn err;
854*699cd480SApple OSS Distributions vm_map_offset_t addr;
855*699cd480SApple OSS Distributions mach_vm_size_t size;
856*699cd480SApple OSS Distributions mach_vm_size_t guardSize;
857*699cd480SApple OSS Distributions vm_map_kernel_flags_t vmk_flags;
858*699cd480SApple OSS Distributions
859*699cd480SApple OSS Distributions addr = ref->mapped;
860*699cd480SApple OSS Distributions size = ref->size;
861*699cd480SApple OSS Distributions guardSize = 0;
862*699cd480SApple OSS Distributions
863*699cd480SApple OSS Distributions if (kIOMapGuardedMask & ref->options) {
864*699cd480SApple OSS Distributions if (!(kIOMapAnywhere & ref->options)) {
865*699cd480SApple OSS Distributions return kIOReturnBadArgument;
866*699cd480SApple OSS Distributions }
867*699cd480SApple OSS Distributions guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
868*699cd480SApple OSS Distributions size += 2 * guardSize;
869*699cd480SApple OSS Distributions }
870*699cd480SApple OSS Distributions if (kIOMapAnywhere & ref->options) {
871*699cd480SApple OSS Distributions vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
872*699cd480SApple OSS Distributions } else {
873*699cd480SApple OSS Distributions vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
874*699cd480SApple OSS Distributions }
875*699cd480SApple OSS Distributions vmk_flags.vm_tag = ref->tag;
876*699cd480SApple OSS Distributions
877*699cd480SApple OSS Distributions /*
878*699cd480SApple OSS Distributions * Mapping memory into the kernel_map using IOMDs use the data range.
879*699cd480SApple OSS Distributions * Memory being mapped should not contain kernel pointers.
880*699cd480SApple OSS Distributions */
881*699cd480SApple OSS Distributions if (map == kernel_map) {
882*699cd480SApple OSS Distributions vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
883*699cd480SApple OSS Distributions }
884*699cd480SApple OSS Distributions
885*699cd480SApple OSS Distributions err = vm_map_enter_mem_object(map, &addr, size,
886*699cd480SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
887*699cd480SApple OSS Distributions // TODO4K this should not be necessary...
888*699cd480SApple OSS Distributions (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
889*699cd480SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
890*699cd480SApple OSS Distributions (vm_map_offset_t) 0,
891*699cd480SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
892*699cd480SApple OSS Distributions vmk_flags,
893*699cd480SApple OSS Distributions IPC_PORT_NULL,
894*699cd480SApple OSS Distributions (memory_object_offset_t) 0,
895*699cd480SApple OSS Distributions false, /* copy */
896*699cd480SApple OSS Distributions ref->prot,
897*699cd480SApple OSS Distributions ref->prot,
898*699cd480SApple OSS Distributions VM_INHERIT_NONE);
899*699cd480SApple OSS Distributions if (KERN_SUCCESS == err) {
900*699cd480SApple OSS Distributions ref->mapped = (mach_vm_address_t) addr;
901*699cd480SApple OSS Distributions ref->map = map;
902*699cd480SApple OSS Distributions if (kIOMapGuardedMask & ref->options) {
903*699cd480SApple OSS Distributions vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
904*699cd480SApple OSS Distributions
905*699cd480SApple OSS Distributions err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
906*699cd480SApple OSS Distributions assert(KERN_SUCCESS == err);
907*699cd480SApple OSS Distributions err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
908*699cd480SApple OSS Distributions assert(KERN_SUCCESS == err);
909*699cd480SApple OSS Distributions ref->mapped += guardSize;
910*699cd480SApple OSS Distributions }
911*699cd480SApple OSS Distributions }
912*699cd480SApple OSS Distributions
913*699cd480SApple OSS Distributions return err;
914*699cd480SApple OSS Distributions }
915*699cd480SApple OSS Distributions
916*699cd480SApple OSS Distributions IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)917*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMap(
918*699cd480SApple OSS Distributions IOMemoryReference * ref,
919*699cd480SApple OSS Distributions vm_map_t map,
920*699cd480SApple OSS Distributions mach_vm_size_t inoffset,
921*699cd480SApple OSS Distributions mach_vm_size_t size,
922*699cd480SApple OSS Distributions IOOptionBits options,
923*699cd480SApple OSS Distributions mach_vm_address_t * inaddr)
924*699cd480SApple OSS Distributions {
925*699cd480SApple OSS Distributions IOReturn err;
926*699cd480SApple OSS Distributions int64_t offset = inoffset;
927*699cd480SApple OSS Distributions uint32_t rangeIdx, entryIdx;
928*699cd480SApple OSS Distributions vm_map_offset_t addr, mapAddr;
929*699cd480SApple OSS Distributions vm_map_offset_t pageOffset, entryOffset, remain, chunk;
930*699cd480SApple OSS Distributions
931*699cd480SApple OSS Distributions mach_vm_address_t nextAddr;
932*699cd480SApple OSS Distributions mach_vm_size_t nextLen;
933*699cd480SApple OSS Distributions IOByteCount physLen;
934*699cd480SApple OSS Distributions IOMemoryEntry * entry;
935*699cd480SApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
936*699cd480SApple OSS Distributions IOOptionBits type;
937*699cd480SApple OSS Distributions IOOptionBits cacheMode;
938*699cd480SApple OSS Distributions vm_tag_t tag;
939*699cd480SApple OSS Distributions // for the kIOMapPrefault option.
940*699cd480SApple OSS Distributions upl_page_info_t * pageList = NULL;
941*699cd480SApple OSS Distributions UInt currentPageIndex = 0;
942*699cd480SApple OSS Distributions bool didAlloc;
943*699cd480SApple OSS Distributions
944*699cd480SApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
945*699cd480SApple OSS Distributions
946*699cd480SApple OSS Distributions if (ref->mapRef) {
947*699cd480SApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
948*699cd480SApple OSS Distributions return err;
949*699cd480SApple OSS Distributions }
950*699cd480SApple OSS Distributions
951*699cd480SApple OSS Distributions if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
952*699cd480SApple OSS Distributions err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
953*699cd480SApple OSS Distributions return err;
954*699cd480SApple OSS Distributions }
955*699cd480SApple OSS Distributions
956*699cd480SApple OSS Distributions type = _flags & kIOMemoryTypeMask;
957*699cd480SApple OSS Distributions
958*699cd480SApple OSS Distributions prot = VM_PROT_READ;
959*699cd480SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
960*699cd480SApple OSS Distributions prot |= VM_PROT_WRITE;
961*699cd480SApple OSS Distributions }
962*699cd480SApple OSS Distributions prot &= ref->prot;
963*699cd480SApple OSS Distributions
964*699cd480SApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
965*699cd480SApple OSS Distributions if (kIODefaultCache != cacheMode) {
966*699cd480SApple OSS Distributions // VM system requires write access to update named entry cache mode
967*699cd480SApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
968*699cd480SApple OSS Distributions }
969*699cd480SApple OSS Distributions
970*699cd480SApple OSS Distributions tag = (typeof(tag))getVMTag(map);
971*699cd480SApple OSS Distributions
972*699cd480SApple OSS Distributions if (_task) {
973*699cd480SApple OSS Distributions // Find first range for offset
974*699cd480SApple OSS Distributions if (!_rangesCount) {
975*699cd480SApple OSS Distributions return kIOReturnBadArgument;
976*699cd480SApple OSS Distributions }
977*699cd480SApple OSS Distributions for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
978*699cd480SApple OSS Distributions getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
979*699cd480SApple OSS Distributions if (remain < nextLen) {
980*699cd480SApple OSS Distributions break;
981*699cd480SApple OSS Distributions }
982*699cd480SApple OSS Distributions remain -= nextLen;
983*699cd480SApple OSS Distributions }
984*699cd480SApple OSS Distributions } else {
985*699cd480SApple OSS Distributions rangeIdx = 0;
986*699cd480SApple OSS Distributions remain = 0;
987*699cd480SApple OSS Distributions nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
988*699cd480SApple OSS Distributions nextLen = size;
989*699cd480SApple OSS Distributions }
990*699cd480SApple OSS Distributions
991*699cd480SApple OSS Distributions assert(remain < nextLen);
992*699cd480SApple OSS Distributions if (remain >= nextLen) {
993*699cd480SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
994*699cd480SApple OSS Distributions return kIOReturnBadArgument;
995*699cd480SApple OSS Distributions }
996*699cd480SApple OSS Distributions
997*699cd480SApple OSS Distributions nextAddr += remain;
998*699cd480SApple OSS Distributions nextLen -= remain;
999*699cd480SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1000*699cd480SApple OSS Distributions pageOffset = (vm_map_page_mask(map) & nextAddr);
1001*699cd480SApple OSS Distributions #else /* __ARM_MIXED_PAGE_SIZE__ */
1002*699cd480SApple OSS Distributions pageOffset = (page_mask & nextAddr);
1003*699cd480SApple OSS Distributions #endif /* __ARM_MIXED_PAGE_SIZE__ */
1004*699cd480SApple OSS Distributions addr = 0;
1005*699cd480SApple OSS Distributions didAlloc = false;
1006*699cd480SApple OSS Distributions
1007*699cd480SApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1008*699cd480SApple OSS Distributions addr = *inaddr;
1009*699cd480SApple OSS Distributions if (pageOffset != (vm_map_page_mask(map) & addr)) {
1010*699cd480SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1011*699cd480SApple OSS Distributions }
1012*699cd480SApple OSS Distributions addr -= pageOffset;
1013*699cd480SApple OSS Distributions }
1014*699cd480SApple OSS Distributions
1015*699cd480SApple OSS Distributions // find first entry for offset
1016*699cd480SApple OSS Distributions for (entryIdx = 0;
1017*699cd480SApple OSS Distributions (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1018*699cd480SApple OSS Distributions entryIdx++) {
1019*699cd480SApple OSS Distributions }
1020*699cd480SApple OSS Distributions entryIdx--;
1021*699cd480SApple OSS Distributions entry = &ref->entries[entryIdx];
1022*699cd480SApple OSS Distributions
1023*699cd480SApple OSS Distributions // allocate VM
1024*699cd480SApple OSS Distributions #if __ARM_MIXED_PAGE_SIZE__
1025*699cd480SApple OSS Distributions size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1026*699cd480SApple OSS Distributions #else
1027*699cd480SApple OSS Distributions size = round_page_64(size + pageOffset);
1028*699cd480SApple OSS Distributions #endif
1029*699cd480SApple OSS Distributions if (kIOMapOverwrite & options) {
1030*699cd480SApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1031*699cd480SApple OSS Distributions map = IOPageableMapForAddress(addr);
1032*699cd480SApple OSS Distributions }
1033*699cd480SApple OSS Distributions err = KERN_SUCCESS;
1034*699cd480SApple OSS Distributions } else {
1035*699cd480SApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1036*699cd480SApple OSS Distributions ref.map = map;
1037*699cd480SApple OSS Distributions ref.tag = tag;
1038*699cd480SApple OSS Distributions ref.options = options;
1039*699cd480SApple OSS Distributions ref.size = size;
1040*699cd480SApple OSS Distributions ref.prot = prot;
1041*699cd480SApple OSS Distributions if (options & kIOMapAnywhere) {
1042*699cd480SApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1043*699cd480SApple OSS Distributions ref.mapped = 0;
1044*699cd480SApple OSS Distributions } else {
1045*699cd480SApple OSS Distributions ref.mapped = addr;
1046*699cd480SApple OSS Distributions }
1047*699cd480SApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1048*699cd480SApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1049*699cd480SApple OSS Distributions } else {
1050*699cd480SApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1051*699cd480SApple OSS Distributions }
1052*699cd480SApple OSS Distributions if (KERN_SUCCESS == err) {
1053*699cd480SApple OSS Distributions addr = ref.mapped;
1054*699cd480SApple OSS Distributions map = ref.map;
1055*699cd480SApple OSS Distributions didAlloc = true;
1056*699cd480SApple OSS Distributions }
1057*699cd480SApple OSS Distributions }
1058*699cd480SApple OSS Distributions
1059*699cd480SApple OSS Distributions /*
1060*699cd480SApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1061*699cd480SApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1062*699cd480SApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1063*699cd480SApple OSS Distributions * operations.
1064*699cd480SApple OSS Distributions */
1065*699cd480SApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1066*699cd480SApple OSS Distributions options &= ~kIOMapPrefault;
1067*699cd480SApple OSS Distributions }
1068*699cd480SApple OSS Distributions
1069*699cd480SApple OSS Distributions /*
1070*699cd480SApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1071*699cd480SApple OSS Distributions * memory type, and the underlying data.
1072*699cd480SApple OSS Distributions */
1073*699cd480SApple OSS Distributions if (options & kIOMapPrefault) {
1074*699cd480SApple OSS Distributions /*
1075*699cd480SApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1076*699cd480SApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1077*699cd480SApple OSS Distributions */
1078*699cd480SApple OSS Distributions assert(_wireCount != 0);
1079*699cd480SApple OSS Distributions assert(_memoryEntries != NULL);
1080*699cd480SApple OSS Distributions if ((_wireCount == 0) ||
1081*699cd480SApple OSS Distributions (_memoryEntries == NULL)) {
1082*699cd480SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1083*699cd480SApple OSS Distributions return kIOReturnBadArgument;
1084*699cd480SApple OSS Distributions }
1085*699cd480SApple OSS Distributions
1086*699cd480SApple OSS Distributions // Get the page list.
1087*699cd480SApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1088*699cd480SApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1089*699cd480SApple OSS Distributions pageList = getPageList(dataP);
1090*699cd480SApple OSS Distributions
1091*699cd480SApple OSS Distributions // Get the number of IOPLs.
1092*699cd480SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1093*699cd480SApple OSS Distributions
1094*699cd480SApple OSS Distributions /*
1095*699cd480SApple OSS Distributions * Scan through the IOPL Info Blocks, looking for the first block containing
1096*699cd480SApple OSS Distributions  * the offset. The search will go past it, so we'll need to go back to the
1097*699cd480SApple OSS Distributions * right range at the end.
1098*699cd480SApple OSS Distributions */
1099*699cd480SApple OSS Distributions UInt ioplIndex = 0;
1100*699cd480SApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1101*699cd480SApple OSS Distributions ioplIndex++;
1102*699cd480SApple OSS Distributions }
1103*699cd480SApple OSS Distributions ioplIndex--;
1104*699cd480SApple OSS Distributions
1105*699cd480SApple OSS Distributions // Retrieve the IOPL info block.
1106*699cd480SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1107*699cd480SApple OSS Distributions
1108*699cd480SApple OSS Distributions /*
1109*699cd480SApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1110*699cd480SApple OSS Distributions * array.
1111*699cd480SApple OSS Distributions */
1112*699cd480SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1113*699cd480SApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1114*699cd480SApple OSS Distributions } else {
1115*699cd480SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1116*699cd480SApple OSS Distributions }
1117*699cd480SApple OSS Distributions
1118*699cd480SApple OSS Distributions 	// Rebase [offset] into the IOPL in order to look for the first page index.
1119*699cd480SApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1120*699cd480SApple OSS Distributions
1121*699cd480SApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1122*699cd480SApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1123*699cd480SApple OSS Distributions }
1124*699cd480SApple OSS Distributions
1125*699cd480SApple OSS Distributions // enter mappings
1126*699cd480SApple OSS Distributions remain = size;
1127*699cd480SApple OSS Distributions mapAddr = addr;
1128*699cd480SApple OSS Distributions addr += pageOffset;
1129*699cd480SApple OSS Distributions
1130*699cd480SApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1131*699cd480SApple OSS Distributions entryOffset = offset - entry->offset;
1132*699cd480SApple OSS Distributions if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1133*699cd480SApple OSS Distributions err = kIOReturnNotAligned;
1134*699cd480SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1135*699cd480SApple OSS Distributions break;
1136*699cd480SApple OSS Distributions }
1137*699cd480SApple OSS Distributions
1138*699cd480SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1139*699cd480SApple OSS Distributions vm_size_t unused = 0;
1140*699cd480SApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1141*699cd480SApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1142*699cd480SApple OSS Distributions assert(KERN_SUCCESS == err);
1143*699cd480SApple OSS Distributions }
1144*699cd480SApple OSS Distributions
1145*699cd480SApple OSS Distributions entryOffset -= pageOffset;
1146*699cd480SApple OSS Distributions if (entryOffset >= entry->size) {
1147*699cd480SApple OSS Distributions panic("entryOffset");
1148*699cd480SApple OSS Distributions }
1149*699cd480SApple OSS Distributions chunk = entry->size - entryOffset;
1150*699cd480SApple OSS Distributions if (chunk) {
1151*699cd480SApple OSS Distributions vm_map_kernel_flags_t vmk_flags = {
1152*699cd480SApple OSS Distributions .vmf_fixed = true,
1153*699cd480SApple OSS Distributions .vmf_overwrite = true,
1154*699cd480SApple OSS Distributions .vm_tag = tag,
1155*699cd480SApple OSS Distributions .vmkf_iokit_acct = true,
1156*699cd480SApple OSS Distributions };
1157*699cd480SApple OSS Distributions
1158*699cd480SApple OSS Distributions if (chunk > remain) {
1159*699cd480SApple OSS Distributions chunk = remain;
1160*699cd480SApple OSS Distributions }
1161*699cd480SApple OSS Distributions if (options & kIOMapPrefault) {
1162*699cd480SApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1163*699cd480SApple OSS Distributions
1164*699cd480SApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1165*699cd480SApple OSS Distributions &mapAddr,
1166*699cd480SApple OSS Distributions chunk, 0 /* mask */,
1167*699cd480SApple OSS Distributions vmk_flags,
1168*699cd480SApple OSS Distributions entry->entry,
1169*699cd480SApple OSS Distributions entryOffset,
1170*699cd480SApple OSS Distributions prot, // cur
1171*699cd480SApple OSS Distributions prot, // max
1172*699cd480SApple OSS Distributions &pageList[currentPageIndex],
1173*699cd480SApple OSS Distributions nb_pages);
1174*699cd480SApple OSS Distributions
1175*699cd480SApple OSS Distributions if (err || vm_map_page_mask(map) < PAGE_MASK) {
1176*699cd480SApple OSS Distributions DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1177*699cd480SApple OSS Distributions }
1178*699cd480SApple OSS Distributions // Compute the next index in the page list.
1179*699cd480SApple OSS Distributions currentPageIndex += nb_pages;
1180*699cd480SApple OSS Distributions assert(currentPageIndex <= _pages);
1181*699cd480SApple OSS Distributions } else {
1182*699cd480SApple OSS Distributions err = vm_map_enter_mem_object(map,
1183*699cd480SApple OSS Distributions &mapAddr,
1184*699cd480SApple OSS Distributions chunk, 0 /* mask */,
1185*699cd480SApple OSS Distributions vmk_flags,
1186*699cd480SApple OSS Distributions entry->entry,
1187*699cd480SApple OSS Distributions entryOffset,
1188*699cd480SApple OSS Distributions false, // copy
1189*699cd480SApple OSS Distributions prot, // cur
1190*699cd480SApple OSS Distributions prot, // max
1191*699cd480SApple OSS Distributions VM_INHERIT_NONE);
1192*699cd480SApple OSS Distributions }
1193*699cd480SApple OSS Distributions if (KERN_SUCCESS != err) {
1194*699cd480SApple OSS Distributions DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1195*699cd480SApple OSS Distributions break;
1196*699cd480SApple OSS Distributions }
1197*699cd480SApple OSS Distributions remain -= chunk;
1198*699cd480SApple OSS Distributions if (!remain) {
1199*699cd480SApple OSS Distributions break;
1200*699cd480SApple OSS Distributions }
1201*699cd480SApple OSS Distributions mapAddr += chunk;
1202*699cd480SApple OSS Distributions offset += chunk - pageOffset;
1203*699cd480SApple OSS Distributions }
1204*699cd480SApple OSS Distributions pageOffset = 0;
1205*699cd480SApple OSS Distributions entry++;
1206*699cd480SApple OSS Distributions entryIdx++;
1207*699cd480SApple OSS Distributions if (entryIdx >= ref->count) {
1208*699cd480SApple OSS Distributions err = kIOReturnOverrun;
1209*699cd480SApple OSS Distributions DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1210*699cd480SApple OSS Distributions break;
1211*699cd480SApple OSS Distributions }
1212*699cd480SApple OSS Distributions }
1213*699cd480SApple OSS Distributions
1214*699cd480SApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1215*699cd480SApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1216*699cd480SApple OSS Distributions addr = 0;
1217*699cd480SApple OSS Distributions }
1218*699cd480SApple OSS Distributions *inaddr = addr;
1219*699cd480SApple OSS Distributions
1220*699cd480SApple OSS Distributions if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1221*699cd480SApple OSS Distributions DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1222*699cd480SApple OSS Distributions }
1223*699cd480SApple OSS Distributions return err;
1224*699cd480SApple OSS Distributions }
1225*699cd480SApple OSS Distributions
1226*699cd480SApple OSS Distributions #define LOGUNALIGN 0
1227*699cd480SApple OSS Distributions IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1228*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1229*699cd480SApple OSS Distributions IOMemoryReference * ref,
1230*699cd480SApple OSS Distributions vm_map_t map,
1231*699cd480SApple OSS Distributions mach_vm_size_t inoffset,
1232*699cd480SApple OSS Distributions mach_vm_size_t size,
1233*699cd480SApple OSS Distributions IOOptionBits options,
1234*699cd480SApple OSS Distributions mach_vm_address_t * inaddr)
1235*699cd480SApple OSS Distributions {
1236*699cd480SApple OSS Distributions IOReturn err;
1237*699cd480SApple OSS Distributions int64_t offset = inoffset;
1238*699cd480SApple OSS Distributions uint32_t entryIdx, firstEntryIdx;
1239*699cd480SApple OSS Distributions vm_map_offset_t addr, mapAddr, mapAddrOut;
1240*699cd480SApple OSS Distributions vm_map_offset_t entryOffset, remain, chunk;
1241*699cd480SApple OSS Distributions
1242*699cd480SApple OSS Distributions IOMemoryEntry * entry;
1243*699cd480SApple OSS Distributions vm_prot_t prot, memEntryCacheMode;
1244*699cd480SApple OSS Distributions IOOptionBits type;
1245*699cd480SApple OSS Distributions IOOptionBits cacheMode;
1246*699cd480SApple OSS Distributions vm_tag_t tag;
1247*699cd480SApple OSS Distributions // for the kIOMapPrefault option.
1248*699cd480SApple OSS Distributions upl_page_info_t * pageList = NULL;
1249*699cd480SApple OSS Distributions UInt currentPageIndex = 0;
1250*699cd480SApple OSS Distributions bool didAlloc;
1251*699cd480SApple OSS Distributions
1252*699cd480SApple OSS Distributions DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1253*699cd480SApple OSS Distributions
1254*699cd480SApple OSS Distributions if (ref->mapRef) {
1255*699cd480SApple OSS Distributions err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1256*699cd480SApple OSS Distributions return err;
1257*699cd480SApple OSS Distributions }
1258*699cd480SApple OSS Distributions
1259*699cd480SApple OSS Distributions #if LOGUNALIGN
1260*699cd480SApple OSS Distributions printf("MAP offset %qx, %qx\n", inoffset, size);
1261*699cd480SApple OSS Distributions #endif
1262*699cd480SApple OSS Distributions
1263*699cd480SApple OSS Distributions type = _flags & kIOMemoryTypeMask;
1264*699cd480SApple OSS Distributions
1265*699cd480SApple OSS Distributions prot = VM_PROT_READ;
1266*699cd480SApple OSS Distributions if (!(kIOMapReadOnly & options)) {
1267*699cd480SApple OSS Distributions prot |= VM_PROT_WRITE;
1268*699cd480SApple OSS Distributions }
1269*699cd480SApple OSS Distributions prot &= ref->prot;
1270*699cd480SApple OSS Distributions
1271*699cd480SApple OSS Distributions cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1272*699cd480SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1273*699cd480SApple OSS Distributions // VM system requires write access to update named entry cache mode
1274*699cd480SApple OSS Distributions memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1275*699cd480SApple OSS Distributions }
1276*699cd480SApple OSS Distributions
1277*699cd480SApple OSS Distributions tag = (vm_tag_t) getVMTag(map);
1278*699cd480SApple OSS Distributions
1279*699cd480SApple OSS Distributions addr = 0;
1280*699cd480SApple OSS Distributions didAlloc = false;
1281*699cd480SApple OSS Distributions
1282*699cd480SApple OSS Distributions if (!(options & kIOMapAnywhere)) {
1283*699cd480SApple OSS Distributions addr = *inaddr;
1284*699cd480SApple OSS Distributions }
1285*699cd480SApple OSS Distributions
1286*699cd480SApple OSS Distributions // find first entry for offset
1287*699cd480SApple OSS Distributions for (firstEntryIdx = 0;
1288*699cd480SApple OSS Distributions (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1289*699cd480SApple OSS Distributions firstEntryIdx++) {
1290*699cd480SApple OSS Distributions }
1291*699cd480SApple OSS Distributions firstEntryIdx--;
1292*699cd480SApple OSS Distributions
1293*699cd480SApple OSS Distributions // calculate required VM space
1294*699cd480SApple OSS Distributions
1295*699cd480SApple OSS Distributions entryIdx = firstEntryIdx;
1296*699cd480SApple OSS Distributions entry = &ref->entries[entryIdx];
1297*699cd480SApple OSS Distributions
1298*699cd480SApple OSS Distributions remain = size;
1299*699cd480SApple OSS Distributions int64_t iteroffset = offset;
1300*699cd480SApple OSS Distributions uint64_t mapSize = 0;
1301*699cd480SApple OSS Distributions while (remain) {
1302*699cd480SApple OSS Distributions entryOffset = iteroffset - entry->offset;
1303*699cd480SApple OSS Distributions if (entryOffset >= entry->size) {
1304*699cd480SApple OSS Distributions panic("entryOffset");
1305*699cd480SApple OSS Distributions }
1306*699cd480SApple OSS Distributions
1307*699cd480SApple OSS Distributions #if LOGUNALIGN
1308*699cd480SApple OSS Distributions printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1309*699cd480SApple OSS Distributions entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1310*699cd480SApple OSS Distributions #endif
1311*699cd480SApple OSS Distributions
1312*699cd480SApple OSS Distributions chunk = entry->size - entryOffset;
1313*699cd480SApple OSS Distributions if (chunk) {
1314*699cd480SApple OSS Distributions if (chunk > remain) {
1315*699cd480SApple OSS Distributions chunk = remain;
1316*699cd480SApple OSS Distributions }
1317*699cd480SApple OSS Distributions mach_vm_size_t entrySize;
1318*699cd480SApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1319*699cd480SApple OSS Distributions assert(KERN_SUCCESS == err);
1320*699cd480SApple OSS Distributions mapSize += entrySize;
1321*699cd480SApple OSS Distributions
1322*699cd480SApple OSS Distributions remain -= chunk;
1323*699cd480SApple OSS Distributions if (!remain) {
1324*699cd480SApple OSS Distributions break;
1325*699cd480SApple OSS Distributions }
1326*699cd480SApple OSS Distributions iteroffset += chunk; // - pageOffset;
1327*699cd480SApple OSS Distributions }
1328*699cd480SApple OSS Distributions entry++;
1329*699cd480SApple OSS Distributions entryIdx++;
1330*699cd480SApple OSS Distributions if (entryIdx >= ref->count) {
1331*699cd480SApple OSS Distributions panic("overrun");
1332*699cd480SApple OSS Distributions err = kIOReturnOverrun;
1333*699cd480SApple OSS Distributions break;
1334*699cd480SApple OSS Distributions }
1335*699cd480SApple OSS Distributions }
1336*699cd480SApple OSS Distributions
1337*699cd480SApple OSS Distributions if (kIOMapOverwrite & options) {
1338*699cd480SApple OSS Distributions if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339*699cd480SApple OSS Distributions map = IOPageableMapForAddress(addr);
1340*699cd480SApple OSS Distributions }
1341*699cd480SApple OSS Distributions err = KERN_SUCCESS;
1342*699cd480SApple OSS Distributions } else {
1343*699cd480SApple OSS Distributions IOMemoryDescriptorMapAllocRef ref;
1344*699cd480SApple OSS Distributions ref.map = map;
1345*699cd480SApple OSS Distributions ref.tag = tag;
1346*699cd480SApple OSS Distributions ref.options = options;
1347*699cd480SApple OSS Distributions ref.size = mapSize;
1348*699cd480SApple OSS Distributions ref.prot = prot;
1349*699cd480SApple OSS Distributions if (options & kIOMapAnywhere) {
1350*699cd480SApple OSS Distributions // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1351*699cd480SApple OSS Distributions ref.mapped = 0;
1352*699cd480SApple OSS Distributions } else {
1353*699cd480SApple OSS Distributions ref.mapped = addr;
1354*699cd480SApple OSS Distributions }
1355*699cd480SApple OSS Distributions if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1356*699cd480SApple OSS Distributions err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1357*699cd480SApple OSS Distributions } else {
1358*699cd480SApple OSS Distributions err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1359*699cd480SApple OSS Distributions }
1360*699cd480SApple OSS Distributions
1361*699cd480SApple OSS Distributions if (KERN_SUCCESS == err) {
1362*699cd480SApple OSS Distributions addr = ref.mapped;
1363*699cd480SApple OSS Distributions map = ref.map;
1364*699cd480SApple OSS Distributions didAlloc = true;
1365*699cd480SApple OSS Distributions }
1366*699cd480SApple OSS Distributions #if LOGUNALIGN
1367*699cd480SApple OSS Distributions IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1368*699cd480SApple OSS Distributions #endif
1369*699cd480SApple OSS Distributions }
1370*699cd480SApple OSS Distributions
1371*699cd480SApple OSS Distributions /*
1372*699cd480SApple OSS Distributions * If the memory is associated with a device pager but doesn't have a UPL,
1373*699cd480SApple OSS Distributions * it will be immediately faulted in through the pager via populateDevicePager().
1374*699cd480SApple OSS Distributions * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1375*699cd480SApple OSS Distributions * operations.
1376*699cd480SApple OSS Distributions */
1377*699cd480SApple OSS Distributions if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1378*699cd480SApple OSS Distributions options &= ~kIOMapPrefault;
1379*699cd480SApple OSS Distributions }
1380*699cd480SApple OSS Distributions
1381*699cd480SApple OSS Distributions /*
1382*699cd480SApple OSS Distributions * Prefaulting is only possible if we wired the memory earlier. Check the
1383*699cd480SApple OSS Distributions * memory type, and the underlying data.
1384*699cd480SApple OSS Distributions */
1385*699cd480SApple OSS Distributions if (options & kIOMapPrefault) {
1386*699cd480SApple OSS Distributions /*
1387*699cd480SApple OSS Distributions * The memory must have been wired by calling ::prepare(), otherwise
1388*699cd480SApple OSS Distributions * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1389*699cd480SApple OSS Distributions */
1390*699cd480SApple OSS Distributions assert(_wireCount != 0);
1391*699cd480SApple OSS Distributions assert(_memoryEntries != NULL);
1392*699cd480SApple OSS Distributions if ((_wireCount == 0) ||
1393*699cd480SApple OSS Distributions (_memoryEntries == NULL)) {
1394*699cd480SApple OSS Distributions return kIOReturnBadArgument;
1395*699cd480SApple OSS Distributions }
1396*699cd480SApple OSS Distributions
1397*699cd480SApple OSS Distributions // Get the page list.
1398*699cd480SApple OSS Distributions ioGMDData* dataP = getDataP(_memoryEntries);
1399*699cd480SApple OSS Distributions ioPLBlock const* ioplList = getIOPLList(dataP);
1400*699cd480SApple OSS Distributions pageList = getPageList(dataP);
1401*699cd480SApple OSS Distributions
1402*699cd480SApple OSS Distributions // Get the number of IOPLs.
1403*699cd480SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1404*699cd480SApple OSS Distributions
1405*699cd480SApple OSS Distributions /*
1406*699cd480SApple OSS Distributions * Scan through the IOPL Info Blocks, looking for the first block containing
1407*699cd480SApple OSS Distributions  * the offset. The search will go past it, so we'll need to go back to the
1408*699cd480SApple OSS Distributions * right range at the end.
1409*699cd480SApple OSS Distributions */
1410*699cd480SApple OSS Distributions UInt ioplIndex = 0;
1411*699cd480SApple OSS Distributions while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1412*699cd480SApple OSS Distributions ioplIndex++;
1413*699cd480SApple OSS Distributions }
1414*699cd480SApple OSS Distributions ioplIndex--;
1415*699cd480SApple OSS Distributions
1416*699cd480SApple OSS Distributions // Retrieve the IOPL info block.
1417*699cd480SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ioplIndex];
1418*699cd480SApple OSS Distributions
1419*699cd480SApple OSS Distributions /*
1420*699cd480SApple OSS Distributions * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1421*699cd480SApple OSS Distributions * array.
1422*699cd480SApple OSS Distributions */
1423*699cd480SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
1424*699cd480SApple OSS Distributions pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1425*699cd480SApple OSS Distributions } else {
1426*699cd480SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
1427*699cd480SApple OSS Distributions }
1428*699cd480SApple OSS Distributions
1429*699cd480SApple OSS Distributions 		// Rebase [offset] into the IOPL in order to look for the first page index.
1430*699cd480SApple OSS Distributions mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1431*699cd480SApple OSS Distributions
1432*699cd480SApple OSS Distributions // Retrieve the index of the first page corresponding to the offset.
1433*699cd480SApple OSS Distributions currentPageIndex = atop_32(offsetInIOPL);
1434*699cd480SApple OSS Distributions }
1435*699cd480SApple OSS Distributions
1436*699cd480SApple OSS Distributions // enter mappings
1437*699cd480SApple OSS Distributions remain = size;
1438*699cd480SApple OSS Distributions mapAddr = addr;
1439*699cd480SApple OSS Distributions entryIdx = firstEntryIdx;
1440*699cd480SApple OSS Distributions entry = &ref->entries[entryIdx];
1441*699cd480SApple OSS Distributions
1442*699cd480SApple OSS Distributions while (remain && (KERN_SUCCESS == err)) {
1443*699cd480SApple OSS Distributions #if LOGUNALIGN
1444*699cd480SApple OSS Distributions printf("offset %qx, %qx\n", offset, entry->offset);
1445*699cd480SApple OSS Distributions #endif
1446*699cd480SApple OSS Distributions if (kIODefaultCache != cacheMode) {
1447*699cd480SApple OSS Distributions vm_size_t unused = 0;
1448*699cd480SApple OSS Distributions err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1449*699cd480SApple OSS Distributions memEntryCacheMode, NULL, entry->entry);
1450*699cd480SApple OSS Distributions assert(KERN_SUCCESS == err);
1451*699cd480SApple OSS Distributions }
1452*699cd480SApple OSS Distributions entryOffset = offset - entry->offset;
1453*699cd480SApple OSS Distributions if (entryOffset >= entry->size) {
1454*699cd480SApple OSS Distributions panic("entryOffset");
1455*699cd480SApple OSS Distributions }
1456*699cd480SApple OSS Distributions chunk = entry->size - entryOffset;
1457*699cd480SApple OSS Distributions #if LOGUNALIGN
1458*699cd480SApple OSS Distributions printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1459*699cd480SApple OSS Distributions #endif
1460*699cd480SApple OSS Distributions if (chunk) {
1461*699cd480SApple OSS Distributions vm_map_kernel_flags_t vmk_flags = {
1462*699cd480SApple OSS Distributions .vmf_fixed = true,
1463*699cd480SApple OSS Distributions .vmf_overwrite = true,
1464*699cd480SApple OSS Distributions .vmf_return_data_addr = true,
1465*699cd480SApple OSS Distributions .vm_tag = tag,
1466*699cd480SApple OSS Distributions .vmkf_iokit_acct = true,
1467*699cd480SApple OSS Distributions };
1468*699cd480SApple OSS Distributions
1469*699cd480SApple OSS Distributions if (chunk > remain) {
1470*699cd480SApple OSS Distributions chunk = remain;
1471*699cd480SApple OSS Distributions }
1472*699cd480SApple OSS Distributions mapAddrOut = mapAddr;
1473*699cd480SApple OSS Distributions if (options & kIOMapPrefault) {
1474*699cd480SApple OSS Distributions UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1475*699cd480SApple OSS Distributions
1476*699cd480SApple OSS Distributions err = vm_map_enter_mem_object_prefault(map,
1477*699cd480SApple OSS Distributions &mapAddrOut,
1478*699cd480SApple OSS Distributions chunk, 0 /* mask */,
1479*699cd480SApple OSS Distributions vmk_flags,
1480*699cd480SApple OSS Distributions entry->entry,
1481*699cd480SApple OSS Distributions entryOffset,
1482*699cd480SApple OSS Distributions prot, // cur
1483*699cd480SApple OSS Distributions prot, // max
1484*699cd480SApple OSS Distributions &pageList[currentPageIndex],
1485*699cd480SApple OSS Distributions nb_pages);
1486*699cd480SApple OSS Distributions
1487*699cd480SApple OSS Distributions // Compute the next index in the page list.
1488*699cd480SApple OSS Distributions currentPageIndex += nb_pages;
1489*699cd480SApple OSS Distributions assert(currentPageIndex <= _pages);
1490*699cd480SApple OSS Distributions } else {
1491*699cd480SApple OSS Distributions #if LOGUNALIGN
1492*699cd480SApple OSS Distributions printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1493*699cd480SApple OSS Distributions #endif
1494*699cd480SApple OSS Distributions err = vm_map_enter_mem_object(map,
1495*699cd480SApple OSS Distributions &mapAddrOut,
1496*699cd480SApple OSS Distributions chunk, 0 /* mask */,
1497*699cd480SApple OSS Distributions vmk_flags,
1498*699cd480SApple OSS Distributions entry->entry,
1499*699cd480SApple OSS Distributions entryOffset,
1500*699cd480SApple OSS Distributions false, // copy
1501*699cd480SApple OSS Distributions prot, // cur
1502*699cd480SApple OSS Distributions prot, // max
1503*699cd480SApple OSS Distributions VM_INHERIT_NONE);
1504*699cd480SApple OSS Distributions }
1505*699cd480SApple OSS Distributions if (KERN_SUCCESS != err) {
1506*699cd480SApple OSS Distributions panic("map enter err %x", err);
1507*699cd480SApple OSS Distributions break;
1508*699cd480SApple OSS Distributions }
1509*699cd480SApple OSS Distributions #if LOGUNALIGN
1510*699cd480SApple OSS Distributions printf("mapAddr o %qx\n", mapAddrOut);
1511*699cd480SApple OSS Distributions #endif
1512*699cd480SApple OSS Distributions if (entryIdx == firstEntryIdx) {
1513*699cd480SApple OSS Distributions addr = mapAddrOut;
1514*699cd480SApple OSS Distributions }
1515*699cd480SApple OSS Distributions remain -= chunk;
1516*699cd480SApple OSS Distributions if (!remain) {
1517*699cd480SApple OSS Distributions break;
1518*699cd480SApple OSS Distributions }
1519*699cd480SApple OSS Distributions mach_vm_size_t entrySize;
1520*699cd480SApple OSS Distributions err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1521*699cd480SApple OSS Distributions assert(KERN_SUCCESS == err);
1522*699cd480SApple OSS Distributions mapAddr += entrySize;
1523*699cd480SApple OSS Distributions offset += chunk;
1524*699cd480SApple OSS Distributions }
1525*699cd480SApple OSS Distributions
1526*699cd480SApple OSS Distributions entry++;
1527*699cd480SApple OSS Distributions entryIdx++;
1528*699cd480SApple OSS Distributions if (entryIdx >= ref->count) {
1529*699cd480SApple OSS Distributions err = kIOReturnOverrun;
1530*699cd480SApple OSS Distributions break;
1531*699cd480SApple OSS Distributions }
1532*699cd480SApple OSS Distributions }
1533*699cd480SApple OSS Distributions
1534*699cd480SApple OSS Distributions if (KERN_SUCCESS != err) {
1535*699cd480SApple OSS Distributions DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1536*699cd480SApple OSS Distributions }
1537*699cd480SApple OSS Distributions
1538*699cd480SApple OSS Distributions if ((KERN_SUCCESS != err) && didAlloc) {
1539*699cd480SApple OSS Distributions (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1540*699cd480SApple OSS Distributions addr = 0;
1541*699cd480SApple OSS Distributions }
1542*699cd480SApple OSS Distributions *inaddr = addr;
1543*699cd480SApple OSS Distributions
1544*699cd480SApple OSS Distributions return err;
1545*699cd480SApple OSS Distributions }
1546*699cd480SApple OSS Distributions
1547*699cd480SApple OSS Distributions uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1548*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1549*699cd480SApple OSS Distributions IOMemoryReference * ref,
1550*699cd480SApple OSS Distributions uint64_t * offset)
1551*699cd480SApple OSS Distributions {
1552*699cd480SApple OSS Distributions kern_return_t kr;
1553*699cd480SApple OSS Distributions vm_object_offset_t data_offset = 0;
1554*699cd480SApple OSS Distributions uint64_t total;
1555*699cd480SApple OSS Distributions uint32_t idx;
1556*699cd480SApple OSS Distributions
1557*699cd480SApple OSS Distributions assert(ref->count);
1558*699cd480SApple OSS Distributions if (offset) {
1559*699cd480SApple OSS Distributions *offset = (uint64_t) data_offset;
1560*699cd480SApple OSS Distributions }
1561*699cd480SApple OSS Distributions total = 0;
1562*699cd480SApple OSS Distributions for (idx = 0; idx < ref->count; idx++) {
1563*699cd480SApple OSS Distributions kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1564*699cd480SApple OSS Distributions &data_offset);
1565*699cd480SApple OSS Distributions if (KERN_SUCCESS != kr) {
1566*699cd480SApple OSS Distributions DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1567*699cd480SApple OSS Distributions } else if (0 != data_offset) {
1568*699cd480SApple OSS Distributions DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1569*699cd480SApple OSS Distributions }
1570*699cd480SApple OSS Distributions if (offset && !idx) {
1571*699cd480SApple OSS Distributions *offset = (uint64_t) data_offset;
1572*699cd480SApple OSS Distributions }
1573*699cd480SApple OSS Distributions total += round_page(data_offset + ref->entries[idx].size);
1574*699cd480SApple OSS Distributions }
1575*699cd480SApple OSS Distributions
1576*699cd480SApple OSS Distributions DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1577*699cd480SApple OSS Distributions (offset ? *offset : (vm_object_offset_t)-1), total);
1578*699cd480SApple OSS Distributions
1579*699cd480SApple OSS Distributions return total;
1580*699cd480SApple OSS Distributions }
1581*699cd480SApple OSS Distributions
1582*699cd480SApple OSS Distributions
1583*699cd480SApple OSS Distributions IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1584*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1585*699cd480SApple OSS Distributions IOMemoryReference * ref,
1586*699cd480SApple OSS Distributions IOByteCount * residentPageCount,
1587*699cd480SApple OSS Distributions IOByteCount * dirtyPageCount)
1588*699cd480SApple OSS Distributions {
1589*699cd480SApple OSS Distributions IOReturn err;
1590*699cd480SApple OSS Distributions IOMemoryEntry * entries;
1591*699cd480SApple OSS Distributions unsigned int resident, dirty;
1592*699cd480SApple OSS Distributions unsigned int totalResident, totalDirty;
1593*699cd480SApple OSS Distributions
1594*699cd480SApple OSS Distributions totalResident = totalDirty = 0;
1595*699cd480SApple OSS Distributions err = kIOReturnSuccess;
1596*699cd480SApple OSS Distributions entries = ref->entries + ref->count;
1597*699cd480SApple OSS Distributions while (entries > &ref->entries[0]) {
1598*699cd480SApple OSS Distributions entries--;
1599*699cd480SApple OSS Distributions err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1600*699cd480SApple OSS Distributions if (KERN_SUCCESS != err) {
1601*699cd480SApple OSS Distributions break;
1602*699cd480SApple OSS Distributions }
1603*699cd480SApple OSS Distributions totalResident += resident;
1604*699cd480SApple OSS Distributions totalDirty += dirty;
1605*699cd480SApple OSS Distributions }
1606*699cd480SApple OSS Distributions
1607*699cd480SApple OSS Distributions if (residentPageCount) {
1608*699cd480SApple OSS Distributions *residentPageCount = totalResident;
1609*699cd480SApple OSS Distributions }
1610*699cd480SApple OSS Distributions if (dirtyPageCount) {
1611*699cd480SApple OSS Distributions *dirtyPageCount = totalDirty;
1612*699cd480SApple OSS Distributions }
1613*699cd480SApple OSS Distributions return err;
1614*699cd480SApple OSS Distributions }
1615*699cd480SApple OSS Distributions
/*
 * Apply a purgeable-state change to every memory entry in the reference
 * and aggregate the resulting states into a single value for the caller:
 * Empty dominates, then Volatile, otherwise NonVolatile.
 * Returns the first error encountered; entries are walked last-to-first
 * and the walk stops on error.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits newState,
	IOOptionBits * oldState)
{
	IOReturn err;
	IOMemoryEntry * entries;
	vm_purgable_t control;
	int totalState, state;

	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;

		// Translate the IOKit state into VM control/state bits.
		// NOTE(review): 'state' appears to be both an input to the VM
		// call and an output — recomputed per iteration for that reason.
		err = purgeableControlBits(newState, &control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// Translate the VM's returned state back into IOKit bits.
		err = purgeableStateBits(&state);
		if (KERN_SUCCESS != err) {
			break;
		}

		// Aggregate: Empty is sticky; Volatile outranks NonVolatile.
		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}

	if (oldState) {
		*oldState = totalState;
	}
	return err;
}
1664*699cd480SApple OSS Distributions
1665*699cd480SApple OSS Distributions IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1666*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667*699cd480SApple OSS Distributions IOMemoryReference * ref,
1668*699cd480SApple OSS Distributions task_t newOwner,
1669*699cd480SApple OSS Distributions int newLedgerTag,
1670*699cd480SApple OSS Distributions IOOptionBits newLedgerOptions)
1671*699cd480SApple OSS Distributions {
1672*699cd480SApple OSS Distributions IOReturn err, totalErr;
1673*699cd480SApple OSS Distributions IOMemoryEntry * entries;
1674*699cd480SApple OSS Distributions
1675*699cd480SApple OSS Distributions totalErr = kIOReturnSuccess;
1676*699cd480SApple OSS Distributions entries = ref->entries + ref->count;
1677*699cd480SApple OSS Distributions while (entries > &ref->entries[0]) {
1678*699cd480SApple OSS Distributions entries--;
1679*699cd480SApple OSS Distributions
1680*699cd480SApple OSS Distributions err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1681*699cd480SApple OSS Distributions if (KERN_SUCCESS != err) {
1682*699cd480SApple OSS Distributions totalErr = err;
1683*699cd480SApple OSS Distributions }
1684*699cd480SApple OSS Distributions }
1685*699cd480SApple OSS Distributions
1686*699cd480SApple OSS Distributions return totalErr;
1687*699cd480SApple OSS Distributions }
1688*699cd480SApple OSS Distributions
1689*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690*699cd480SApple OSS Distributions
1691*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1692*699cd480SApple OSS Distributions IOMemoryDescriptor::withAddress(void * address,
1693*699cd480SApple OSS Distributions IOByteCount length,
1694*699cd480SApple OSS Distributions IODirection direction)
1695*699cd480SApple OSS Distributions {
1696*699cd480SApple OSS Distributions return IOMemoryDescriptor::
1697*699cd480SApple OSS Distributions withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1698*699cd480SApple OSS Distributions }
1699*699cd480SApple OSS Distributions
1700*699cd480SApple OSS Distributions #ifndef __LP64__
1701*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1702*699cd480SApple OSS Distributions IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703*699cd480SApple OSS Distributions IOByteCount length,
1704*699cd480SApple OSS Distributions IODirection direction,
1705*699cd480SApple OSS Distributions task_t task)
1706*699cd480SApple OSS Distributions {
1707*699cd480SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708*699cd480SApple OSS Distributions if (that) {
1709*699cd480SApple OSS Distributions if (that->initWithAddress(address, length, direction, task)) {
1710*699cd480SApple OSS Distributions return os::move(that);
1711*699cd480SApple OSS Distributions }
1712*699cd480SApple OSS Distributions }
1713*699cd480SApple OSS Distributions return nullptr;
1714*699cd480SApple OSS Distributions }
1715*699cd480SApple OSS Distributions #endif /* !__LP64__ */
1716*699cd480SApple OSS Distributions
1717*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1718*699cd480SApple OSS Distributions IOMemoryDescriptor::withPhysicalAddress(
1719*699cd480SApple OSS Distributions IOPhysicalAddress address,
1720*699cd480SApple OSS Distributions IOByteCount length,
1721*699cd480SApple OSS Distributions IODirection direction )
1722*699cd480SApple OSS Distributions {
1723*699cd480SApple OSS Distributions return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1724*699cd480SApple OSS Distributions }
1725*699cd480SApple OSS Distributions
1726*699cd480SApple OSS Distributions #ifndef __LP64__
1727*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1728*699cd480SApple OSS Distributions IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729*699cd480SApple OSS Distributions UInt32 withCount,
1730*699cd480SApple OSS Distributions IODirection direction,
1731*699cd480SApple OSS Distributions task_t task,
1732*699cd480SApple OSS Distributions bool asReference)
1733*699cd480SApple OSS Distributions {
1734*699cd480SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735*699cd480SApple OSS Distributions if (that) {
1736*699cd480SApple OSS Distributions if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737*699cd480SApple OSS Distributions return os::move(that);
1738*699cd480SApple OSS Distributions }
1739*699cd480SApple OSS Distributions }
1740*699cd480SApple OSS Distributions return nullptr;
1741*699cd480SApple OSS Distributions }
1742*699cd480SApple OSS Distributions #endif /* !__LP64__ */
1743*699cd480SApple OSS Distributions
1744*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1745*699cd480SApple OSS Distributions IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746*699cd480SApple OSS Distributions mach_vm_size_t length,
1747*699cd480SApple OSS Distributions IOOptionBits options,
1748*699cd480SApple OSS Distributions task_t task)
1749*699cd480SApple OSS Distributions {
1750*699cd480SApple OSS Distributions IOAddressRange range = { address, length };
1751*699cd480SApple OSS Distributions return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1752*699cd480SApple OSS Distributions }
1753*699cd480SApple OSS Distributions
1754*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1755*699cd480SApple OSS Distributions IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1756*699cd480SApple OSS Distributions UInt32 rangeCount,
1757*699cd480SApple OSS Distributions IOOptionBits options,
1758*699cd480SApple OSS Distributions task_t task)
1759*699cd480SApple OSS Distributions {
1760*699cd480SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761*699cd480SApple OSS Distributions if (that) {
1762*699cd480SApple OSS Distributions if (task) {
1763*699cd480SApple OSS Distributions options |= kIOMemoryTypeVirtual64;
1764*699cd480SApple OSS Distributions } else {
1765*699cd480SApple OSS Distributions options |= kIOMemoryTypePhysical64;
1766*699cd480SApple OSS Distributions }
1767*699cd480SApple OSS Distributions
1768*699cd480SApple OSS Distributions if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1769*699cd480SApple OSS Distributions return os::move(that);
1770*699cd480SApple OSS Distributions }
1771*699cd480SApple OSS Distributions }
1772*699cd480SApple OSS Distributions
1773*699cd480SApple OSS Distributions return nullptr;
1774*699cd480SApple OSS Distributions }
1775*699cd480SApple OSS Distributions
1776*699cd480SApple OSS Distributions
1777*699cd480SApple OSS Distributions /*
1778*699cd480SApple OSS Distributions * withOptions:
1779*699cd480SApple OSS Distributions *
1780*699cd480SApple OSS Distributions * Create a new IOMemoryDescriptor. The buffer is made up of several
1781*699cd480SApple OSS Distributions * virtual address ranges, from a given task.
1782*699cd480SApple OSS Distributions *
1783*699cd480SApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1784*699cd480SApple OSS Distributions */
1785*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1786*699cd480SApple OSS Distributions IOMemoryDescriptor::withOptions(void * buffers,
1787*699cd480SApple OSS Distributions UInt32 count,
1788*699cd480SApple OSS Distributions UInt32 offset,
1789*699cd480SApple OSS Distributions task_t task,
1790*699cd480SApple OSS Distributions IOOptionBits opts,
1791*699cd480SApple OSS Distributions IOMapper * mapper)
1792*699cd480SApple OSS Distributions {
1793*699cd480SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794*699cd480SApple OSS Distributions
1795*699cd480SApple OSS Distributions if (self
1796*699cd480SApple OSS Distributions && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1797*699cd480SApple OSS Distributions return nullptr;
1798*699cd480SApple OSS Distributions }
1799*699cd480SApple OSS Distributions
1800*699cd480SApple OSS Distributions return os::move(self);
1801*699cd480SApple OSS Distributions }
1802*699cd480SApple OSS Distributions
/*
 * Base-class stub: IOMemoryDescriptor itself cannot be initialized from
 * raw buffers; concrete subclasses (e.g. IOGeneralMemoryDescriptor)
 * override this. Always fails.
 */
bool
IOMemoryDescriptor::initWithOptions(void * buffers,
    UInt32 count,
    UInt32 offset,
    task_t task,
    IOOptionBits options,
    IOMapper * mapper)
{
	return false;
}
1813*699cd480SApple OSS Distributions
1814*699cd480SApple OSS Distributions #ifndef __LP64__
1815*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1816*699cd480SApple OSS Distributions IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817*699cd480SApple OSS Distributions UInt32 withCount,
1818*699cd480SApple OSS Distributions IODirection direction,
1819*699cd480SApple OSS Distributions bool asReference)
1820*699cd480SApple OSS Distributions {
1821*699cd480SApple OSS Distributions OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822*699cd480SApple OSS Distributions if (that) {
1823*699cd480SApple OSS Distributions if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824*699cd480SApple OSS Distributions return os::move(that);
1825*699cd480SApple OSS Distributions }
1826*699cd480SApple OSS Distributions }
1827*699cd480SApple OSS Distributions return nullptr;
1828*699cd480SApple OSS Distributions }
1829*699cd480SApple OSS Distributions
1830*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1831*699cd480SApple OSS Distributions IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1832*699cd480SApple OSS Distributions IOByteCount offset,
1833*699cd480SApple OSS Distributions IOByteCount length,
1834*699cd480SApple OSS Distributions IODirection direction)
1835*699cd480SApple OSS Distributions {
1836*699cd480SApple OSS Distributions return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1837*699cd480SApple OSS Distributions }
1838*699cd480SApple OSS Distributions #endif /* !__LP64__ */
1839*699cd480SApple OSS Distributions
1840*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1841*699cd480SApple OSS Distributions IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842*699cd480SApple OSS Distributions {
1843*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor *origGenMD =
1844*699cd480SApple OSS Distributions OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845*699cd480SApple OSS Distributions
1846*699cd480SApple OSS Distributions if (origGenMD) {
1847*699cd480SApple OSS Distributions return IOGeneralMemoryDescriptor::
1848*699cd480SApple OSS Distributions withPersistentMemoryDescriptor(origGenMD);
1849*699cd480SApple OSS Distributions } else {
1850*699cd480SApple OSS Distributions return nullptr;
1851*699cd480SApple OSS Distributions }
1852*699cd480SApple OSS Distributions }
1853*699cd480SApple OSS Distributions
/*
 * Create a descriptor that shares a persistent named-entry reference with
 * 'originalMD'. If the original's reference can simply be reused, the
 * original descriptor itself is returned (retained); otherwise a new
 * descriptor is built around the freshly created reference.
 */
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	// Obtain (or reuse) a named-entry reference for the original
	// descriptor's memory.
	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// Reuse case: the original already owns this reference, so drop the
	// extra reference taken above and hand back the original, retained.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	// Fresh reference: build a new descriptor around it.
	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	// NOTE(review): if allocation or initWithOptions() fails here, memRef
	// does not appear to be released on this path — TODO confirm whether
	// initWithOptions()/the destructor assumes ownership on failure.
	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1879*699cd480SApple OSS Distributions
1880*699cd480SApple OSS Distributions #ifndef __LP64__
1881*699cd480SApple OSS Distributions bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1882*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(void * address,
1883*699cd480SApple OSS Distributions IOByteCount withLength,
1884*699cd480SApple OSS Distributions IODirection withDirection)
1885*699cd480SApple OSS Distributions {
1886*699cd480SApple OSS Distributions _singleRange.v.address = (vm_offset_t) address;
1887*699cd480SApple OSS Distributions _singleRange.v.length = withLength;
1888*699cd480SApple OSS Distributions
1889*699cd480SApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1890*699cd480SApple OSS Distributions }
1891*699cd480SApple OSS Distributions
1892*699cd480SApple OSS Distributions bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1893*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1894*699cd480SApple OSS Distributions IOByteCount withLength,
1895*699cd480SApple OSS Distributions IODirection withDirection,
1896*699cd480SApple OSS Distributions task_t withTask)
1897*699cd480SApple OSS Distributions {
1898*699cd480SApple OSS Distributions _singleRange.v.address = address;
1899*699cd480SApple OSS Distributions _singleRange.v.length = withLength;
1900*699cd480SApple OSS Distributions
1901*699cd480SApple OSS Distributions return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1902*699cd480SApple OSS Distributions }
1903*699cd480SApple OSS Distributions
1904*699cd480SApple OSS Distributions bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1905*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1906*699cd480SApple OSS Distributions IOPhysicalAddress address,
1907*699cd480SApple OSS Distributions IOByteCount withLength,
1908*699cd480SApple OSS Distributions IODirection withDirection )
1909*699cd480SApple OSS Distributions {
1910*699cd480SApple OSS Distributions _singleRange.p.address = address;
1911*699cd480SApple OSS Distributions _singleRange.p.length = withLength;
1912*699cd480SApple OSS Distributions
1913*699cd480SApple OSS Distributions return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1914*699cd480SApple OSS Distributions }
1915*699cd480SApple OSS Distributions
1916*699cd480SApple OSS Distributions bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1917*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918*699cd480SApple OSS Distributions IOPhysicalRange * ranges,
1919*699cd480SApple OSS Distributions UInt32 count,
1920*699cd480SApple OSS Distributions IODirection direction,
1921*699cd480SApple OSS Distributions bool reference)
1922*699cd480SApple OSS Distributions {
1923*699cd480SApple OSS Distributions IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924*699cd480SApple OSS Distributions
1925*699cd480SApple OSS Distributions if (reference) {
1926*699cd480SApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1927*699cd480SApple OSS Distributions }
1928*699cd480SApple OSS Distributions
1929*699cd480SApple OSS Distributions return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930*699cd480SApple OSS Distributions }
1931*699cd480SApple OSS Distributions
1932*699cd480SApple OSS Distributions bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1933*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::initWithRanges(
1934*699cd480SApple OSS Distributions IOVirtualRange * ranges,
1935*699cd480SApple OSS Distributions UInt32 count,
1936*699cd480SApple OSS Distributions IODirection direction,
1937*699cd480SApple OSS Distributions task_t task,
1938*699cd480SApple OSS Distributions bool reference)
1939*699cd480SApple OSS Distributions {
1940*699cd480SApple OSS Distributions IOOptionBits mdOpts = direction;
1941*699cd480SApple OSS Distributions
1942*699cd480SApple OSS Distributions if (reference) {
1943*699cd480SApple OSS Distributions mdOpts |= kIOMemoryAsReference;
1944*699cd480SApple OSS Distributions }
1945*699cd480SApple OSS Distributions
1946*699cd480SApple OSS Distributions if (task) {
1947*699cd480SApple OSS Distributions mdOpts |= kIOMemoryTypeVirtual;
1948*699cd480SApple OSS Distributions
1949*699cd480SApple OSS Distributions // Auto-prepare if this is a kernel memory descriptor as very few
1950*699cd480SApple OSS Distributions // clients bother to prepare() kernel memory.
1951*699cd480SApple OSS Distributions // But it was not enforced so what are you going to do?
1952*699cd480SApple OSS Distributions if (task == kernel_task) {
1953*699cd480SApple OSS Distributions mdOpts |= kIOMemoryAutoPrepare;
1954*699cd480SApple OSS Distributions }
1955*699cd480SApple OSS Distributions } else {
1956*699cd480SApple OSS Distributions mdOpts |= kIOMemoryTypePhysical;
1957*699cd480SApple OSS Distributions }
1958*699cd480SApple OSS Distributions
1959*699cd480SApple OSS Distributions return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960*699cd480SApple OSS Distributions }
1961*699cd480SApple OSS Distributions #endif /* !__LP64__ */
1962*699cd480SApple OSS Distributions
1963*699cd480SApple OSS Distributions /*
1964*699cd480SApple OSS Distributions * initWithOptions:
1965*699cd480SApple OSS Distributions *
1966*699cd480SApple OSS Distributions * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1967*699cd480SApple OSS Distributions * from a given task, several physical ranges, an UPL from the ubc
1968*699cd480SApple OSS Distributions * system or a uio (may be 64bit) from the BSD subsystem.
1969*699cd480SApple OSS Distributions *
1970*699cd480SApple OSS Distributions * Passing the ranges as a reference will avoid an extra allocation.
1971*699cd480SApple OSS Distributions *
1972*699cd480SApple OSS Distributions * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973*699cd480SApple OSS Distributions * existing instance -- note this behavior is not commonly supported in other
1974*699cd480SApple OSS Distributions * I/O Kit classes, although it is supported here.
1975*699cd480SApple OSS Distributions */
1976*699cd480SApple OSS Distributions
1977*699cd480SApple OSS Distributions bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1978*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1979*699cd480SApple OSS Distributions UInt32 count,
1980*699cd480SApple OSS Distributions UInt32 offset,
1981*699cd480SApple OSS Distributions task_t task,
1982*699cd480SApple OSS Distributions IOOptionBits options,
1983*699cd480SApple OSS Distributions IOMapper * mapper)
1984*699cd480SApple OSS Distributions {
1985*699cd480SApple OSS Distributions IOOptionBits type = options & kIOMemoryTypeMask;
1986*699cd480SApple OSS Distributions
1987*699cd480SApple OSS Distributions #ifndef __LP64__
1988*699cd480SApple OSS Distributions if (task
1989*699cd480SApple OSS Distributions && (kIOMemoryTypeVirtual == type)
1990*699cd480SApple OSS Distributions && vm_map_is_64bit(get_task_map(task))
1991*699cd480SApple OSS Distributions && ((IOVirtualRange *) buffers)->address) {
1992*699cd480SApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1993*699cd480SApple OSS Distributions return false;
1994*699cd480SApple OSS Distributions }
1995*699cd480SApple OSS Distributions #endif /* !__LP64__ */
1996*699cd480SApple OSS Distributions
1997*699cd480SApple OSS Distributions // Grab the original MD's configuation data to initialse the
1998*699cd480SApple OSS Distributions // arguments to this function.
1999*699cd480SApple OSS Distributions if (kIOMemoryTypePersistentMD == type) {
2000*699cd480SApple OSS Distributions IOMDPersistentInitData *initData = (typeof(initData))buffers;
2001*699cd480SApple OSS Distributions const IOGeneralMemoryDescriptor *orig = initData->fMD;
2002*699cd480SApple OSS Distributions ioGMDData *dataP = getDataP(orig->_memoryEntries);
2003*699cd480SApple OSS Distributions
2004*699cd480SApple OSS Distributions // Only accept persistent memory descriptors with valid dataP data.
2005*699cd480SApple OSS Distributions assert(orig->_rangesCount == 1);
2006*699cd480SApple OSS Distributions if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
2007*699cd480SApple OSS Distributions return false;
2008*699cd480SApple OSS Distributions }
2009*699cd480SApple OSS Distributions
2010*699cd480SApple OSS Distributions _memRef = initData->fMemRef; // Grab the new named entry
2011*699cd480SApple OSS Distributions options = orig->_flags & ~kIOMemoryAsReference;
2012*699cd480SApple OSS Distributions type = options & kIOMemoryTypeMask;
2013*699cd480SApple OSS Distributions buffers = orig->_ranges.v;
2014*699cd480SApple OSS Distributions count = orig->_rangesCount;
2015*699cd480SApple OSS Distributions
2016*699cd480SApple OSS Distributions // Now grab the original task and whatever mapper was previously used
2017*699cd480SApple OSS Distributions task = orig->_task;
2018*699cd480SApple OSS Distributions mapper = dataP->fMapper;
2019*699cd480SApple OSS Distributions
2020*699cd480SApple OSS Distributions // We are ready to go through the original initialisation now
2021*699cd480SApple OSS Distributions }
2022*699cd480SApple OSS Distributions
2023*699cd480SApple OSS Distributions switch (type) {
2024*699cd480SApple OSS Distributions case kIOMemoryTypeUIO:
2025*699cd480SApple OSS Distributions case kIOMemoryTypeVirtual:
2026*699cd480SApple OSS Distributions #ifndef __LP64__
2027*699cd480SApple OSS Distributions case kIOMemoryTypeVirtual64:
2028*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2029*699cd480SApple OSS Distributions assert(task);
2030*699cd480SApple OSS Distributions if (!task) {
2031*699cd480SApple OSS Distributions return false;
2032*699cd480SApple OSS Distributions }
2033*699cd480SApple OSS Distributions break;
2034*699cd480SApple OSS Distributions
2035*699cd480SApple OSS Distributions case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
2036*699cd480SApple OSS Distributions #ifndef __LP64__
2037*699cd480SApple OSS Distributions case kIOMemoryTypePhysical64:
2038*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2039*699cd480SApple OSS Distributions case kIOMemoryTypeUPL:
2040*699cd480SApple OSS Distributions assert(!task);
2041*699cd480SApple OSS Distributions break;
2042*699cd480SApple OSS Distributions default:
2043*699cd480SApple OSS Distributions return false; /* bad argument */
2044*699cd480SApple OSS Distributions }
2045*699cd480SApple OSS Distributions
2046*699cd480SApple OSS Distributions assert(buffers);
2047*699cd480SApple OSS Distributions assert(count);
2048*699cd480SApple OSS Distributions
2049*699cd480SApple OSS Distributions /*
2050*699cd480SApple OSS Distributions * We can check the _initialized instance variable before having ever set
2051*699cd480SApple OSS Distributions * it to an initial value because I/O Kit guarantees that all our instance
2052*699cd480SApple OSS Distributions * variables are zeroed on an object's allocation.
2053*699cd480SApple OSS Distributions */
2054*699cd480SApple OSS Distributions
2055*699cd480SApple OSS Distributions if (_initialized) {
2056*699cd480SApple OSS Distributions /*
2057*699cd480SApple OSS Distributions * An existing memory descriptor is being retargeted to point to
2058*699cd480SApple OSS Distributions * somewhere else. Clean up our present state.
2059*699cd480SApple OSS Distributions */
2060*699cd480SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
2061*699cd480SApple OSS Distributions if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2062*699cd480SApple OSS Distributions while (_wireCount) {
2063*699cd480SApple OSS Distributions complete();
2064*699cd480SApple OSS Distributions }
2065*699cd480SApple OSS Distributions }
2066*699cd480SApple OSS Distributions if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2067*699cd480SApple OSS Distributions if (kIOMemoryTypeUIO == type) {
2068*699cd480SApple OSS Distributions uio_free((uio_t) _ranges.v);
2069*699cd480SApple OSS Distributions }
2070*699cd480SApple OSS Distributions #ifndef __LP64__
2071*699cd480SApple OSS Distributions else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2072*699cd480SApple OSS Distributions IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2073*699cd480SApple OSS Distributions }
2074*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2075*699cd480SApple OSS Distributions else {
2076*699cd480SApple OSS Distributions IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2077*699cd480SApple OSS Distributions }
2078*699cd480SApple OSS Distributions }
2079*699cd480SApple OSS Distributions
2080*699cd480SApple OSS Distributions options |= (kIOMemoryRedirected & _flags);
2081*699cd480SApple OSS Distributions if (!(kIOMemoryRedirected & options)) {
2082*699cd480SApple OSS Distributions if (_memRef) {
2083*699cd480SApple OSS Distributions memoryReferenceRelease(_memRef);
2084*699cd480SApple OSS Distributions _memRef = NULL;
2085*699cd480SApple OSS Distributions }
2086*699cd480SApple OSS Distributions if (_mappings) {
2087*699cd480SApple OSS Distributions _mappings->flushCollection();
2088*699cd480SApple OSS Distributions }
2089*699cd480SApple OSS Distributions }
2090*699cd480SApple OSS Distributions } else {
2091*699cd480SApple OSS Distributions if (!super::init()) {
2092*699cd480SApple OSS Distributions return false;
2093*699cd480SApple OSS Distributions }
2094*699cd480SApple OSS Distributions _initialized = true;
2095*699cd480SApple OSS Distributions }
2096*699cd480SApple OSS Distributions
2097*699cd480SApple OSS Distributions // Grab the appropriate mapper
2098*699cd480SApple OSS Distributions if (kIOMemoryHostOrRemote & options) {
2099*699cd480SApple OSS Distributions options |= kIOMemoryMapperNone;
2100*699cd480SApple OSS Distributions }
2101*699cd480SApple OSS Distributions if (kIOMemoryMapperNone & options) {
2102*699cd480SApple OSS Distributions mapper = NULL; // No Mapper
2103*699cd480SApple OSS Distributions } else if (mapper == kIOMapperSystem) {
2104*699cd480SApple OSS Distributions IOMapper::checkForSystemMapper();
2105*699cd480SApple OSS Distributions gIOSystemMapper = mapper = IOMapper::gSystem;
2106*699cd480SApple OSS Distributions }
2107*699cd480SApple OSS Distributions
2108*699cd480SApple OSS Distributions // Remove the dynamic internal use flags from the initial setting
2109*699cd480SApple OSS Distributions options &= ~(kIOMemoryPreparedReadOnly);
2110*699cd480SApple OSS Distributions _flags = options;
2111*699cd480SApple OSS Distributions _task = task;
2112*699cd480SApple OSS Distributions
2113*699cd480SApple OSS Distributions #ifndef __LP64__
2114*699cd480SApple OSS Distributions _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
2115*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2116*699cd480SApple OSS Distributions
2117*699cd480SApple OSS Distributions _dmaReferences = 0;
2118*699cd480SApple OSS Distributions __iomd_reservedA = 0;
2119*699cd480SApple OSS Distributions __iomd_reservedB = 0;
2120*699cd480SApple OSS Distributions _highestPage = 0;
2121*699cd480SApple OSS Distributions
2122*699cd480SApple OSS Distributions if (kIOMemoryThreadSafe & options) {
2123*699cd480SApple OSS Distributions if (!_prepareLock) {
2124*699cd480SApple OSS Distributions _prepareLock = IOLockAlloc();
2125*699cd480SApple OSS Distributions }
2126*699cd480SApple OSS Distributions } else if (_prepareLock) {
2127*699cd480SApple OSS Distributions IOLockFree(_prepareLock);
2128*699cd480SApple OSS Distributions _prepareLock = NULL;
2129*699cd480SApple OSS Distributions }
2130*699cd480SApple OSS Distributions
2131*699cd480SApple OSS Distributions if (kIOMemoryTypeUPL == type) {
2132*699cd480SApple OSS Distributions ioGMDData *dataP;
2133*699cd480SApple OSS Distributions unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2134*699cd480SApple OSS Distributions
2135*699cd480SApple OSS Distributions if (!initMemoryEntries(dataSize, mapper)) {
2136*699cd480SApple OSS Distributions return false;
2137*699cd480SApple OSS Distributions }
2138*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
2139*699cd480SApple OSS Distributions dataP->fPageCnt = 0;
2140*699cd480SApple OSS Distributions switch (kIOMemoryDirectionMask & options) {
2141*699cd480SApple OSS Distributions case kIODirectionOut:
2142*699cd480SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess;
2143*699cd480SApple OSS Distributions break;
2144*699cd480SApple OSS Distributions case kIODirectionIn:
2145*699cd480SApple OSS Distributions dataP->fDMAAccess = kIODMAMapWriteAccess;
2146*699cd480SApple OSS Distributions break;
2147*699cd480SApple OSS Distributions case kIODirectionNone:
2148*699cd480SApple OSS Distributions case kIODirectionOutIn:
2149*699cd480SApple OSS Distributions default:
2150*699cd480SApple OSS Distributions panic("bad dir for upl 0x%x", (int) options);
2151*699cd480SApple OSS Distributions break;
2152*699cd480SApple OSS Distributions }
2153*699cd480SApple OSS Distributions // _wireCount++; // UPLs start out life wired
2154*699cd480SApple OSS Distributions
2155*699cd480SApple OSS Distributions _length = count;
2156*699cd480SApple OSS Distributions _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2157*699cd480SApple OSS Distributions
2158*699cd480SApple OSS Distributions ioPLBlock iopl;
2159*699cd480SApple OSS Distributions iopl.fIOPL = (upl_t) buffers;
2160*699cd480SApple OSS Distributions upl_set_referenced(iopl.fIOPL, true);
2161*699cd480SApple OSS Distributions upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2162*699cd480SApple OSS Distributions
2163*699cd480SApple OSS Distributions if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2164*699cd480SApple OSS Distributions panic("short external upl");
2165*699cd480SApple OSS Distributions }
2166*699cd480SApple OSS Distributions
2167*699cd480SApple OSS Distributions _highestPage = upl_get_highest_page(iopl.fIOPL);
2168*699cd480SApple OSS Distributions DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2169*699cd480SApple OSS Distributions
2170*699cd480SApple OSS Distributions // Set the flag kIOPLOnDevice convieniently equal to 1
2171*699cd480SApple OSS Distributions iopl.fFlags = pageList->device | kIOPLExternUPL;
2172*699cd480SApple OSS Distributions if (!pageList->device) {
2173*699cd480SApple OSS Distributions // Pre-compute the offset into the UPL's page list
2174*699cd480SApple OSS Distributions pageList = &pageList[atop_32(offset)];
2175*699cd480SApple OSS Distributions offset &= PAGE_MASK;
2176*699cd480SApple OSS Distributions }
2177*699cd480SApple OSS Distributions iopl.fIOMDOffset = 0;
2178*699cd480SApple OSS Distributions iopl.fMappedPage = 0;
2179*699cd480SApple OSS Distributions iopl.fPageInfo = (vm_address_t) pageList;
2180*699cd480SApple OSS Distributions iopl.fPageOffset = offset;
2181*699cd480SApple OSS Distributions _memoryEntries->appendBytes(&iopl, sizeof(iopl));
2182*699cd480SApple OSS Distributions } else {
2183*699cd480SApple OSS Distributions // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2184*699cd480SApple OSS Distributions // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2185*699cd480SApple OSS Distributions
2186*699cd480SApple OSS Distributions // Initialize the memory descriptor
2187*699cd480SApple OSS Distributions if (options & kIOMemoryAsReference) {
2188*699cd480SApple OSS Distributions #ifndef __LP64__
2189*699cd480SApple OSS Distributions _rangesIsAllocated = false;
2190*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2191*699cd480SApple OSS Distributions
2192*699cd480SApple OSS Distributions // Hack assignment to get the buffer arg into _ranges.
2193*699cd480SApple OSS Distributions // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2194*699cd480SApple OSS Distributions // work, C++ sigh.
2195*699cd480SApple OSS Distributions // This also initialises the uio & physical ranges.
2196*699cd480SApple OSS Distributions _ranges.v = (IOVirtualRange *) buffers;
2197*699cd480SApple OSS Distributions } else {
2198*699cd480SApple OSS Distributions #ifndef __LP64__
2199*699cd480SApple OSS Distributions _rangesIsAllocated = true;
2200*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2201*699cd480SApple OSS Distributions switch (type) {
2202*699cd480SApple OSS Distributions case kIOMemoryTypeUIO:
2203*699cd480SApple OSS Distributions _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2204*699cd480SApple OSS Distributions break;
2205*699cd480SApple OSS Distributions
2206*699cd480SApple OSS Distributions #ifndef __LP64__
2207*699cd480SApple OSS Distributions case kIOMemoryTypeVirtual64:
2208*699cd480SApple OSS Distributions case kIOMemoryTypePhysical64:
2209*699cd480SApple OSS Distributions if (count == 1
2210*699cd480SApple OSS Distributions #ifndef __arm__
2211*699cd480SApple OSS Distributions && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2212*699cd480SApple OSS Distributions #endif
2213*699cd480SApple OSS Distributions ) {
2214*699cd480SApple OSS Distributions if (kIOMemoryTypeVirtual64 == type) {
2215*699cd480SApple OSS Distributions type = kIOMemoryTypeVirtual;
2216*699cd480SApple OSS Distributions } else {
2217*699cd480SApple OSS Distributions type = kIOMemoryTypePhysical;
2218*699cd480SApple OSS Distributions }
2219*699cd480SApple OSS Distributions _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2220*699cd480SApple OSS Distributions _rangesIsAllocated = false;
2221*699cd480SApple OSS Distributions _ranges.v = &_singleRange.v;
2222*699cd480SApple OSS Distributions _singleRange.v.address = ((IOAddressRange *) buffers)->address;
2223*699cd480SApple OSS Distributions _singleRange.v.length = ((IOAddressRange *) buffers)->length;
2224*699cd480SApple OSS Distributions break;
2225*699cd480SApple OSS Distributions }
2226*699cd480SApple OSS Distributions _ranges.v64 = IONew(IOAddressRange, count);
2227*699cd480SApple OSS Distributions if (!_ranges.v64) {
2228*699cd480SApple OSS Distributions return false;
2229*699cd480SApple OSS Distributions }
2230*699cd480SApple OSS Distributions bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2231*699cd480SApple OSS Distributions break;
2232*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2233*699cd480SApple OSS Distributions case kIOMemoryTypeVirtual:
2234*699cd480SApple OSS Distributions case kIOMemoryTypePhysical:
2235*699cd480SApple OSS Distributions if (count == 1) {
2236*699cd480SApple OSS Distributions _flags |= kIOMemoryAsReference;
2237*699cd480SApple OSS Distributions #ifndef __LP64__
2238*699cd480SApple OSS Distributions _rangesIsAllocated = false;
2239*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2240*699cd480SApple OSS Distributions _ranges.v = &_singleRange.v;
2241*699cd480SApple OSS Distributions } else {
2242*699cd480SApple OSS Distributions _ranges.v = IONew(IOVirtualRange, count);
2243*699cd480SApple OSS Distributions if (!_ranges.v) {
2244*699cd480SApple OSS Distributions return false;
2245*699cd480SApple OSS Distributions }
2246*699cd480SApple OSS Distributions }
2247*699cd480SApple OSS Distributions bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2248*699cd480SApple OSS Distributions break;
2249*699cd480SApple OSS Distributions }
2250*699cd480SApple OSS Distributions }
2251*699cd480SApple OSS Distributions _rangesCount = count;
2252*699cd480SApple OSS Distributions
2253*699cd480SApple OSS Distributions // Find starting address within the vector of ranges
2254*699cd480SApple OSS Distributions Ranges vec = _ranges;
2255*699cd480SApple OSS Distributions mach_vm_size_t totalLength = 0;
2256*699cd480SApple OSS Distributions unsigned int ind, pages = 0;
2257*699cd480SApple OSS Distributions for (ind = 0; ind < count; ind++) {
2258*699cd480SApple OSS Distributions mach_vm_address_t addr;
2259*699cd480SApple OSS Distributions mach_vm_address_t endAddr;
2260*699cd480SApple OSS Distributions mach_vm_size_t len;
2261*699cd480SApple OSS Distributions
2262*699cd480SApple OSS Distributions // addr & len are returned by this function
2263*699cd480SApple OSS Distributions getAddrLenForInd(addr, len, type, vec, ind, _task);
2264*699cd480SApple OSS Distributions if (_task) {
2265*699cd480SApple OSS Distributions mach_vm_size_t phys_size;
2266*699cd480SApple OSS Distributions kern_return_t kret;
2267*699cd480SApple OSS Distributions kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2268*699cd480SApple OSS Distributions if (KERN_SUCCESS != kret) {
2269*699cd480SApple OSS Distributions break;
2270*699cd480SApple OSS Distributions }
2271*699cd480SApple OSS Distributions if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2272*699cd480SApple OSS Distributions break;
2273*699cd480SApple OSS Distributions }
2274*699cd480SApple OSS Distributions } else {
2275*699cd480SApple OSS Distributions if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2276*699cd480SApple OSS Distributions break;
2277*699cd480SApple OSS Distributions }
2278*699cd480SApple OSS Distributions if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2279*699cd480SApple OSS Distributions break;
2280*699cd480SApple OSS Distributions }
2281*699cd480SApple OSS Distributions if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2282*699cd480SApple OSS Distributions break;
2283*699cd480SApple OSS Distributions }
2284*699cd480SApple OSS Distributions }
2285*699cd480SApple OSS Distributions if (os_add_overflow(totalLength, len, &totalLength)) {
2286*699cd480SApple OSS Distributions break;
2287*699cd480SApple OSS Distributions }
2288*699cd480SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2289*699cd480SApple OSS Distributions uint64_t highPage = atop_64(addr + len - 1);
2290*699cd480SApple OSS Distributions if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2291*699cd480SApple OSS Distributions _highestPage = (ppnum_t) highPage;
2292*699cd480SApple OSS Distributions DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2293*699cd480SApple OSS Distributions }
2294*699cd480SApple OSS Distributions }
2295*699cd480SApple OSS Distributions }
2296*699cd480SApple OSS Distributions if ((ind < count)
2297*699cd480SApple OSS Distributions || (totalLength != ((IOByteCount) totalLength))) {
2298*699cd480SApple OSS Distributions return false; /* overflow */
2299*699cd480SApple OSS Distributions }
2300*699cd480SApple OSS Distributions _length = totalLength;
2301*699cd480SApple OSS Distributions _pages = pages;
2302*699cd480SApple OSS Distributions
2303*699cd480SApple OSS Distributions // Auto-prepare memory at creation time.
2304*699cd480SApple OSS Distributions // Implied completion when descriptor is free-ed
2305*699cd480SApple OSS Distributions
2306*699cd480SApple OSS Distributions
2307*699cd480SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2308*699cd480SApple OSS Distributions _wireCount++; // Physical MDs are, by definition, wired
2309*699cd480SApple OSS Distributions } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2310*699cd480SApple OSS Distributions ioGMDData *dataP;
2311*699cd480SApple OSS Distributions unsigned dataSize;
2312*699cd480SApple OSS Distributions
2313*699cd480SApple OSS Distributions if (_pages > atop_64(max_mem)) {
2314*699cd480SApple OSS Distributions return false;
2315*699cd480SApple OSS Distributions }
2316*699cd480SApple OSS Distributions
2317*699cd480SApple OSS Distributions dataSize = computeDataSize(_pages, /* upls */ count * 2);
2318*699cd480SApple OSS Distributions if (!initMemoryEntries(dataSize, mapper)) {
2319*699cd480SApple OSS Distributions return false;
2320*699cd480SApple OSS Distributions }
2321*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
2322*699cd480SApple OSS Distributions dataP->fPageCnt = _pages;
2323*699cd480SApple OSS Distributions
2324*699cd480SApple OSS Distributions if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2325*699cd480SApple OSS Distributions && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2326*699cd480SApple OSS Distributions _kernelTag = IOMemoryTag(kernel_map);
2327*699cd480SApple OSS Distributions if (_kernelTag == gIOSurfaceTag) {
2328*699cd480SApple OSS Distributions _userTag = VM_MEMORY_IOSURFACE;
2329*699cd480SApple OSS Distributions }
2330*699cd480SApple OSS Distributions }
2331*699cd480SApple OSS Distributions
2332*699cd480SApple OSS Distributions if ((kIOMemoryPersistent & _flags) && !_memRef) {
2333*699cd480SApple OSS Distributions IOReturn
2334*699cd480SApple OSS Distributions err = memoryReferenceCreate(0, &_memRef);
2335*699cd480SApple OSS Distributions if (kIOReturnSuccess != err) {
2336*699cd480SApple OSS Distributions return false;
2337*699cd480SApple OSS Distributions }
2338*699cd480SApple OSS Distributions }
2339*699cd480SApple OSS Distributions
2340*699cd480SApple OSS Distributions if ((_flags & kIOMemoryAutoPrepare)
2341*699cd480SApple OSS Distributions && prepare() != kIOReturnSuccess) {
2342*699cd480SApple OSS Distributions return false;
2343*699cd480SApple OSS Distributions }
2344*699cd480SApple OSS Distributions }
2345*699cd480SApple OSS Distributions }
2346*699cd480SApple OSS Distributions
2347*699cd480SApple OSS Distributions return true;
2348*699cd480SApple OSS Distributions }
2349*699cd480SApple OSS Distributions
2350*699cd480SApple OSS Distributions /*
2351*699cd480SApple OSS Distributions * free
2352*699cd480SApple OSS Distributions *
2353*699cd480SApple OSS Distributions * Free resources.
2354*699cd480SApple OSS Distributions */
2355*699cd480SApple OSS Distributions void
free()2356*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::free()
2357*699cd480SApple OSS Distributions {
2358*699cd480SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
2359*699cd480SApple OSS Distributions
2360*699cd480SApple OSS Distributions if (reserved && reserved->dp.memory) {
2361*699cd480SApple OSS Distributions LOCK;
2362*699cd480SApple OSS Distributions reserved->dp.memory = NULL;
2363*699cd480SApple OSS Distributions UNLOCK;
2364*699cd480SApple OSS Distributions }
2365*699cd480SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2366*699cd480SApple OSS Distributions ioGMDData * dataP;
2367*699cd480SApple OSS Distributions if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
2368*699cd480SApple OSS Distributions dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
2369*699cd480SApple OSS Distributions dataP->fMappedBaseValid = dataP->fMappedBase = 0;
2370*699cd480SApple OSS Distributions }
2371*699cd480SApple OSS Distributions } else {
2372*699cd480SApple OSS Distributions while (_wireCount) {
2373*699cd480SApple OSS Distributions complete();
2374*699cd480SApple OSS Distributions }
2375*699cd480SApple OSS Distributions }
2376*699cd480SApple OSS Distributions
2377*699cd480SApple OSS Distributions if (_memoryEntries) {
2378*699cd480SApple OSS Distributions _memoryEntries.reset();
2379*699cd480SApple OSS Distributions }
2380*699cd480SApple OSS Distributions
2381*699cd480SApple OSS Distributions if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2382*699cd480SApple OSS Distributions if (kIOMemoryTypeUIO == type) {
2383*699cd480SApple OSS Distributions uio_free((uio_t) _ranges.v);
2384*699cd480SApple OSS Distributions }
2385*699cd480SApple OSS Distributions #ifndef __LP64__
2386*699cd480SApple OSS Distributions else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2387*699cd480SApple OSS Distributions IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2388*699cd480SApple OSS Distributions }
2389*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2390*699cd480SApple OSS Distributions else {
2391*699cd480SApple OSS Distributions IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2392*699cd480SApple OSS Distributions }
2393*699cd480SApple OSS Distributions
2394*699cd480SApple OSS Distributions _ranges.v = NULL;
2395*699cd480SApple OSS Distributions }
2396*699cd480SApple OSS Distributions
2397*699cd480SApple OSS Distributions if (reserved) {
2398*699cd480SApple OSS Distributions cleanKernelReserved(reserved);
2399*699cd480SApple OSS Distributions if (reserved->dp.devicePager) {
2400*699cd480SApple OSS Distributions // memEntry holds a ref on the device pager which owns reserved
2401*699cd480SApple OSS Distributions // (IOMemoryDescriptorReserved) so no reserved access after this point
2402*699cd480SApple OSS Distributions device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
2403*699cd480SApple OSS Distributions } else {
2404*699cd480SApple OSS Distributions IOFreeType(reserved, IOMemoryDescriptorReserved);
2405*699cd480SApple OSS Distributions }
2406*699cd480SApple OSS Distributions reserved = NULL;
2407*699cd480SApple OSS Distributions }
2408*699cd480SApple OSS Distributions
2409*699cd480SApple OSS Distributions if (_memRef) {
2410*699cd480SApple OSS Distributions memoryReferenceRelease(_memRef);
2411*699cd480SApple OSS Distributions }
2412*699cd480SApple OSS Distributions if (_prepareLock) {
2413*699cd480SApple OSS Distributions IOLockFree(_prepareLock);
2414*699cd480SApple OSS Distributions }
2415*699cd480SApple OSS Distributions
2416*699cd480SApple OSS Distributions super::free();
2417*699cd480SApple OSS Distributions }
2418*699cd480SApple OSS Distributions
2419*699cd480SApple OSS Distributions #ifndef __LP64__
2420*699cd480SApple OSS Distributions void
unmapFromKernel()2421*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::unmapFromKernel()
2422*699cd480SApple OSS Distributions {
2423*699cd480SApple OSS Distributions panic("IOGMD::unmapFromKernel deprecated");
2424*699cd480SApple OSS Distributions }
2425*699cd480SApple OSS Distributions
2426*699cd480SApple OSS Distributions void
mapIntoKernel(unsigned rangeIndex)2427*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
2428*699cd480SApple OSS Distributions {
2429*699cd480SApple OSS Distributions panic("IOGMD::mapIntoKernel deprecated");
2430*699cd480SApple OSS Distributions }
2431*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2432*699cd480SApple OSS Distributions
2433*699cd480SApple OSS Distributions /*
2434*699cd480SApple OSS Distributions * getDirection:
2435*699cd480SApple OSS Distributions *
2436*699cd480SApple OSS Distributions * Get the direction of the transfer.
2437*699cd480SApple OSS Distributions */
2438*699cd480SApple OSS Distributions IODirection
getDirection() const2439*699cd480SApple OSS Distributions IOMemoryDescriptor::getDirection() const
2440*699cd480SApple OSS Distributions {
2441*699cd480SApple OSS Distributions #ifndef __LP64__
2442*699cd480SApple OSS Distributions if (_direction) {
2443*699cd480SApple OSS Distributions return _direction;
2444*699cd480SApple OSS Distributions }
2445*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2446*699cd480SApple OSS Distributions return (IODirection) (_flags & kIOMemoryDirectionMask);
2447*699cd480SApple OSS Distributions }
2448*699cd480SApple OSS Distributions
2449*699cd480SApple OSS Distributions /*
2450*699cd480SApple OSS Distributions * getLength:
2451*699cd480SApple OSS Distributions *
2452*699cd480SApple OSS Distributions * Get the length of the transfer (over all ranges).
2453*699cd480SApple OSS Distributions */
2454*699cd480SApple OSS Distributions IOByteCount
getLength() const2455*699cd480SApple OSS Distributions IOMemoryDescriptor::getLength() const
2456*699cd480SApple OSS Distributions {
2457*699cd480SApple OSS Distributions return _length;
2458*699cd480SApple OSS Distributions }
2459*699cd480SApple OSS Distributions
2460*699cd480SApple OSS Distributions void
setTag(IOOptionBits tag)2461*699cd480SApple OSS Distributions IOMemoryDescriptor::setTag( IOOptionBits tag )
2462*699cd480SApple OSS Distributions {
2463*699cd480SApple OSS Distributions _tag = tag;
2464*699cd480SApple OSS Distributions }
2465*699cd480SApple OSS Distributions
2466*699cd480SApple OSS Distributions IOOptionBits
getTag(void)2467*699cd480SApple OSS Distributions IOMemoryDescriptor::getTag( void )
2468*699cd480SApple OSS Distributions {
2469*699cd480SApple OSS Distributions return _tag;
2470*699cd480SApple OSS Distributions }
2471*699cd480SApple OSS Distributions
2472*699cd480SApple OSS Distributions uint64_t
getFlags(void)2473*699cd480SApple OSS Distributions IOMemoryDescriptor::getFlags(void)
2474*699cd480SApple OSS Distributions {
2475*699cd480SApple OSS Distributions return _flags;
2476*699cd480SApple OSS Distributions }
2477*699cd480SApple OSS Distributions
2478*699cd480SApple OSS Distributions OSObject *
copyContext(void) const2479*699cd480SApple OSS Distributions IOMemoryDescriptor::copyContext(void) const
2480*699cd480SApple OSS Distributions {
2481*699cd480SApple OSS Distributions if (reserved) {
2482*699cd480SApple OSS Distributions OSObject * context = reserved->contextObject;
2483*699cd480SApple OSS Distributions if (context) {
2484*699cd480SApple OSS Distributions context->retain();
2485*699cd480SApple OSS Distributions }
2486*699cd480SApple OSS Distributions return context;
2487*699cd480SApple OSS Distributions } else {
2488*699cd480SApple OSS Distributions return NULL;
2489*699cd480SApple OSS Distributions }
2490*699cd480SApple OSS Distributions }
2491*699cd480SApple OSS Distributions
2492*699cd480SApple OSS Distributions void
setContext(OSObject * obj)2493*699cd480SApple OSS Distributions IOMemoryDescriptor::setContext(OSObject * obj)
2494*699cd480SApple OSS Distributions {
2495*699cd480SApple OSS Distributions if (this->reserved == NULL && obj == NULL) {
2496*699cd480SApple OSS Distributions // No existing object, and no object to set
2497*699cd480SApple OSS Distributions return;
2498*699cd480SApple OSS Distributions }
2499*699cd480SApple OSS Distributions
2500*699cd480SApple OSS Distributions IOMemoryDescriptorReserved * reserved = getKernelReserved();
2501*699cd480SApple OSS Distributions if (reserved) {
2502*699cd480SApple OSS Distributions OSObject * oldObject = reserved->contextObject;
2503*699cd480SApple OSS Distributions if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
2504*699cd480SApple OSS Distributions oldObject->release();
2505*699cd480SApple OSS Distributions }
2506*699cd480SApple OSS Distributions if (obj != NULL) {
2507*699cd480SApple OSS Distributions obj->retain();
2508*699cd480SApple OSS Distributions reserved->contextObject = obj;
2509*699cd480SApple OSS Distributions }
2510*699cd480SApple OSS Distributions }
2511*699cd480SApple OSS Distributions }
2512*699cd480SApple OSS Distributions
2513*699cd480SApple OSS Distributions #ifndef __LP64__
2514*699cd480SApple OSS Distributions #pragma clang diagnostic push
2515*699cd480SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2516*699cd480SApple OSS Distributions
2517*699cd480SApple OSS Distributions // @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
2518*699cd480SApple OSS Distributions IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2519*699cd480SApple OSS Distributions IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
2520*699cd480SApple OSS Distributions {
2521*699cd480SApple OSS Distributions addr64_t physAddr = 0;
2522*699cd480SApple OSS Distributions
2523*699cd480SApple OSS Distributions if (prepare() == kIOReturnSuccess) {
2524*699cd480SApple OSS Distributions physAddr = getPhysicalSegment64( offset, length );
2525*699cd480SApple OSS Distributions complete();
2526*699cd480SApple OSS Distributions }
2527*699cd480SApple OSS Distributions
2528*699cd480SApple OSS Distributions return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2529*699cd480SApple OSS Distributions }
2530*699cd480SApple OSS Distributions
2531*699cd480SApple OSS Distributions #pragma clang diagnostic pop
2532*699cd480SApple OSS Distributions
2533*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2534*699cd480SApple OSS Distributions
2535*699cd480SApple OSS Distributions IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)2536*699cd480SApple OSS Distributions IOMemoryDescriptor::readBytes
2537*699cd480SApple OSS Distributions (IOByteCount offset, void *bytes, IOByteCount length)
2538*699cd480SApple OSS Distributions {
2539*699cd480SApple OSS Distributions addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2540*699cd480SApple OSS Distributions IOByteCount endoffset;
2541*699cd480SApple OSS Distributions IOByteCount remaining;
2542*699cd480SApple OSS Distributions
2543*699cd480SApple OSS Distributions
2544*699cd480SApple OSS Distributions // Check that this entire I/O is within the available range
2545*699cd480SApple OSS Distributions if ((offset > _length)
2546*699cd480SApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2547*699cd480SApple OSS Distributions || (endoffset > _length)) {
2548*699cd480SApple OSS Distributions assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2549*699cd480SApple OSS Distributions return 0;
2550*699cd480SApple OSS Distributions }
2551*699cd480SApple OSS Distributions if (offset >= _length) {
2552*699cd480SApple OSS Distributions return 0;
2553*699cd480SApple OSS Distributions }
2554*699cd480SApple OSS Distributions
2555*699cd480SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2556*699cd480SApple OSS Distributions if (kIOMemoryRemote & _flags) {
2557*699cd480SApple OSS Distributions return 0;
2558*699cd480SApple OSS Distributions }
2559*699cd480SApple OSS Distributions
2560*699cd480SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2561*699cd480SApple OSS Distributions LOCK;
2562*699cd480SApple OSS Distributions }
2563*699cd480SApple OSS Distributions
2564*699cd480SApple OSS Distributions remaining = length = min(length, _length - offset);
2565*699cd480SApple OSS Distributions while (remaining) { // (process another target segment?)
2566*699cd480SApple OSS Distributions addr64_t srcAddr64;
2567*699cd480SApple OSS Distributions IOByteCount srcLen;
2568*699cd480SApple OSS Distributions
2569*699cd480SApple OSS Distributions srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2570*699cd480SApple OSS Distributions if (!srcAddr64) {
2571*699cd480SApple OSS Distributions break;
2572*699cd480SApple OSS Distributions }
2573*699cd480SApple OSS Distributions
2574*699cd480SApple OSS Distributions // Clip segment length to remaining
2575*699cd480SApple OSS Distributions if (srcLen > remaining) {
2576*699cd480SApple OSS Distributions srcLen = remaining;
2577*699cd480SApple OSS Distributions }
2578*699cd480SApple OSS Distributions
2579*699cd480SApple OSS Distributions if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2580*699cd480SApple OSS Distributions srcLen = (UINT_MAX - PAGE_SIZE + 1);
2581*699cd480SApple OSS Distributions }
2582*699cd480SApple OSS Distributions copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2583*699cd480SApple OSS Distributions cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2584*699cd480SApple OSS Distributions
2585*699cd480SApple OSS Distributions dstAddr += srcLen;
2586*699cd480SApple OSS Distributions offset += srcLen;
2587*699cd480SApple OSS Distributions remaining -= srcLen;
2588*699cd480SApple OSS Distributions }
2589*699cd480SApple OSS Distributions
2590*699cd480SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2591*699cd480SApple OSS Distributions UNLOCK;
2592*699cd480SApple OSS Distributions }
2593*699cd480SApple OSS Distributions
2594*699cd480SApple OSS Distributions assert(!remaining);
2595*699cd480SApple OSS Distributions
2596*699cd480SApple OSS Distributions return length - remaining;
2597*699cd480SApple OSS Distributions }
2598*699cd480SApple OSS Distributions
2599*699cd480SApple OSS Distributions IOByteCount
writeBytes(IOByteCount inoffset,const void * bytes,IOByteCount length)2600*699cd480SApple OSS Distributions IOMemoryDescriptor::writeBytes
2601*699cd480SApple OSS Distributions (IOByteCount inoffset, const void *bytes, IOByteCount length)
2602*699cd480SApple OSS Distributions {
2603*699cd480SApple OSS Distributions addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2604*699cd480SApple OSS Distributions IOByteCount remaining;
2605*699cd480SApple OSS Distributions IOByteCount endoffset;
2606*699cd480SApple OSS Distributions IOByteCount offset = inoffset;
2607*699cd480SApple OSS Distributions
2608*699cd480SApple OSS Distributions assert( !(kIOMemoryPreparedReadOnly & _flags));
2609*699cd480SApple OSS Distributions
2610*699cd480SApple OSS Distributions // Check that this entire I/O is within the available range
2611*699cd480SApple OSS Distributions if ((offset > _length)
2612*699cd480SApple OSS Distributions || os_add_overflow(length, offset, &endoffset)
2613*699cd480SApple OSS Distributions || (endoffset > _length)) {
2614*699cd480SApple OSS Distributions assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2615*699cd480SApple OSS Distributions return 0;
2616*699cd480SApple OSS Distributions }
2617*699cd480SApple OSS Distributions if (kIOMemoryPreparedReadOnly & _flags) {
2618*699cd480SApple OSS Distributions return 0;
2619*699cd480SApple OSS Distributions }
2620*699cd480SApple OSS Distributions if (offset >= _length) {
2621*699cd480SApple OSS Distributions return 0;
2622*699cd480SApple OSS Distributions }
2623*699cd480SApple OSS Distributions
2624*699cd480SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
2625*699cd480SApple OSS Distributions if (kIOMemoryRemote & _flags) {
2626*699cd480SApple OSS Distributions return 0;
2627*699cd480SApple OSS Distributions }
2628*699cd480SApple OSS Distributions
2629*699cd480SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2630*699cd480SApple OSS Distributions LOCK;
2631*699cd480SApple OSS Distributions }
2632*699cd480SApple OSS Distributions
2633*699cd480SApple OSS Distributions remaining = length = min(length, _length - offset);
2634*699cd480SApple OSS Distributions while (remaining) { // (process another target segment?)
2635*699cd480SApple OSS Distributions addr64_t dstAddr64;
2636*699cd480SApple OSS Distributions IOByteCount dstLen;
2637*699cd480SApple OSS Distributions
2638*699cd480SApple OSS Distributions dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2639*699cd480SApple OSS Distributions if (!dstAddr64) {
2640*699cd480SApple OSS Distributions break;
2641*699cd480SApple OSS Distributions }
2642*699cd480SApple OSS Distributions
2643*699cd480SApple OSS Distributions // Clip segment length to remaining
2644*699cd480SApple OSS Distributions if (dstLen > remaining) {
2645*699cd480SApple OSS Distributions dstLen = remaining;
2646*699cd480SApple OSS Distributions }
2647*699cd480SApple OSS Distributions
2648*699cd480SApple OSS Distributions if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2649*699cd480SApple OSS Distributions dstLen = (UINT_MAX - PAGE_SIZE + 1);
2650*699cd480SApple OSS Distributions }
2651*699cd480SApple OSS Distributions if (!srcAddr) {
2652*699cd480SApple OSS Distributions bzero_phys(dstAddr64, (unsigned int) dstLen);
2653*699cd480SApple OSS Distributions } else {
2654*699cd480SApple OSS Distributions copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2655*699cd480SApple OSS Distributions cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2656*699cd480SApple OSS Distributions srcAddr += dstLen;
2657*699cd480SApple OSS Distributions }
2658*699cd480SApple OSS Distributions offset += dstLen;
2659*699cd480SApple OSS Distributions remaining -= dstLen;
2660*699cd480SApple OSS Distributions }
2661*699cd480SApple OSS Distributions
2662*699cd480SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
2663*699cd480SApple OSS Distributions UNLOCK;
2664*699cd480SApple OSS Distributions }
2665*699cd480SApple OSS Distributions
2666*699cd480SApple OSS Distributions assert(!remaining);
2667*699cd480SApple OSS Distributions
2668*699cd480SApple OSS Distributions #if defined(__x86_64__)
2669*699cd480SApple OSS Distributions // copypv does not cppvFsnk on intel
2670*699cd480SApple OSS Distributions #else
2671*699cd480SApple OSS Distributions if (!srcAddr) {
2672*699cd480SApple OSS Distributions performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2673*699cd480SApple OSS Distributions }
2674*699cd480SApple OSS Distributions #endif
2675*699cd480SApple OSS Distributions
2676*699cd480SApple OSS Distributions return length - remaining;
2677*699cd480SApple OSS Distributions }
2678*699cd480SApple OSS Distributions
2679*699cd480SApple OSS Distributions #ifndef __LP64__
2680*699cd480SApple OSS Distributions void
setPosition(IOByteCount position)2681*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2682*699cd480SApple OSS Distributions {
2683*699cd480SApple OSS Distributions panic("IOGMD::setPosition deprecated");
2684*699cd480SApple OSS Distributions }
2685*699cd480SApple OSS Distributions #endif /* !__LP64__ */
2686*699cd480SApple OSS Distributions
2687*699cd480SApple OSS Distributions static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2688*699cd480SApple OSS Distributions static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2689*699cd480SApple OSS Distributions
2690*699cd480SApple OSS Distributions uint64_t
getPreparationID(void)2691*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::getPreparationID( void )
2692*699cd480SApple OSS Distributions {
2693*699cd480SApple OSS Distributions ioGMDData *dataP;
2694*699cd480SApple OSS Distributions
2695*699cd480SApple OSS Distributions if (!_wireCount) {
2696*699cd480SApple OSS Distributions return kIOPreparationIDUnprepared;
2697*699cd480SApple OSS Distributions }
2698*699cd480SApple OSS Distributions
2699*699cd480SApple OSS Distributions if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2700*699cd480SApple OSS Distributions || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2701*699cd480SApple OSS Distributions IOMemoryDescriptor::setPreparationID();
2702*699cd480SApple OSS Distributions return IOMemoryDescriptor::getPreparationID();
2703*699cd480SApple OSS Distributions }
2704*699cd480SApple OSS Distributions
2705*699cd480SApple OSS Distributions if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2706*699cd480SApple OSS Distributions return kIOPreparationIDUnprepared;
2707*699cd480SApple OSS Distributions }
2708*699cd480SApple OSS Distributions
2709*699cd480SApple OSS Distributions if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2710*699cd480SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2711*699cd480SApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2712*699cd480SApple OSS Distributions }
2713*699cd480SApple OSS Distributions return dataP->fPreparationID;
2714*699cd480SApple OSS Distributions }
2715*699cd480SApple OSS Distributions
2716*699cd480SApple OSS Distributions void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2717*699cd480SApple OSS Distributions IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2718*699cd480SApple OSS Distributions {
2719*699cd480SApple OSS Distributions if (reserved->creator) {
2720*699cd480SApple OSS Distributions task_deallocate(reserved->creator);
2721*699cd480SApple OSS Distributions reserved->creator = NULL;
2722*699cd480SApple OSS Distributions }
2723*699cd480SApple OSS Distributions
2724*699cd480SApple OSS Distributions if (reserved->contextObject) {
2725*699cd480SApple OSS Distributions reserved->contextObject->release();
2726*699cd480SApple OSS Distributions reserved->contextObject = NULL;
2727*699cd480SApple OSS Distributions }
2728*699cd480SApple OSS Distributions }
2729*699cd480SApple OSS Distributions
2730*699cd480SApple OSS Distributions IOMemoryDescriptorReserved *
getKernelReserved(void)2731*699cd480SApple OSS Distributions IOMemoryDescriptor::getKernelReserved( void )
2732*699cd480SApple OSS Distributions {
2733*699cd480SApple OSS Distributions if (!reserved) {
2734*699cd480SApple OSS Distributions reserved = IOMallocType(IOMemoryDescriptorReserved);
2735*699cd480SApple OSS Distributions }
2736*699cd480SApple OSS Distributions return reserved;
2737*699cd480SApple OSS Distributions }
2738*699cd480SApple OSS Distributions
2739*699cd480SApple OSS Distributions void
setPreparationID(void)2740*699cd480SApple OSS Distributions IOMemoryDescriptor::setPreparationID( void )
2741*699cd480SApple OSS Distributions {
2742*699cd480SApple OSS Distributions if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2743*699cd480SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2744*699cd480SApple OSS Distributions OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2745*699cd480SApple OSS Distributions }
2746*699cd480SApple OSS Distributions }
2747*699cd480SApple OSS Distributions
2748*699cd480SApple OSS Distributions uint64_t
getPreparationID(void)2749*699cd480SApple OSS Distributions IOMemoryDescriptor::getPreparationID( void )
2750*699cd480SApple OSS Distributions {
2751*699cd480SApple OSS Distributions if (reserved) {
2752*699cd480SApple OSS Distributions return reserved->preparationID;
2753*699cd480SApple OSS Distributions } else {
2754*699cd480SApple OSS Distributions return kIOPreparationIDUnsupported;
2755*699cd480SApple OSS Distributions }
2756*699cd480SApple OSS Distributions }
2757*699cd480SApple OSS Distributions
2758*699cd480SApple OSS Distributions void
setDescriptorID(void)2759*699cd480SApple OSS Distributions IOMemoryDescriptor::setDescriptorID( void )
2760*699cd480SApple OSS Distributions {
2761*699cd480SApple OSS Distributions if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2762*699cd480SApple OSS Distributions SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2763*699cd480SApple OSS Distributions OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2764*699cd480SApple OSS Distributions }
2765*699cd480SApple OSS Distributions }
2766*699cd480SApple OSS Distributions
2767*699cd480SApple OSS Distributions uint64_t
getDescriptorID(void)2768*699cd480SApple OSS Distributions IOMemoryDescriptor::getDescriptorID( void )
2769*699cd480SApple OSS Distributions {
2770*699cd480SApple OSS Distributions setDescriptorID();
2771*699cd480SApple OSS Distributions
2772*699cd480SApple OSS Distributions if (reserved) {
2773*699cd480SApple OSS Distributions return reserved->descriptorID;
2774*699cd480SApple OSS Distributions } else {
2775*699cd480SApple OSS Distributions return kIODescriptorIDInvalid;
2776*699cd480SApple OSS Distributions }
2777*699cd480SApple OSS Distributions }
2778*699cd480SApple OSS Distributions
2779*699cd480SApple OSS Distributions IOReturn
ktraceEmitPhysicalSegments(void)2780*699cd480SApple OSS Distributions IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2781*699cd480SApple OSS Distributions {
2782*699cd480SApple OSS Distributions if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2783*699cd480SApple OSS Distributions return kIOReturnSuccess;
2784*699cd480SApple OSS Distributions }
2785*699cd480SApple OSS Distributions
2786*699cd480SApple OSS Distributions assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2787*699cd480SApple OSS Distributions if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2788*699cd480SApple OSS Distributions return kIOReturnBadArgument;
2789*699cd480SApple OSS Distributions }
2790*699cd480SApple OSS Distributions
2791*699cd480SApple OSS Distributions uint64_t descriptorID = getDescriptorID();
2792*699cd480SApple OSS Distributions assert(descriptorID != kIODescriptorIDInvalid);
2793*699cd480SApple OSS Distributions if (getDescriptorID() == kIODescriptorIDInvalid) {
2794*699cd480SApple OSS Distributions return kIOReturnBadArgument;
2795*699cd480SApple OSS Distributions }
2796*699cd480SApple OSS Distributions
2797*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2798*699cd480SApple OSS Distributions
2799*699cd480SApple OSS Distributions #if __LP64__
2800*699cd480SApple OSS Distributions static const uint8_t num_segments_page = 8;
2801*699cd480SApple OSS Distributions #else
2802*699cd480SApple OSS Distributions static const uint8_t num_segments_page = 4;
2803*699cd480SApple OSS Distributions #endif
2804*699cd480SApple OSS Distributions static const uint8_t num_segments_long = 2;
2805*699cd480SApple OSS Distributions
2806*699cd480SApple OSS Distributions IOPhysicalAddress segments_page[num_segments_page];
2807*699cd480SApple OSS Distributions IOPhysicalRange segments_long[num_segments_long];
2808*699cd480SApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2809*699cd480SApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2810*699cd480SApple OSS Distributions
2811*699cd480SApple OSS Distributions uint8_t segment_page_idx = 0;
2812*699cd480SApple OSS Distributions uint8_t segment_long_idx = 0;
2813*699cd480SApple OSS Distributions
2814*699cd480SApple OSS Distributions IOPhysicalRange physical_segment;
2815*699cd480SApple OSS Distributions for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2816*699cd480SApple OSS Distributions physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2817*699cd480SApple OSS Distributions
2818*699cd480SApple OSS Distributions if (physical_segment.length == 0) {
2819*699cd480SApple OSS Distributions break;
2820*699cd480SApple OSS Distributions }
2821*699cd480SApple OSS Distributions
2822*699cd480SApple OSS Distributions /**
2823*699cd480SApple OSS Distributions * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2824*699cd480SApple OSS Distributions * buffer memory, pack segment events according to the following.
2825*699cd480SApple OSS Distributions *
2826*699cd480SApple OSS Distributions * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
2827*699cd480SApple OSS Distributions * IOMDPA_MAPPED event emitted on by the current thread_id.
2828*699cd480SApple OSS Distributions *
2829*699cd480SApple OSS Distributions * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2830*699cd480SApple OSS Distributions * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2831*699cd480SApple OSS Distributions * - unmapped pages will have a ppn of MAX_INT_32
2832*699cd480SApple OSS Distributions * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
2833*699cd480SApple OSS Distributions * - address_0, length_0, address_0, length_1
2834*699cd480SApple OSS Distributions * - unmapped pages will have an address of 0
2835*699cd480SApple OSS Distributions *
2836*699cd480SApple OSS Distributions * During each iteration do the following depending on the length of the mapping:
2837*699cd480SApple OSS Distributions * 1. add the current segment to the appropriate queue of pending segments
2838*699cd480SApple OSS Distributions * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2839*699cd480SApple OSS Distributions * 1a. if FALSE emit and reset all events in the previous queue
2840*699cd480SApple OSS Distributions * 2. check if we have filled up the current queue of pending events
2841*699cd480SApple OSS Distributions * 2a. if TRUE emit and reset all events in the pending queue
2842*699cd480SApple OSS Distributions * 3. after completing all iterations emit events in the current queue
2843*699cd480SApple OSS Distributions */
2844*699cd480SApple OSS Distributions
2845*699cd480SApple OSS Distributions bool emit_page = false;
2846*699cd480SApple OSS Distributions bool emit_long = false;
2847*699cd480SApple OSS Distributions if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2848*699cd480SApple OSS Distributions segments_page[segment_page_idx] = physical_segment.address;
2849*699cd480SApple OSS Distributions segment_page_idx++;
2850*699cd480SApple OSS Distributions
2851*699cd480SApple OSS Distributions emit_long = segment_long_idx != 0;
2852*699cd480SApple OSS Distributions emit_page = segment_page_idx == num_segments_page;
2853*699cd480SApple OSS Distributions
2854*699cd480SApple OSS Distributions if (os_unlikely(emit_long)) {
2855*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2856*699cd480SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2857*699cd480SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2858*699cd480SApple OSS Distributions }
2859*699cd480SApple OSS Distributions
2860*699cd480SApple OSS Distributions if (os_unlikely(emit_page)) {
2861*699cd480SApple OSS Distributions #if __LP64__
2862*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2863*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2864*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2865*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2866*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2867*699cd480SApple OSS Distributions #else
2868*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2869*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2870*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2871*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2872*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2873*699cd480SApple OSS Distributions #endif
2874*699cd480SApple OSS Distributions }
2875*699cd480SApple OSS Distributions } else {
2876*699cd480SApple OSS Distributions segments_long[segment_long_idx] = physical_segment;
2877*699cd480SApple OSS Distributions segment_long_idx++;
2878*699cd480SApple OSS Distributions
2879*699cd480SApple OSS Distributions emit_page = segment_page_idx != 0;
2880*699cd480SApple OSS Distributions emit_long = segment_long_idx == num_segments_long;
2881*699cd480SApple OSS Distributions
2882*699cd480SApple OSS Distributions if (os_unlikely(emit_page)) {
2883*699cd480SApple OSS Distributions #if __LP64__
2884*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2885*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2886*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2887*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2888*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2889*699cd480SApple OSS Distributions #else
2890*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2891*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2892*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2893*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2894*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2895*699cd480SApple OSS Distributions #endif
2896*699cd480SApple OSS Distributions }
2897*699cd480SApple OSS Distributions
2898*699cd480SApple OSS Distributions if (emit_long) {
2899*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2900*699cd480SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2901*699cd480SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2902*699cd480SApple OSS Distributions }
2903*699cd480SApple OSS Distributions }
2904*699cd480SApple OSS Distributions
2905*699cd480SApple OSS Distributions if (os_unlikely(emit_page)) {
2906*699cd480SApple OSS Distributions memset(segments_page, UINT32_MAX, sizeof(segments_page));
2907*699cd480SApple OSS Distributions segment_page_idx = 0;
2908*699cd480SApple OSS Distributions }
2909*699cd480SApple OSS Distributions
2910*699cd480SApple OSS Distributions if (os_unlikely(emit_long)) {
2911*699cd480SApple OSS Distributions memset(segments_long, 0, sizeof(segments_long));
2912*699cd480SApple OSS Distributions segment_long_idx = 0;
2913*699cd480SApple OSS Distributions }
2914*699cd480SApple OSS Distributions }
2915*699cd480SApple OSS Distributions
2916*699cd480SApple OSS Distributions if (segment_page_idx != 0) {
2917*699cd480SApple OSS Distributions assert(segment_long_idx == 0);
2918*699cd480SApple OSS Distributions #if __LP64__
2919*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2920*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2921*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2922*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2923*699cd480SApple OSS Distributions ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2924*699cd480SApple OSS Distributions #else
2925*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2926*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[1]),
2927*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[2]),
2928*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[3]),
2929*699cd480SApple OSS Distributions (ppnum_t) atop_32(segments_page[4]));
2930*699cd480SApple OSS Distributions #endif
2931*699cd480SApple OSS Distributions } else if (segment_long_idx != 0) {
2932*699cd480SApple OSS Distributions assert(segment_page_idx == 0);
2933*699cd480SApple OSS Distributions IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2934*699cd480SApple OSS Distributions segments_long[0].address, segments_long[0].length,
2935*699cd480SApple OSS Distributions segments_long[1].address, segments_long[1].length);
2936*699cd480SApple OSS Distributions }
2937*699cd480SApple OSS Distributions
2938*699cd480SApple OSS Distributions return kIOReturnSuccess;
2939*699cd480SApple OSS Distributions }
2940*699cd480SApple OSS Distributions
2941*699cd480SApple OSS Distributions void
setVMTags(uint32_t kernelTag,uint32_t userTag)2942*699cd480SApple OSS Distributions IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2943*699cd480SApple OSS Distributions {
2944*699cd480SApple OSS Distributions _kernelTag = (vm_tag_t) kernelTag;
2945*699cd480SApple OSS Distributions _userTag = (vm_tag_t) userTag;
2946*699cd480SApple OSS Distributions }
2947*699cd480SApple OSS Distributions
2948*699cd480SApple OSS Distributions uint32_t
getVMTag(vm_map_t map)2949*699cd480SApple OSS Distributions IOMemoryDescriptor::getVMTag(vm_map_t map)
2950*699cd480SApple OSS Distributions {
2951*699cd480SApple OSS Distributions if (vm_kernel_map_is_kernel(map)) {
2952*699cd480SApple OSS Distributions if (VM_KERN_MEMORY_NONE != _kernelTag) {
2953*699cd480SApple OSS Distributions return (uint32_t) _kernelTag;
2954*699cd480SApple OSS Distributions }
2955*699cd480SApple OSS Distributions } else {
2956*699cd480SApple OSS Distributions if (VM_KERN_MEMORY_NONE != _userTag) {
2957*699cd480SApple OSS Distributions return (uint32_t) _userTag;
2958*699cd480SApple OSS Distributions }
2959*699cd480SApple OSS Distributions }
2960*699cd480SApple OSS Distributions return IOMemoryTag(map);
2961*699cd480SApple OSS Distributions }
2962*699cd480SApple OSS Distributions
2963*699cd480SApple OSS Distributions IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2964*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2965*699cd480SApple OSS Distributions {
2966*699cd480SApple OSS Distributions IOReturn err = kIOReturnSuccess;
2967*699cd480SApple OSS Distributions DMACommandOps params;
2968*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2969*699cd480SApple OSS Distributions ioGMDData *dataP;
2970*699cd480SApple OSS Distributions
2971*699cd480SApple OSS Distributions params = (op & ~kIOMDDMACommandOperationMask & op);
2972*699cd480SApple OSS Distributions op &= kIOMDDMACommandOperationMask;
2973*699cd480SApple OSS Distributions
2974*699cd480SApple OSS Distributions if (kIOMDDMAMap == op) {
2975*699cd480SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
2976*699cd480SApple OSS Distributions return kIOReturnUnderrun;
2977*699cd480SApple OSS Distributions }
2978*699cd480SApple OSS Distributions
2979*699cd480SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2980*699cd480SApple OSS Distributions
2981*699cd480SApple OSS Distributions if (!_memoryEntries
2982*699cd480SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2983*699cd480SApple OSS Distributions return kIOReturnNoMemory;
2984*699cd480SApple OSS Distributions }
2985*699cd480SApple OSS Distributions
2986*699cd480SApple OSS Distributions if (_memoryEntries && data->fMapper) {
2987*699cd480SApple OSS Distributions bool remap, keepMap;
2988*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
2989*699cd480SApple OSS Distributions
2990*699cd480SApple OSS Distributions if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2991*699cd480SApple OSS Distributions dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2992*699cd480SApple OSS Distributions }
2993*699cd480SApple OSS Distributions if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2994*699cd480SApple OSS Distributions dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2995*699cd480SApple OSS Distributions }
2996*699cd480SApple OSS Distributions
2997*699cd480SApple OSS Distributions keepMap = (data->fMapper == gIOSystemMapper);
2998*699cd480SApple OSS Distributions keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2999*699cd480SApple OSS Distributions
3000*699cd480SApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3001*699cd480SApple OSS Distributions IOLockLock(_prepareLock);
3002*699cd480SApple OSS Distributions }
3003*699cd480SApple OSS Distributions
3004*699cd480SApple OSS Distributions remap = (!keepMap);
3005*699cd480SApple OSS Distributions remap |= (dataP->fDMAMapNumAddressBits < 64)
3006*699cd480SApple OSS Distributions && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3007*699cd480SApple OSS Distributions remap |= (dataP->fDMAMapAlignment > page_size);
3008*699cd480SApple OSS Distributions
3009*699cd480SApple OSS Distributions if (remap || !dataP->fMappedBaseValid) {
3010*699cd480SApple OSS Distributions err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3011*699cd480SApple OSS Distributions if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3012*699cd480SApple OSS Distributions dataP->fMappedBase = data->fAlloc;
3013*699cd480SApple OSS Distributions dataP->fMappedBaseValid = true;
3014*699cd480SApple OSS Distributions dataP->fMappedLength = data->fAllocLength;
3015*699cd480SApple OSS Distributions data->fAllocLength = 0; // IOMD owns the alloc now
3016*699cd480SApple OSS Distributions }
3017*699cd480SApple OSS Distributions } else {
3018*699cd480SApple OSS Distributions data->fAlloc = dataP->fMappedBase;
3019*699cd480SApple OSS Distributions data->fAllocLength = 0; // give out IOMD map
3020*699cd480SApple OSS Distributions md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3021*699cd480SApple OSS Distributions }
3022*699cd480SApple OSS Distributions
3023*699cd480SApple OSS Distributions if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3024*699cd480SApple OSS Distributions IOLockUnlock(_prepareLock);
3025*699cd480SApple OSS Distributions }
3026*699cd480SApple OSS Distributions }
3027*699cd480SApple OSS Distributions return err;
3028*699cd480SApple OSS Distributions }
3029*699cd480SApple OSS Distributions if (kIOMDDMAUnmap == op) {
3030*699cd480SApple OSS Distributions if (dataSize < sizeof(IOMDDMAMapArgs)) {
3031*699cd480SApple OSS Distributions return kIOReturnUnderrun;
3032*699cd480SApple OSS Distributions }
3033*699cd480SApple OSS Distributions IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3034*699cd480SApple OSS Distributions
3035*699cd480SApple OSS Distributions err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3036*699cd480SApple OSS Distributions
3037*699cd480SApple OSS Distributions return kIOReturnSuccess;
3038*699cd480SApple OSS Distributions }
3039*699cd480SApple OSS Distributions
3040*699cd480SApple OSS Distributions if (kIOMDAddDMAMapSpec == op) {
3041*699cd480SApple OSS Distributions if (dataSize < sizeof(IODMAMapSpecification)) {
3042*699cd480SApple OSS Distributions return kIOReturnUnderrun;
3043*699cd480SApple OSS Distributions }
3044*699cd480SApple OSS Distributions
3045*699cd480SApple OSS Distributions IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3046*699cd480SApple OSS Distributions
3047*699cd480SApple OSS Distributions if (!_memoryEntries
3048*699cd480SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3049*699cd480SApple OSS Distributions return kIOReturnNoMemory;
3050*699cd480SApple OSS Distributions }
3051*699cd480SApple OSS Distributions
3052*699cd480SApple OSS Distributions if (_memoryEntries) {
3053*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
3054*699cd480SApple OSS Distributions if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3055*699cd480SApple OSS Distributions dataP->fDMAMapNumAddressBits = data->numAddressBits;
3056*699cd480SApple OSS Distributions }
3057*699cd480SApple OSS Distributions if (data->alignment > dataP->fDMAMapAlignment) {
3058*699cd480SApple OSS Distributions dataP->fDMAMapAlignment = data->alignment;
3059*699cd480SApple OSS Distributions }
3060*699cd480SApple OSS Distributions }
3061*699cd480SApple OSS Distributions return kIOReturnSuccess;
3062*699cd480SApple OSS Distributions }
3063*699cd480SApple OSS Distributions
3064*699cd480SApple OSS Distributions if (kIOMDGetCharacteristics == op) {
3065*699cd480SApple OSS Distributions if (dataSize < sizeof(IOMDDMACharacteristics)) {
3066*699cd480SApple OSS Distributions return kIOReturnUnderrun;
3067*699cd480SApple OSS Distributions }
3068*699cd480SApple OSS Distributions
3069*699cd480SApple OSS Distributions IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3070*699cd480SApple OSS Distributions data->fLength = _length;
3071*699cd480SApple OSS Distributions data->fSGCount = _rangesCount;
3072*699cd480SApple OSS Distributions data->fPages = _pages;
3073*699cd480SApple OSS Distributions data->fDirection = getDirection();
3074*699cd480SApple OSS Distributions if (!_wireCount) {
3075*699cd480SApple OSS Distributions data->fIsPrepared = false;
3076*699cd480SApple OSS Distributions } else {
3077*699cd480SApple OSS Distributions data->fIsPrepared = true;
3078*699cd480SApple OSS Distributions data->fHighestPage = _highestPage;
3079*699cd480SApple OSS Distributions if (_memoryEntries) {
3080*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
3081*699cd480SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
3082*699cd480SApple OSS Distributions UInt count = getNumIOPL(_memoryEntries, dataP);
3083*699cd480SApple OSS Distributions if (count == 1) {
3084*699cd480SApple OSS Distributions data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3085*699cd480SApple OSS Distributions }
3086*699cd480SApple OSS Distributions }
3087*699cd480SApple OSS Distributions }
3088*699cd480SApple OSS Distributions
3089*699cd480SApple OSS Distributions return kIOReturnSuccess;
3090*699cd480SApple OSS Distributions } else if (kIOMDDMAActive == op) {
3091*699cd480SApple OSS Distributions if (params) {
3092*699cd480SApple OSS Distributions int16_t prior;
3093*699cd480SApple OSS Distributions prior = OSAddAtomic16(1, &md->_dmaReferences);
3094*699cd480SApple OSS Distributions if (!prior) {
3095*699cd480SApple OSS Distributions md->_mapName = NULL;
3096*699cd480SApple OSS Distributions }
3097*699cd480SApple OSS Distributions } else {
3098*699cd480SApple OSS Distributions if (md->_dmaReferences) {
3099*699cd480SApple OSS Distributions OSAddAtomic16(-1, &md->_dmaReferences);
3100*699cd480SApple OSS Distributions } else {
3101*699cd480SApple OSS Distributions panic("_dmaReferences underflow");
3102*699cd480SApple OSS Distributions }
3103*699cd480SApple OSS Distributions }
3104*699cd480SApple OSS Distributions } else if (kIOMDWalkSegments != op) {
3105*699cd480SApple OSS Distributions return kIOReturnBadArgument;
3106*699cd480SApple OSS Distributions }
3107*699cd480SApple OSS Distributions
3108*699cd480SApple OSS Distributions // Get the next segment
3109*699cd480SApple OSS Distributions struct InternalState {
3110*699cd480SApple OSS Distributions IOMDDMAWalkSegmentArgs fIO;
3111*699cd480SApple OSS Distributions mach_vm_size_t fOffset2Index;
3112*699cd480SApple OSS Distributions mach_vm_size_t fNextOffset;
3113*699cd480SApple OSS Distributions UInt fIndex;
3114*699cd480SApple OSS Distributions } *isP;
3115*699cd480SApple OSS Distributions
3116*699cd480SApple OSS Distributions // Find the next segment
3117*699cd480SApple OSS Distributions if (dataSize < sizeof(*isP)) {
3118*699cd480SApple OSS Distributions return kIOReturnUnderrun;
3119*699cd480SApple OSS Distributions }
3120*699cd480SApple OSS Distributions
3121*699cd480SApple OSS Distributions isP = (InternalState *) vData;
3122*699cd480SApple OSS Distributions uint64_t offset = isP->fIO.fOffset;
3123*699cd480SApple OSS Distributions uint8_t mapped = isP->fIO.fMapped;
3124*699cd480SApple OSS Distributions uint64_t mappedBase;
3125*699cd480SApple OSS Distributions
3126*699cd480SApple OSS Distributions if (mapped && (kIOMemoryRemote & _flags)) {
3127*699cd480SApple OSS Distributions return kIOReturnNotAttached;
3128*699cd480SApple OSS Distributions }
3129*699cd480SApple OSS Distributions
3130*699cd480SApple OSS Distributions if (IOMapper::gSystem && mapped
3131*699cd480SApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3132*699cd480SApple OSS Distributions && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3133*699cd480SApple OSS Distributions // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3134*699cd480SApple OSS Distributions if (!_memoryEntries
3135*699cd480SApple OSS Distributions && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3136*699cd480SApple OSS Distributions return kIOReturnNoMemory;
3137*699cd480SApple OSS Distributions }
3138*699cd480SApple OSS Distributions
3139*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
3140*699cd480SApple OSS Distributions if (dataP->fMapper) {
3141*699cd480SApple OSS Distributions IODMAMapSpecification mapSpec;
3142*699cd480SApple OSS Distributions bzero(&mapSpec, sizeof(mapSpec));
3143*699cd480SApple OSS Distributions mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3144*699cd480SApple OSS Distributions mapSpec.alignment = dataP->fDMAMapAlignment;
3145*699cd480SApple OSS Distributions err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3146*699cd480SApple OSS Distributions if (kIOReturnSuccess != err) {
3147*699cd480SApple OSS Distributions return err;
3148*699cd480SApple OSS Distributions }
3149*699cd480SApple OSS Distributions dataP->fMappedBaseValid = true;
3150*699cd480SApple OSS Distributions }
3151*699cd480SApple OSS Distributions }
3152*699cd480SApple OSS Distributions
3153*699cd480SApple OSS Distributions if (mapped) {
3154*699cd480SApple OSS Distributions if (IOMapper::gSystem
3155*699cd480SApple OSS Distributions && (!(kIOMemoryHostOnly & _flags))
3156*699cd480SApple OSS Distributions && _memoryEntries
3157*699cd480SApple OSS Distributions && (dataP = getDataP(_memoryEntries))
3158*699cd480SApple OSS Distributions && dataP->fMappedBaseValid) {
3159*699cd480SApple OSS Distributions mappedBase = dataP->fMappedBase;
3160*699cd480SApple OSS Distributions } else {
3161*699cd480SApple OSS Distributions mapped = 0;
3162*699cd480SApple OSS Distributions }
3163*699cd480SApple OSS Distributions }
3164*699cd480SApple OSS Distributions
3165*699cd480SApple OSS Distributions if (offset >= _length) {
3166*699cd480SApple OSS Distributions return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3167*699cd480SApple OSS Distributions }
3168*699cd480SApple OSS Distributions
3169*699cd480SApple OSS Distributions // Validate the previous offset
3170*699cd480SApple OSS Distributions UInt ind;
3171*699cd480SApple OSS Distributions mach_vm_size_t off2Ind = isP->fOffset2Index;
3172*699cd480SApple OSS Distributions if (!params
3173*699cd480SApple OSS Distributions && offset
3174*699cd480SApple OSS Distributions && (offset == isP->fNextOffset || off2Ind <= offset)) {
3175*699cd480SApple OSS Distributions ind = isP->fIndex;
3176*699cd480SApple OSS Distributions } else {
3177*699cd480SApple OSS Distributions ind = off2Ind = 0; // Start from beginning
3178*699cd480SApple OSS Distributions }
3179*699cd480SApple OSS Distributions mach_vm_size_t length;
3180*699cd480SApple OSS Distributions UInt64 address;
3181*699cd480SApple OSS Distributions
3182*699cd480SApple OSS Distributions if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3183*699cd480SApple OSS Distributions // Physical address based memory descriptor
3184*699cd480SApple OSS Distributions const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3185*699cd480SApple OSS Distributions
3186*699cd480SApple OSS Distributions // Find the range after the one that contains the offset
3187*699cd480SApple OSS Distributions mach_vm_size_t len;
3188*699cd480SApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3189*699cd480SApple OSS Distributions len = physP[ind].length;
3190*699cd480SApple OSS Distributions off2Ind += len;
3191*699cd480SApple OSS Distributions }
3192*699cd480SApple OSS Distributions
3193*699cd480SApple OSS Distributions // Calculate length within range and starting address
3194*699cd480SApple OSS Distributions length = off2Ind - offset;
3195*699cd480SApple OSS Distributions address = physP[ind - 1].address + len - length;
3196*699cd480SApple OSS Distributions
3197*699cd480SApple OSS Distributions if (true && mapped) {
3198*699cd480SApple OSS Distributions address = mappedBase + offset;
3199*699cd480SApple OSS Distributions } else {
3200*699cd480SApple OSS Distributions // see how far we can coalesce ranges
3201*699cd480SApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3202*699cd480SApple OSS Distributions len = physP[ind].length;
3203*699cd480SApple OSS Distributions length += len;
3204*699cd480SApple OSS Distributions off2Ind += len;
3205*699cd480SApple OSS Distributions ind++;
3206*699cd480SApple OSS Distributions }
3207*699cd480SApple OSS Distributions }
3208*699cd480SApple OSS Distributions
3209*699cd480SApple OSS Distributions // correct contiguous check overshoot
3210*699cd480SApple OSS Distributions ind--;
3211*699cd480SApple OSS Distributions off2Ind -= len;
3212*699cd480SApple OSS Distributions }
3213*699cd480SApple OSS Distributions #ifndef __LP64__
3214*699cd480SApple OSS Distributions else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3215*699cd480SApple OSS Distributions // Physical address based memory descriptor
3216*699cd480SApple OSS Distributions const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3217*699cd480SApple OSS Distributions
3218*699cd480SApple OSS Distributions // Find the range after the one that contains the offset
3219*699cd480SApple OSS Distributions mach_vm_size_t len;
3220*699cd480SApple OSS Distributions for (len = 0; off2Ind <= offset; ind++) {
3221*699cd480SApple OSS Distributions len = physP[ind].length;
3222*699cd480SApple OSS Distributions off2Ind += len;
3223*699cd480SApple OSS Distributions }
3224*699cd480SApple OSS Distributions
3225*699cd480SApple OSS Distributions // Calculate length within range and starting address
3226*699cd480SApple OSS Distributions length = off2Ind - offset;
3227*699cd480SApple OSS Distributions address = physP[ind - 1].address + len - length;
3228*699cd480SApple OSS Distributions
3229*699cd480SApple OSS Distributions if (true && mapped) {
3230*699cd480SApple OSS Distributions address = mappedBase + offset;
3231*699cd480SApple OSS Distributions } else {
3232*699cd480SApple OSS Distributions // see how far we can coalesce ranges
3233*699cd480SApple OSS Distributions while (ind < _rangesCount && address + length == physP[ind].address) {
3234*699cd480SApple OSS Distributions len = physP[ind].length;
3235*699cd480SApple OSS Distributions length += len;
3236*699cd480SApple OSS Distributions off2Ind += len;
3237*699cd480SApple OSS Distributions ind++;
3238*699cd480SApple OSS Distributions }
3239*699cd480SApple OSS Distributions }
3240*699cd480SApple OSS Distributions // correct contiguous check overshoot
3241*699cd480SApple OSS Distributions ind--;
3242*699cd480SApple OSS Distributions off2Ind -= len;
3243*699cd480SApple OSS Distributions }
3244*699cd480SApple OSS Distributions #endif /* !__LP64__ */
3245*699cd480SApple OSS Distributions else {
3246*699cd480SApple OSS Distributions do {
3247*699cd480SApple OSS Distributions if (!_wireCount) {
3248*699cd480SApple OSS Distributions panic("IOGMD: not wired for the IODMACommand");
3249*699cd480SApple OSS Distributions }
3250*699cd480SApple OSS Distributions
3251*699cd480SApple OSS Distributions assert(_memoryEntries);
3252*699cd480SApple OSS Distributions
3253*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
3254*699cd480SApple OSS Distributions const ioPLBlock *ioplList = getIOPLList(dataP);
3255*699cd480SApple OSS Distributions UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3256*699cd480SApple OSS Distributions upl_page_info_t *pageList = getPageList(dataP);
3257*699cd480SApple OSS Distributions
3258*699cd480SApple OSS Distributions assert(numIOPLs > 0);
3259*699cd480SApple OSS Distributions
3260*699cd480SApple OSS Distributions // Scan through iopl info blocks looking for block containing offset
3261*699cd480SApple OSS Distributions while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3262*699cd480SApple OSS Distributions ind++;
3263*699cd480SApple OSS Distributions }
3264*699cd480SApple OSS Distributions
3265*699cd480SApple OSS Distributions // Go back to actual range as search goes past it
3266*699cd480SApple OSS Distributions ioPLBlock ioplInfo = ioplList[ind - 1];
3267*699cd480SApple OSS Distributions off2Ind = ioplInfo.fIOMDOffset;
3268*699cd480SApple OSS Distributions
3269*699cd480SApple OSS Distributions if (ind < numIOPLs) {
3270*699cd480SApple OSS Distributions length = ioplList[ind].fIOMDOffset;
3271*699cd480SApple OSS Distributions } else {
3272*699cd480SApple OSS Distributions length = _length;
3273*699cd480SApple OSS Distributions }
3274*699cd480SApple OSS Distributions length -= offset; // Remainder within iopl
3275*699cd480SApple OSS Distributions
3276*699cd480SApple OSS Distributions // Subtract offset till this iopl in total list
3277*699cd480SApple OSS Distributions offset -= off2Ind;
3278*699cd480SApple OSS Distributions
3279*699cd480SApple OSS Distributions // If a mapped address is requested and this is a pre-mapped IOPL
3280*699cd480SApple OSS Distributions // then just need to compute an offset relative to the mapped base.
3281*699cd480SApple OSS Distributions if (mapped) {
3282*699cd480SApple OSS Distributions offset += (ioplInfo.fPageOffset & PAGE_MASK);
3283*699cd480SApple OSS Distributions address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3284*699cd480SApple OSS Distributions continue; // Done leave do/while(false) now
3285*699cd480SApple OSS Distributions }
3286*699cd480SApple OSS Distributions
3287*699cd480SApple OSS Distributions // The offset is rebased into the current iopl.
3288*699cd480SApple OSS Distributions // Now add the iopl 1st page offset.
3289*699cd480SApple OSS Distributions offset += ioplInfo.fPageOffset;
3290*699cd480SApple OSS Distributions
3291*699cd480SApple OSS Distributions // For external UPLs the fPageInfo field points directly to
3292*699cd480SApple OSS Distributions // the upl's upl_page_info_t array.
3293*699cd480SApple OSS Distributions if (ioplInfo.fFlags & kIOPLExternUPL) {
3294*699cd480SApple OSS Distributions pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3295*699cd480SApple OSS Distributions } else {
3296*699cd480SApple OSS Distributions pageList = &pageList[ioplInfo.fPageInfo];
3297*699cd480SApple OSS Distributions }
3298*699cd480SApple OSS Distributions
3299*699cd480SApple OSS Distributions // Check for direct device non-paged memory
3300*699cd480SApple OSS Distributions if (ioplInfo.fFlags & kIOPLOnDevice) {
3301*699cd480SApple OSS Distributions address = ptoa_64(pageList->phys_addr) + offset;
3302*699cd480SApple OSS Distributions continue; // Done leave do/while(false) now
3303*699cd480SApple OSS Distributions }
3304*699cd480SApple OSS Distributions
3305*699cd480SApple OSS Distributions // Now we need compute the index into the pageList
3306*699cd480SApple OSS Distributions UInt pageInd = atop_32(offset);
3307*699cd480SApple OSS Distributions offset &= PAGE_MASK;
3308*699cd480SApple OSS Distributions
3309*699cd480SApple OSS Distributions // Compute the starting address of this segment
3310*699cd480SApple OSS Distributions IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3311*699cd480SApple OSS Distributions if (!pageAddr) {
3312*699cd480SApple OSS Distributions panic("!pageList phys_addr");
3313*699cd480SApple OSS Distributions }
3314*699cd480SApple OSS Distributions
3315*699cd480SApple OSS Distributions address = ptoa_64(pageAddr) + offset;
3316*699cd480SApple OSS Distributions
3317*699cd480SApple OSS Distributions // length is currently set to the length of the remainider of the iopl.
3318*699cd480SApple OSS Distributions // We need to check that the remainder of the iopl is contiguous.
3319*699cd480SApple OSS Distributions // This is indicated by pageList[ind].phys_addr being sequential.
3320*699cd480SApple OSS Distributions IOByteCount contigLength = PAGE_SIZE - offset;
3321*699cd480SApple OSS Distributions while (contigLength < length
3322*699cd480SApple OSS Distributions && ++pageAddr == pageList[++pageInd].phys_addr) {
3323*699cd480SApple OSS Distributions contigLength += PAGE_SIZE;
3324*699cd480SApple OSS Distributions }
3325*699cd480SApple OSS Distributions
3326*699cd480SApple OSS Distributions if (contigLength < length) {
3327*699cd480SApple OSS Distributions length = contigLength;
3328*699cd480SApple OSS Distributions }
3329*699cd480SApple OSS Distributions
3330*699cd480SApple OSS Distributions
3331*699cd480SApple OSS Distributions assert(address);
3332*699cd480SApple OSS Distributions assert(length);
3333*699cd480SApple OSS Distributions } while (false);
3334*699cd480SApple OSS Distributions }
3335*699cd480SApple OSS Distributions
3336*699cd480SApple OSS Distributions // Update return values and state
3337*699cd480SApple OSS Distributions isP->fIO.fIOVMAddr = address;
3338*699cd480SApple OSS Distributions isP->fIO.fLength = length;
3339*699cd480SApple OSS Distributions isP->fIndex = ind;
3340*699cd480SApple OSS Distributions isP->fOffset2Index = off2Ind;
3341*699cd480SApple OSS Distributions isP->fNextOffset = isP->fIO.fOffset + length;
3342*699cd480SApple OSS Distributions
3343*699cd480SApple OSS Distributions return kIOReturnSuccess;
3344*699cd480SApple OSS Distributions }
3345*699cd480SApple OSS Distributions
3346*699cd480SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3347*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3348*699cd480SApple OSS Distributions {
3349*699cd480SApple OSS Distributions IOReturn ret;
3350*699cd480SApple OSS Distributions mach_vm_address_t address = 0;
3351*699cd480SApple OSS Distributions mach_vm_size_t length = 0;
3352*699cd480SApple OSS Distributions IOMapper * mapper = gIOSystemMapper;
3353*699cd480SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
3354*699cd480SApple OSS Distributions
3355*699cd480SApple OSS Distributions if (lengthOfSegment) {
3356*699cd480SApple OSS Distributions *lengthOfSegment = 0;
3357*699cd480SApple OSS Distributions }
3358*699cd480SApple OSS Distributions
3359*699cd480SApple OSS Distributions if (offset >= _length) {
3360*699cd480SApple OSS Distributions return 0;
3361*699cd480SApple OSS Distributions }
3362*699cd480SApple OSS Distributions
3363*699cd480SApple OSS Distributions // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3364*699cd480SApple OSS Distributions // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3365*699cd480SApple OSS Distributions // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3366*699cd480SApple OSS Distributions // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3367*699cd480SApple OSS Distributions
3368*699cd480SApple OSS Distributions if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3369*699cd480SApple OSS Distributions unsigned rangesIndex = 0;
3370*699cd480SApple OSS Distributions Ranges vec = _ranges;
3371*699cd480SApple OSS Distributions mach_vm_address_t addr;
3372*699cd480SApple OSS Distributions
3373*699cd480SApple OSS Distributions // Find starting address within the vector of ranges
3374*699cd480SApple OSS Distributions for (;;) {
3375*699cd480SApple OSS Distributions getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
3376*699cd480SApple OSS Distributions if (offset < length) {
3377*699cd480SApple OSS Distributions break;
3378*699cd480SApple OSS Distributions }
3379*699cd480SApple OSS Distributions offset -= length; // (make offset relative)
3380*699cd480SApple OSS Distributions rangesIndex++;
3381*699cd480SApple OSS Distributions }
3382*699cd480SApple OSS Distributions
3383*699cd480SApple OSS Distributions // Now that we have the starting range,
3384*699cd480SApple OSS Distributions // lets find the last contiguous range
3385*699cd480SApple OSS Distributions addr += offset;
3386*699cd480SApple OSS Distributions length -= offset;
3387*699cd480SApple OSS Distributions
3388*699cd480SApple OSS Distributions for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3389*699cd480SApple OSS Distributions mach_vm_address_t newAddr;
3390*699cd480SApple OSS Distributions mach_vm_size_t newLen;
3391*699cd480SApple OSS Distributions
3392*699cd480SApple OSS Distributions getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
3393*699cd480SApple OSS Distributions if (addr + length != newAddr) {
3394*699cd480SApple OSS Distributions break;
3395*699cd480SApple OSS Distributions }
3396*699cd480SApple OSS Distributions length += newLen;
3397*699cd480SApple OSS Distributions }
3398*699cd480SApple OSS Distributions if (addr) {
3399*699cd480SApple OSS Distributions address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3400*699cd480SApple OSS Distributions }
3401*699cd480SApple OSS Distributions } else {
3402*699cd480SApple OSS Distributions IOMDDMAWalkSegmentState _state;
3403*699cd480SApple OSS Distributions IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3404*699cd480SApple OSS Distributions
3405*699cd480SApple OSS Distributions state->fOffset = offset;
3406*699cd480SApple OSS Distributions state->fLength = _length - offset;
3407*699cd480SApple OSS Distributions state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3408*699cd480SApple OSS Distributions
3409*699cd480SApple OSS Distributions ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3410*699cd480SApple OSS Distributions
3411*699cd480SApple OSS Distributions if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3412*699cd480SApple OSS Distributions DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3413*699cd480SApple OSS Distributions ret, this, state->fOffset,
3414*699cd480SApple OSS Distributions state->fIOVMAddr, state->fLength);
3415*699cd480SApple OSS Distributions }
3416*699cd480SApple OSS Distributions if (kIOReturnSuccess == ret) {
3417*699cd480SApple OSS Distributions address = state->fIOVMAddr;
3418*699cd480SApple OSS Distributions length = state->fLength;
3419*699cd480SApple OSS Distributions }
3420*699cd480SApple OSS Distributions
3421*699cd480SApple OSS Distributions // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3422*699cd480SApple OSS Distributions // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3423*699cd480SApple OSS Distributions
3424*699cd480SApple OSS Distributions if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3425*699cd480SApple OSS Distributions if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3426*699cd480SApple OSS Distributions addr64_t origAddr = address;
3427*699cd480SApple OSS Distributions IOByteCount origLen = length;
3428*699cd480SApple OSS Distributions
3429*699cd480SApple OSS Distributions address = mapper->mapToPhysicalAddress(origAddr);
3430*699cd480SApple OSS Distributions length = page_size - (address & (page_size - 1));
3431*699cd480SApple OSS Distributions while ((length < origLen)
3432*699cd480SApple OSS Distributions && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3433*699cd480SApple OSS Distributions length += page_size;
3434*699cd480SApple OSS Distributions }
3435*699cd480SApple OSS Distributions if (length > origLen) {
3436*699cd480SApple OSS Distributions length = origLen;
3437*699cd480SApple OSS Distributions }
3438*699cd480SApple OSS Distributions }
3439*699cd480SApple OSS Distributions }
3440*699cd480SApple OSS Distributions }
3441*699cd480SApple OSS Distributions
3442*699cd480SApple OSS Distributions if (!address) {
3443*699cd480SApple OSS Distributions length = 0;
3444*699cd480SApple OSS Distributions }
3445*699cd480SApple OSS Distributions
3446*699cd480SApple OSS Distributions if (lengthOfSegment) {
3447*699cd480SApple OSS Distributions *lengthOfSegment = length;
3448*699cd480SApple OSS Distributions }
3449*699cd480SApple OSS Distributions
3450*699cd480SApple OSS Distributions return address;
3451*699cd480SApple OSS Distributions }
3452*699cd480SApple OSS Distributions
3453*699cd480SApple OSS Distributions #ifndef __LP64__
3454*699cd480SApple OSS Distributions #pragma clang diagnostic push
3455*699cd480SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3456*699cd480SApple OSS Distributions
3457*699cd480SApple OSS Distributions addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3458*699cd480SApple OSS Distributions IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3459*699cd480SApple OSS Distributions {
3460*699cd480SApple OSS Distributions addr64_t address = 0;
3461*699cd480SApple OSS Distributions
3462*699cd480SApple OSS Distributions if (options & _kIOMemorySourceSegment) {
3463*699cd480SApple OSS Distributions address = getSourceSegment(offset, lengthOfSegment);
3464*699cd480SApple OSS Distributions } else if (options & kIOMemoryMapperNone) {
3465*699cd480SApple OSS Distributions address = getPhysicalSegment64(offset, lengthOfSegment);
3466*699cd480SApple OSS Distributions } else {
3467*699cd480SApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment);
3468*699cd480SApple OSS Distributions }
3469*699cd480SApple OSS Distributions
3470*699cd480SApple OSS Distributions return address;
3471*699cd480SApple OSS Distributions }
3472*699cd480SApple OSS Distributions #pragma clang diagnostic pop
3473*699cd480SApple OSS Distributions
3474*699cd480SApple OSS Distributions addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3475*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3476*699cd480SApple OSS Distributions {
3477*699cd480SApple OSS Distributions return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3478*699cd480SApple OSS Distributions }
3479*699cd480SApple OSS Distributions
3480*699cd480SApple OSS Distributions IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3481*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3482*699cd480SApple OSS Distributions {
3483*699cd480SApple OSS Distributions addr64_t address = 0;
3484*699cd480SApple OSS Distributions IOByteCount length = 0;
3485*699cd480SApple OSS Distributions
3486*699cd480SApple OSS Distributions address = getPhysicalSegment(offset, lengthOfSegment, 0);
3487*699cd480SApple OSS Distributions
3488*699cd480SApple OSS Distributions if (lengthOfSegment) {
3489*699cd480SApple OSS Distributions length = *lengthOfSegment;
3490*699cd480SApple OSS Distributions }
3491*699cd480SApple OSS Distributions
3492*699cd480SApple OSS Distributions if ((address + length) > 0x100000000ULL) {
3493*699cd480SApple OSS Distributions panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3494*699cd480SApple OSS Distributions address, (long) length, (getMetaClass())->getClassName());
3495*699cd480SApple OSS Distributions }
3496*699cd480SApple OSS Distributions
3497*699cd480SApple OSS Distributions return (IOPhysicalAddress) address;
3498*699cd480SApple OSS Distributions }
3499*699cd480SApple OSS Distributions
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// Legacy (!__LP64__) path: derive a 64-bit physical address from the
	// 32-bit getPhysicalSegment() result, undoing any system-mapper
	// translation so the caller gets a true physical address.
	IOPhysicalAddress phys32;
	IOByteCount length;
	addr64_t phys64;
	IOMapper * mapper = NULL;

	phys32 = getPhysicalSegment(offset, lengthOfSegment);
	if (!phys32) {
		return 0;
	}

	if (gIOSystemMapper) {
		mapper = gIOSystemMapper;
	}

	if (mapper) {
		IOByteCount origLen;

		// NOTE(review): this branch dereferences lengthOfSegment without
		// a NULL check (the non-mapper branch never touches it) —
		// presumably callers always pass a valid pointer here; confirm.
		phys64 = mapper->mapToPhysicalAddress(phys32);
		origLen = *lengthOfSegment;
		// Start with the bytes left in the first physical page, then
		// grow one page at a time while the mapper translation stays
		// physically contiguous; finally clip back to the reported length.
		length = page_size - (phys64 & (page_size - 1));
		while ((length < origLen)
		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
			length += page_size;
		}
		if (length > origLen) {
			length = origLen;
		}

		*lengthOfSegment = length;
	} else {
		// No system mapper: the 32-bit address is already physical.
		phys64 = (addr64_t) phys32;
	}

	return phys64;
}
3538*699cd480SApple OSS Distributions
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// Legacy (!__LP64__) wrapper: options == 0 selects the default
	// lookup and the result is narrowed to the 32-bit address type.
	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
}
3544*699cd480SApple OSS Distributions
IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	// Legacy (!__LP64__) wrapper: _kIOMemorySourceSegment requests the
	// descriptor's source (untranslated) range at 'offset'.
	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
}
3550*699cd480SApple OSS Distributions
3551*699cd480SApple OSS Distributions #pragma clang diagnostic push
3552*699cd480SApple OSS Distributions #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3553*699cd480SApple OSS Distributions
3554*699cd480SApple OSS Distributions void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3555*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3556*699cd480SApple OSS Distributions IOByteCount * lengthOfSegment)
3557*699cd480SApple OSS Distributions {
3558*699cd480SApple OSS Distributions if (_task == kernel_task) {
3559*699cd480SApple OSS Distributions return (void *) getSourceSegment(offset, lengthOfSegment);
3560*699cd480SApple OSS Distributions } else {
3561*699cd480SApple OSS Distributions panic("IOGMD::getVirtualSegment deprecated");
3562*699cd480SApple OSS Distributions }
3563*699cd480SApple OSS Distributions
3564*699cd480SApple OSS Distributions return NULL;
3565*699cd480SApple OSS Distributions }
3566*699cd480SApple OSS Distributions #pragma clang diagnostic pop
3567*699cd480SApple OSS Distributions #endif /* !__LP64__ */
3568*699cd480SApple OSS Distributions
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// Back-end for IODMACommand: dispatches on the operation selector in
	// the low bits of 'op'; 'vData' points at an op-specific argument
	// struct whose minimum size is checked against 'dataSize'.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Split off the per-op parameter bits from the selector.
	// NOTE(review): the trailing "& op" is redundant — (op & ~mask & op)
	// equals (op & ~mask) — and 'params' is never read below; confirm
	// whether this mirrors other implementations intentionally.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t addr, nextAddr;

		// This generic implementation cannot produce mapped (IOVA)
		// addresses; a request for them is a caller bug.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Fetch the first segment, then coalesce physically contiguous
		// successors into a single reported segment.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): 'err' from dmaUnmap is computed but discarded —
		// unmap failures are reported as success. Presumably deliberate
		// (unmap is best-effort); confirm before changing.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3640*699cd480SApple OSS Distributions
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	// Change the purgeable state of the memory backing this descriptor,
	// optionally reporting the previous state through 'oldState'.
	IOReturn err = kIOReturnSuccess;

	vm_purgable_t control;
	int state;

	// Remote (non-local) memory cannot be manipulated from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		// Backed by a memory reference: superclass handles it.
		err = super::setPurgeable(newState, oldState);
	} else {
		// Otherwise operate directly on the owning task's vm_map.
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		// do/while(false) is a break-out scaffold so every early exit
		// still reaches the UNLOCK below.
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					// NOTE(review): KERN_INVALID_ARGUMENT is a
					// kern_return_t constant, not an IOReturn — the mixed
					// error domain appears intentional; confirm.
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate the IOKit request into Mach control/state values.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Convert the returned Mach state back into IOKit bits.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3704*699cd480SApple OSS Distributions
3705*699cd480SApple OSS Distributions IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3706*699cd480SApple OSS Distributions IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3707*699cd480SApple OSS Distributions IOOptionBits * oldState )
3708*699cd480SApple OSS Distributions {
3709*699cd480SApple OSS Distributions IOReturn err = kIOReturnNotReady;
3710*699cd480SApple OSS Distributions
3711*699cd480SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3712*699cd480SApple OSS Distributions LOCK;
3713*699cd480SApple OSS Distributions }
3714*699cd480SApple OSS Distributions if (_memRef) {
3715*699cd480SApple OSS Distributions err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3716*699cd480SApple OSS Distributions }
3717*699cd480SApple OSS Distributions if (kIOMemoryThreadSafe & _flags) {
3718*699cd480SApple OSS Distributions UNLOCK;
3719*699cd480SApple OSS Distributions }
3720*699cd480SApple OSS Distributions
3721*699cd480SApple OSS Distributions return err;
3722*699cd480SApple OSS Distributions }
3723*699cd480SApple OSS Distributions
3724*699cd480SApple OSS Distributions IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3725*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3726*699cd480SApple OSS Distributions int newLedgerTag,
3727*699cd480SApple OSS Distributions IOOptionBits newLedgerOptions )
3728*699cd480SApple OSS Distributions {
3729*699cd480SApple OSS Distributions IOReturn err = kIOReturnSuccess;
3730*699cd480SApple OSS Distributions
3731*699cd480SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
3732*699cd480SApple OSS Distributions if (kIOMemoryRemote & _flags) {
3733*699cd480SApple OSS Distributions return kIOReturnNotAttached;
3734*699cd480SApple OSS Distributions }
3735*699cd480SApple OSS Distributions
3736*699cd480SApple OSS Distributions if (iokit_iomd_setownership_enabled == FALSE) {
3737*699cd480SApple OSS Distributions return kIOReturnUnsupported;
3738*699cd480SApple OSS Distributions }
3739*699cd480SApple OSS Distributions
3740*699cd480SApple OSS Distributions if (_memRef) {
3741*699cd480SApple OSS Distributions err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3742*699cd480SApple OSS Distributions } else {
3743*699cd480SApple OSS Distributions err = kIOReturnUnsupported;
3744*699cd480SApple OSS Distributions }
3745*699cd480SApple OSS Distributions
3746*699cd480SApple OSS Distributions return err;
3747*699cd480SApple OSS Distributions }
3748*699cd480SApple OSS Distributions
IOReturn
IOMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
	// Reassign ledger accounting for this descriptor's memory to
	// 'newOwner'. Defaults to "not ready" when nothing below handles it.
	IOReturn err = kIOReturnNotReady;

	// Remote (non-local) memory cannot be retargeted from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Global policy switch for ownership transfer.
	if (iokit_iomd_setownership_enabled == FALSE) {
		return kIOReturnUnsupported;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
	} else {
		// No memory reference: delegate to the wrapper subclass (sub- or
		// multi-descriptor) so it can forward to its parents.
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
3785*699cd480SApple OSS Distributions
3786*699cd480SApple OSS Distributions
uint64_t
IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
{
	// Return the page-rounded number of bytes a DMA mapping of this
	// descriptor would cover; optionally report (via 'offset') the first
	// segment's offset within its page.
	uint64_t length;

	if (_memRef) {
		// Backed by a memory reference: it knows its own map length.
		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
	} else {
		IOByteCount iterate, segLen;
		IOPhysicalAddress sourceAddr, sourceAlign;

		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		length = 0;
		iterate = 0;
		// Walk the source segments, adding the whole pages each one spans.
		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
			sourceAlign = (sourceAddr & page_mask);
			// Only the first segment's in-page offset is reported.
			if (offset && !iterate) {
				*offset = sourceAlign;
			}
			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
			iterate += segLen;
		}
		// No segments at all: fall back to the descriptor's raw length.
		if (!iterate) {
			length = getLength();
			if (offset) {
				*offset = 0;
			}
		}
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return length;
}
3824*699cd480SApple OSS Distributions
3825*699cd480SApple OSS Distributions
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
    IOByteCount * dirtyPageCount )
{
	// Report resident and dirty page counts for the backing memory.
	// Defaults to "not ready" when no backing object can answer.
	IOReturn err = kIOReturnNotReady;

	// Remote (non-local) memory cannot be queried from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
	} else {
		// No memory reference: delegate to the wrapper subclass (sub- or
		// multi-descriptor) so it can forward to its parents.
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
3857*699cd480SApple OSS Distributions
3858*699cd480SApple OSS Distributions
3859*699cd480SApple OSS Distributions #if defined(__arm64__)
3860*699cd480SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3861*699cd480SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3862*699cd480SApple OSS Distributions #else /* defined(__arm64__) */
3863*699cd480SApple OSS Distributions extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3864*699cd480SApple OSS Distributions extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3865*699cd480SApple OSS Distributions #endif /* defined(__arm64__) */
3866*699cd480SApple OSS Distributions
3867*699cd480SApple OSS Distributions static void
SetEncryptOp(addr64_t pa,unsigned int count)3868*699cd480SApple OSS Distributions SetEncryptOp(addr64_t pa, unsigned int count)
3869*699cd480SApple OSS Distributions {
3870*699cd480SApple OSS Distributions ppnum_t page, end;
3871*699cd480SApple OSS Distributions
3872*699cd480SApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3873*699cd480SApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3874*699cd480SApple OSS Distributions for (; page < end; page++) {
3875*699cd480SApple OSS Distributions pmap_clear_noencrypt(page);
3876*699cd480SApple OSS Distributions }
3877*699cd480SApple OSS Distributions }
3878*699cd480SApple OSS Distributions
3879*699cd480SApple OSS Distributions static void
ClearEncryptOp(addr64_t pa,unsigned int count)3880*699cd480SApple OSS Distributions ClearEncryptOp(addr64_t pa, unsigned int count)
3881*699cd480SApple OSS Distributions {
3882*699cd480SApple OSS Distributions ppnum_t page, end;
3883*699cd480SApple OSS Distributions
3884*699cd480SApple OSS Distributions page = (ppnum_t) atop_64(round_page_64(pa));
3885*699cd480SApple OSS Distributions end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3886*699cd480SApple OSS Distributions for (; page < end; page++) {
3887*699cd480SApple OSS Distributions pmap_set_noencrypt(page);
3888*699cd480SApple OSS Distributions }
3889*699cd480SApple OSS Distributions }
3890*699cd480SApple OSS Distributions
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	// Apply a cache-maintenance or encryption-marking operation to the
	// physical pages backing [offset, offset + length) of this
	// descriptor, one physically contiguous segment at a time.
	IOByteCount remaining;
	unsigned int res;
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	// arm64 helpers take extra remaining/result arguments.
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	// Remote (non-local) memory cannot be operated on from this CPU.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Select the per-segment helper for the requested operation.
	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Coherent-IO configuration: one call with zero arguments
		// suffices and no per-segment walk is needed.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// Coherent-IO configuration: see flush case above.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	// No helper selected means the option is not recognized.
#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the request to the descriptor's length, then walk segments.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// The helpers take 'unsigned int' counts: clamp both values so
		// the casts below cannot truncate. The dstLen bound
		// (UINT_MAX - PAGE_SIZE + 1) keeps it a multiple of PAGE_SIZE.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// NOTE(review): a nonzero result ends the walk and reports
			// success (remaining is zeroed) — presumably the helper
			// covered or intentionally skipped the rest; confirm.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// Any bytes left over mean the segment walk stopped early.
	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4005*699cd480SApple OSS Distributions
4006*699cd480SApple OSS Distributions /*
4007*699cd480SApple OSS Distributions *
4008*699cd480SApple OSS Distributions */
4009*699cd480SApple OSS Distributions
4010*699cd480SApple OSS Distributions #if defined(__i386__) || defined(__x86_64__)
4011*699cd480SApple OSS Distributions
4012*699cd480SApple OSS Distributions extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4013*699cd480SApple OSS Distributions
4014*699cd480SApple OSS Distributions /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4015*699cd480SApple OSS Distributions * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4016*699cd480SApple OSS Distributions * kernel non-text data -- should we just add another range instead?
4017*699cd480SApple OSS Distributions */
4018*699cd480SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4019*699cd480SApple OSS Distributions #define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4020*699cd480SApple OSS Distributions
4021*699cd480SApple OSS Distributions #elif defined(__arm64__)
4022*699cd480SApple OSS Distributions
4023*699cd480SApple OSS Distributions extern vm_offset_t static_memory_end;
4024*699cd480SApple OSS Distributions
4025*699cd480SApple OSS Distributions #if defined(__arm64__)
4026*699cd480SApple OSS Distributions #define io_kernel_static_start vm_kext_base
4027*699cd480SApple OSS Distributions #else /* defined(__arm64__) */
4028*699cd480SApple OSS Distributions #define io_kernel_static_start vm_kernel_stext
4029*699cd480SApple OSS Distributions #endif /* defined(__arm64__) */
4030*699cd480SApple OSS Distributions
4031*699cd480SApple OSS Distributions #define io_kernel_static_end static_memory_end
4032*699cd480SApple OSS Distributions
4033*699cd480SApple OSS Distributions #else
4034*699cd480SApple OSS Distributions #error io_kernel_static_end is undefined for this architecture
4035*699cd480SApple OSS Distributions #endif
4036*699cd480SApple OSS Distributions
4037*699cd480SApple OSS Distributions static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4038*699cd480SApple OSS Distributions io_get_kernel_static_upl(
4039*699cd480SApple OSS Distributions vm_map_t /* map */,
4040*699cd480SApple OSS Distributions uintptr_t offset,
4041*699cd480SApple OSS Distributions upl_size_t *upl_size,
4042*699cd480SApple OSS Distributions unsigned int *page_offset,
4043*699cd480SApple OSS Distributions upl_t *upl,
4044*699cd480SApple OSS Distributions upl_page_info_array_t page_list,
4045*699cd480SApple OSS Distributions unsigned int *count,
4046*699cd480SApple OSS Distributions ppnum_t *highest_page)
4047*699cd480SApple OSS Distributions {
4048*699cd480SApple OSS Distributions unsigned int pageCount, page;
4049*699cd480SApple OSS Distributions ppnum_t phys;
4050*699cd480SApple OSS Distributions ppnum_t highestPage = 0;
4051*699cd480SApple OSS Distributions
4052*699cd480SApple OSS Distributions pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4053*699cd480SApple OSS Distributions if (pageCount > *count) {
4054*699cd480SApple OSS Distributions pageCount = *count;
4055*699cd480SApple OSS Distributions }
4056*699cd480SApple OSS Distributions *upl_size = (upl_size_t) ptoa_64(pageCount);
4057*699cd480SApple OSS Distributions
4058*699cd480SApple OSS Distributions *upl = NULL;
4059*699cd480SApple OSS Distributions *page_offset = ((unsigned int) page_mask & offset);
4060*699cd480SApple OSS Distributions
4061*699cd480SApple OSS Distributions for (page = 0; page < pageCount; page++) {
4062*699cd480SApple OSS Distributions phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4063*699cd480SApple OSS Distributions if (!phys) {
4064*699cd480SApple OSS Distributions break;
4065*699cd480SApple OSS Distributions }
4066*699cd480SApple OSS Distributions page_list[page].phys_addr = phys;
4067*699cd480SApple OSS Distributions page_list[page].free_when_done = 0;
4068*699cd480SApple OSS Distributions page_list[page].absent = 0;
4069*699cd480SApple OSS Distributions page_list[page].dirty = 0;
4070*699cd480SApple OSS Distributions page_list[page].precious = 0;
4071*699cd480SApple OSS Distributions page_list[page].device = 0;
4072*699cd480SApple OSS Distributions if (phys > highestPage) {
4073*699cd480SApple OSS Distributions highestPage = phys;
4074*699cd480SApple OSS Distributions }
4075*699cd480SApple OSS Distributions }
4076*699cd480SApple OSS Distributions
4077*699cd480SApple OSS Distributions *highest_page = highestPage;
4078*699cd480SApple OSS Distributions
4079*699cd480SApple OSS Distributions return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4080*699cd480SApple OSS Distributions }
4081*699cd480SApple OSS Distributions
4082*699cd480SApple OSS Distributions IOReturn
wireVirtual(IODirection forDirection)4083*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4084*699cd480SApple OSS Distributions {
4085*699cd480SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4086*699cd480SApple OSS Distributions IOReturn error = kIOReturnSuccess;
4087*699cd480SApple OSS Distributions ioGMDData *dataP;
4088*699cd480SApple OSS Distributions upl_page_info_array_t pageInfo;
4089*699cd480SApple OSS Distributions ppnum_t mapBase;
4090*699cd480SApple OSS Distributions vm_tag_t tag = VM_KERN_MEMORY_NONE;
4091*699cd480SApple OSS Distributions mach_vm_size_t numBytesWired = 0;
4092*699cd480SApple OSS Distributions
4093*699cd480SApple OSS Distributions assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4094*699cd480SApple OSS Distributions
4095*699cd480SApple OSS Distributions if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4096*699cd480SApple OSS Distributions forDirection = (IODirection) (forDirection | getDirection());
4097*699cd480SApple OSS Distributions }
4098*699cd480SApple OSS Distributions
4099*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
4100*699cd480SApple OSS Distributions upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4101*699cd480SApple OSS Distributions switch (kIODirectionOutIn & forDirection) {
4102*699cd480SApple OSS Distributions case kIODirectionOut:
4103*699cd480SApple OSS Distributions // Pages do not need to be marked as dirty on commit
4104*699cd480SApple OSS Distributions uplFlags = UPL_COPYOUT_FROM;
4105*699cd480SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess;
4106*699cd480SApple OSS Distributions break;
4107*699cd480SApple OSS Distributions
4108*699cd480SApple OSS Distributions case kIODirectionIn:
4109*699cd480SApple OSS Distributions dataP->fDMAAccess = kIODMAMapWriteAccess;
4110*699cd480SApple OSS Distributions uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4111*699cd480SApple OSS Distributions break;
4112*699cd480SApple OSS Distributions
4113*699cd480SApple OSS Distributions default:
4114*699cd480SApple OSS Distributions dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4115*699cd480SApple OSS Distributions uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4116*699cd480SApple OSS Distributions break;
4117*699cd480SApple OSS Distributions }
4118*699cd480SApple OSS Distributions
4119*699cd480SApple OSS Distributions if (_wireCount) {
4120*699cd480SApple OSS Distributions if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4121*699cd480SApple OSS Distributions OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4122*699cd480SApple OSS Distributions (size_t)VM_KERNEL_ADDRPERM(this));
4123*699cd480SApple OSS Distributions error = kIOReturnNotWritable;
4124*699cd480SApple OSS Distributions }
4125*699cd480SApple OSS Distributions } else {
4126*699cd480SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4127*699cd480SApple OSS Distributions IOMapper *mapper;
4128*699cd480SApple OSS Distributions
4129*699cd480SApple OSS Distributions mapper = dataP->fMapper;
4130*699cd480SApple OSS Distributions dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4131*699cd480SApple OSS Distributions
4132*699cd480SApple OSS Distributions uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4133*699cd480SApple OSS Distributions tag = _kernelTag;
4134*699cd480SApple OSS Distributions if (VM_KERN_MEMORY_NONE == tag) {
4135*699cd480SApple OSS Distributions tag = IOMemoryTag(kernel_map);
4136*699cd480SApple OSS Distributions }
4137*699cd480SApple OSS Distributions
4138*699cd480SApple OSS Distributions if (kIODirectionPrepareToPhys32 & forDirection) {
4139*699cd480SApple OSS Distributions if (!mapper) {
4140*699cd480SApple OSS Distributions uplFlags |= UPL_NEED_32BIT_ADDR;
4141*699cd480SApple OSS Distributions }
4142*699cd480SApple OSS Distributions if (dataP->fDMAMapNumAddressBits > 32) {
4143*699cd480SApple OSS Distributions dataP->fDMAMapNumAddressBits = 32;
4144*699cd480SApple OSS Distributions }
4145*699cd480SApple OSS Distributions }
4146*699cd480SApple OSS Distributions if (kIODirectionPrepareNoFault & forDirection) {
4147*699cd480SApple OSS Distributions uplFlags |= UPL_REQUEST_NO_FAULT;
4148*699cd480SApple OSS Distributions }
4149*699cd480SApple OSS Distributions if (kIODirectionPrepareNoZeroFill & forDirection) {
4150*699cd480SApple OSS Distributions uplFlags |= UPL_NOZEROFILLIO;
4151*699cd480SApple OSS Distributions }
4152*699cd480SApple OSS Distributions if (kIODirectionPrepareNonCoherent & forDirection) {
4153*699cd480SApple OSS Distributions uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4154*699cd480SApple OSS Distributions }
4155*699cd480SApple OSS Distributions
4156*699cd480SApple OSS Distributions mapBase = 0;
4157*699cd480SApple OSS Distributions
4158*699cd480SApple OSS Distributions // Note that appendBytes(NULL) zeros the data up to the desired length
4159*699cd480SApple OSS Distributions size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4160*699cd480SApple OSS Distributions if (uplPageSize > ((unsigned int)uplPageSize)) {
4161*699cd480SApple OSS Distributions error = kIOReturnNoMemory;
4162*699cd480SApple OSS Distributions traceInterval.setEndArg2(error);
4163*699cd480SApple OSS Distributions return error;
4164*699cd480SApple OSS Distributions }
4165*699cd480SApple OSS Distributions if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4166*699cd480SApple OSS Distributions error = kIOReturnNoMemory;
4167*699cd480SApple OSS Distributions traceInterval.setEndArg2(error);
4168*699cd480SApple OSS Distributions return error;
4169*699cd480SApple OSS Distributions }
4170*699cd480SApple OSS Distributions dataP = NULL;
4171*699cd480SApple OSS Distributions
4172*699cd480SApple OSS Distributions // Find the appropriate vm_map for the given task
4173*699cd480SApple OSS Distributions vm_map_t curMap;
4174*699cd480SApple OSS Distributions if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4175*699cd480SApple OSS Distributions curMap = NULL;
4176*699cd480SApple OSS Distributions } else {
4177*699cd480SApple OSS Distributions curMap = get_task_map(_task);
4178*699cd480SApple OSS Distributions }
4179*699cd480SApple OSS Distributions
4180*699cd480SApple OSS Distributions // Iterate over the vector of virtual ranges
4181*699cd480SApple OSS Distributions Ranges vec = _ranges;
4182*699cd480SApple OSS Distributions unsigned int pageIndex = 0;
4183*699cd480SApple OSS Distributions IOByteCount mdOffset = 0;
4184*699cd480SApple OSS Distributions ppnum_t highestPage = 0;
4185*699cd480SApple OSS Distributions bool byteAlignUPL;
4186*699cd480SApple OSS Distributions
4187*699cd480SApple OSS Distributions IOMemoryEntry * memRefEntry = NULL;
4188*699cd480SApple OSS Distributions if (_memRef) {
4189*699cd480SApple OSS Distributions memRefEntry = &_memRef->entries[0];
4190*699cd480SApple OSS Distributions byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4191*699cd480SApple OSS Distributions } else {
4192*699cd480SApple OSS Distributions byteAlignUPL = true;
4193*699cd480SApple OSS Distributions }
4194*699cd480SApple OSS Distributions
4195*699cd480SApple OSS Distributions for (UInt range = 0; mdOffset < _length; range++) {
4196*699cd480SApple OSS Distributions ioPLBlock iopl;
4197*699cd480SApple OSS Distributions mach_vm_address_t startPage, startPageOffset;
4198*699cd480SApple OSS Distributions mach_vm_size_t numBytes;
4199*699cd480SApple OSS Distributions ppnum_t highPage = 0;
4200*699cd480SApple OSS Distributions
4201*699cd480SApple OSS Distributions if (_memRef) {
4202*699cd480SApple OSS Distributions if (range >= _memRef->count) {
4203*699cd480SApple OSS Distributions panic("memRefEntry");
4204*699cd480SApple OSS Distributions }
4205*699cd480SApple OSS Distributions memRefEntry = &_memRef->entries[range];
4206*699cd480SApple OSS Distributions numBytes = memRefEntry->size;
4207*699cd480SApple OSS Distributions startPage = -1ULL;
4208*699cd480SApple OSS Distributions if (byteAlignUPL) {
4209*699cd480SApple OSS Distributions startPageOffset = 0;
4210*699cd480SApple OSS Distributions } else {
4211*699cd480SApple OSS Distributions startPageOffset = (memRefEntry->start & PAGE_MASK);
4212*699cd480SApple OSS Distributions }
4213*699cd480SApple OSS Distributions } else {
4214*699cd480SApple OSS Distributions // Get the startPage address and length of vec[range]
4215*699cd480SApple OSS Distributions getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
4216*699cd480SApple OSS Distributions if (byteAlignUPL) {
4217*699cd480SApple OSS Distributions startPageOffset = 0;
4218*699cd480SApple OSS Distributions } else {
4219*699cd480SApple OSS Distributions startPageOffset = startPage & PAGE_MASK;
4220*699cd480SApple OSS Distributions startPage = trunc_page_64(startPage);
4221*699cd480SApple OSS Distributions }
4222*699cd480SApple OSS Distributions }
4223*699cd480SApple OSS Distributions iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4224*699cd480SApple OSS Distributions numBytes += startPageOffset;
4225*699cd480SApple OSS Distributions
4226*699cd480SApple OSS Distributions if (mapper) {
4227*699cd480SApple OSS Distributions iopl.fMappedPage = mapBase + pageIndex;
4228*699cd480SApple OSS Distributions } else {
4229*699cd480SApple OSS Distributions iopl.fMappedPage = 0;
4230*699cd480SApple OSS Distributions }
4231*699cd480SApple OSS Distributions
4232*699cd480SApple OSS Distributions // Iterate over the current range, creating UPLs
4233*699cd480SApple OSS Distributions while (numBytes) {
4234*699cd480SApple OSS Distributions vm_address_t kernelStart = (vm_address_t) startPage;
4235*699cd480SApple OSS Distributions vm_map_t theMap;
4236*699cd480SApple OSS Distributions if (curMap) {
4237*699cd480SApple OSS Distributions theMap = curMap;
4238*699cd480SApple OSS Distributions } else if (_memRef) {
4239*699cd480SApple OSS Distributions theMap = NULL;
4240*699cd480SApple OSS Distributions } else {
4241*699cd480SApple OSS Distributions assert(_task == kernel_task);
4242*699cd480SApple OSS Distributions theMap = IOPageableMapForAddress(kernelStart);
4243*699cd480SApple OSS Distributions }
4244*699cd480SApple OSS Distributions
4245*699cd480SApple OSS Distributions // ioplFlags is an in/out parameter
4246*699cd480SApple OSS Distributions upl_control_flags_t ioplFlags = uplFlags;
4247*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
4248*699cd480SApple OSS Distributions pageInfo = getPageList(dataP);
4249*699cd480SApple OSS Distributions upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4250*699cd480SApple OSS Distributions
4251*699cd480SApple OSS Distributions mach_vm_size_t ioplPhysSize;
4252*699cd480SApple OSS Distributions upl_size_t ioplSize;
4253*699cd480SApple OSS Distributions unsigned int numPageInfo;
4254*699cd480SApple OSS Distributions
4255*699cd480SApple OSS Distributions if (_memRef) {
4256*699cd480SApple OSS Distributions error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4257*699cd480SApple OSS Distributions DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4258*699cd480SApple OSS Distributions } else {
4259*699cd480SApple OSS Distributions error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4260*699cd480SApple OSS Distributions DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4261*699cd480SApple OSS Distributions }
4262*699cd480SApple OSS Distributions if (error != KERN_SUCCESS) {
4263*699cd480SApple OSS Distributions if (_memRef) {
4264*699cd480SApple OSS Distributions DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4265*699cd480SApple OSS Distributions } else {
4266*699cd480SApple OSS Distributions DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4267*699cd480SApple OSS Distributions }
4268*699cd480SApple OSS Distributions printf("entry size error %d\n", error);
4269*699cd480SApple OSS Distributions goto abortExit;
4270*699cd480SApple OSS Distributions }
4271*699cd480SApple OSS Distributions ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4272*699cd480SApple OSS Distributions numPageInfo = atop_32(ioplPhysSize);
4273*699cd480SApple OSS Distributions if (byteAlignUPL) {
4274*699cd480SApple OSS Distributions if (numBytes > ioplPhysSize) {
4275*699cd480SApple OSS Distributions ioplSize = ((typeof(ioplSize))ioplPhysSize);
4276*699cd480SApple OSS Distributions } else {
4277*699cd480SApple OSS Distributions ioplSize = ((typeof(ioplSize))numBytes);
4278*699cd480SApple OSS Distributions }
4279*699cd480SApple OSS Distributions } else {
4280*699cd480SApple OSS Distributions ioplSize = ((typeof(ioplSize))ioplPhysSize);
4281*699cd480SApple OSS Distributions }
4282*699cd480SApple OSS Distributions
4283*699cd480SApple OSS Distributions if (_memRef) {
4284*699cd480SApple OSS Distributions memory_object_offset_t entryOffset;
4285*699cd480SApple OSS Distributions
4286*699cd480SApple OSS Distributions entryOffset = mdOffset;
4287*699cd480SApple OSS Distributions if (byteAlignUPL) {
4288*699cd480SApple OSS Distributions entryOffset = (entryOffset - memRefEntry->offset);
4289*699cd480SApple OSS Distributions } else {
4290*699cd480SApple OSS Distributions entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4291*699cd480SApple OSS Distributions }
4292*699cd480SApple OSS Distributions if (ioplSize > (memRefEntry->size - entryOffset)) {
4293*699cd480SApple OSS Distributions ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4294*699cd480SApple OSS Distributions }
4295*699cd480SApple OSS Distributions error = memory_object_iopl_request(memRefEntry->entry,
4296*699cd480SApple OSS Distributions entryOffset,
4297*699cd480SApple OSS Distributions &ioplSize,
4298*699cd480SApple OSS Distributions &iopl.fIOPL,
4299*699cd480SApple OSS Distributions baseInfo,
4300*699cd480SApple OSS Distributions &numPageInfo,
4301*699cd480SApple OSS Distributions &ioplFlags,
4302*699cd480SApple OSS Distributions tag);
4303*699cd480SApple OSS Distributions } else if ((theMap == kernel_map)
4304*699cd480SApple OSS Distributions && (kernelStart >= io_kernel_static_start)
4305*699cd480SApple OSS Distributions && (kernelStart < io_kernel_static_end)) {
4306*699cd480SApple OSS Distributions error = io_get_kernel_static_upl(theMap,
4307*699cd480SApple OSS Distributions kernelStart,
4308*699cd480SApple OSS Distributions &ioplSize,
4309*699cd480SApple OSS Distributions &iopl.fPageOffset,
4310*699cd480SApple OSS Distributions &iopl.fIOPL,
4311*699cd480SApple OSS Distributions baseInfo,
4312*699cd480SApple OSS Distributions &numPageInfo,
4313*699cd480SApple OSS Distributions &highPage);
4314*699cd480SApple OSS Distributions } else {
4315*699cd480SApple OSS Distributions assert(theMap);
4316*699cd480SApple OSS Distributions error = vm_map_create_upl(theMap,
4317*699cd480SApple OSS Distributions startPage,
4318*699cd480SApple OSS Distributions (upl_size_t*)&ioplSize,
4319*699cd480SApple OSS Distributions &iopl.fIOPL,
4320*699cd480SApple OSS Distributions baseInfo,
4321*699cd480SApple OSS Distributions &numPageInfo,
4322*699cd480SApple OSS Distributions &ioplFlags,
4323*699cd480SApple OSS Distributions tag);
4324*699cd480SApple OSS Distributions }
4325*699cd480SApple OSS Distributions
4326*699cd480SApple OSS Distributions if (error != KERN_SUCCESS) {
4327*699cd480SApple OSS Distributions traceInterval.setEndArg2(error);
4328*699cd480SApple OSS Distributions DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4329*699cd480SApple OSS Distributions goto abortExit;
4330*699cd480SApple OSS Distributions }
4331*699cd480SApple OSS Distributions
4332*699cd480SApple OSS Distributions assert(ioplSize);
4333*699cd480SApple OSS Distributions
4334*699cd480SApple OSS Distributions if (iopl.fIOPL) {
4335*699cd480SApple OSS Distributions highPage = upl_get_highest_page(iopl.fIOPL);
4336*699cd480SApple OSS Distributions }
4337*699cd480SApple OSS Distributions if (highPage > highestPage) {
4338*699cd480SApple OSS Distributions highestPage = highPage;
4339*699cd480SApple OSS Distributions }
4340*699cd480SApple OSS Distributions
4341*699cd480SApple OSS Distributions if (baseInfo->device) {
4342*699cd480SApple OSS Distributions numPageInfo = 1;
4343*699cd480SApple OSS Distributions iopl.fFlags = kIOPLOnDevice;
4344*699cd480SApple OSS Distributions } else {
4345*699cd480SApple OSS Distributions iopl.fFlags = 0;
4346*699cd480SApple OSS Distributions }
4347*699cd480SApple OSS Distributions
4348*699cd480SApple OSS Distributions if (byteAlignUPL) {
4349*699cd480SApple OSS Distributions if (iopl.fIOPL) {
4350*699cd480SApple OSS Distributions DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4351*699cd480SApple OSS Distributions iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4352*699cd480SApple OSS Distributions }
4353*699cd480SApple OSS Distributions if (startPage != (mach_vm_address_t)-1) {
4354*699cd480SApple OSS Distributions // assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4355*699cd480SApple OSS Distributions startPage -= iopl.fPageOffset;
4356*699cd480SApple OSS Distributions }
4357*699cd480SApple OSS Distributions ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4358*699cd480SApple OSS Distributions numBytes += iopl.fPageOffset;
4359*699cd480SApple OSS Distributions }
4360*699cd480SApple OSS Distributions
4361*699cd480SApple OSS Distributions iopl.fIOMDOffset = mdOffset;
4362*699cd480SApple OSS Distributions iopl.fPageInfo = pageIndex;
4363*699cd480SApple OSS Distributions
4364*699cd480SApple OSS Distributions if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4365*699cd480SApple OSS Distributions // Clean up partial created and unsaved iopl
4366*699cd480SApple OSS Distributions if (iopl.fIOPL) {
4367*699cd480SApple OSS Distributions upl_abort(iopl.fIOPL, 0);
4368*699cd480SApple OSS Distributions upl_deallocate(iopl.fIOPL);
4369*699cd480SApple OSS Distributions }
4370*699cd480SApple OSS Distributions error = kIOReturnNoMemory;
4371*699cd480SApple OSS Distributions traceInterval.setEndArg2(error);
4372*699cd480SApple OSS Distributions goto abortExit;
4373*699cd480SApple OSS Distributions }
4374*699cd480SApple OSS Distributions dataP = NULL;
4375*699cd480SApple OSS Distributions
4376*699cd480SApple OSS Distributions // Check for a multiple iopl's in one virtual range
4377*699cd480SApple OSS Distributions pageIndex += numPageInfo;
4378*699cd480SApple OSS Distributions mdOffset -= iopl.fPageOffset;
4379*699cd480SApple OSS Distributions numBytesWired += ioplSize;
4380*699cd480SApple OSS Distributions if (ioplSize < numBytes) {
4381*699cd480SApple OSS Distributions numBytes -= ioplSize;
4382*699cd480SApple OSS Distributions if (startPage != (mach_vm_address_t)-1) {
4383*699cd480SApple OSS Distributions startPage += ioplSize;
4384*699cd480SApple OSS Distributions }
4385*699cd480SApple OSS Distributions mdOffset += ioplSize;
4386*699cd480SApple OSS Distributions iopl.fPageOffset = 0;
4387*699cd480SApple OSS Distributions if (mapper) {
4388*699cd480SApple OSS Distributions iopl.fMappedPage = mapBase + pageIndex;
4389*699cd480SApple OSS Distributions }
4390*699cd480SApple OSS Distributions } else {
4391*699cd480SApple OSS Distributions mdOffset += numBytes;
4392*699cd480SApple OSS Distributions break;
4393*699cd480SApple OSS Distributions }
4394*699cd480SApple OSS Distributions }
4395*699cd480SApple OSS Distributions }
4396*699cd480SApple OSS Distributions
4397*699cd480SApple OSS Distributions _highestPage = highestPage;
4398*699cd480SApple OSS Distributions DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4399*699cd480SApple OSS Distributions
4400*699cd480SApple OSS Distributions if (UPL_COPYOUT_FROM & uplFlags) {
4401*699cd480SApple OSS Distributions _flags |= kIOMemoryPreparedReadOnly;
4402*699cd480SApple OSS Distributions }
4403*699cd480SApple OSS Distributions traceInterval.setEndCodes(numBytesWired, error);
4404*699cd480SApple OSS Distributions }
4405*699cd480SApple OSS Distributions
4406*699cd480SApple OSS Distributions #if IOTRACKING
4407*699cd480SApple OSS Distributions if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4408*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
4409*699cd480SApple OSS Distributions if (!dataP->fWireTracking.link.next) {
4410*699cd480SApple OSS Distributions IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4411*699cd480SApple OSS Distributions }
4412*699cd480SApple OSS Distributions }
4413*699cd480SApple OSS Distributions #endif /* IOTRACKING */
4414*699cd480SApple OSS Distributions
4415*699cd480SApple OSS Distributions return error;
4416*699cd480SApple OSS Distributions
4417*699cd480SApple OSS Distributions abortExit:
4418*699cd480SApple OSS Distributions {
4419*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
4420*699cd480SApple OSS Distributions UInt done = getNumIOPL(_memoryEntries, dataP);
4421*699cd480SApple OSS Distributions ioPLBlock *ioplList = getIOPLList(dataP);
4422*699cd480SApple OSS Distributions
4423*699cd480SApple OSS Distributions for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4424*699cd480SApple OSS Distributions if (ioplList[ioplIdx].fIOPL) {
4425*699cd480SApple OSS Distributions upl_abort(ioplList[ioplIdx].fIOPL, 0);
4426*699cd480SApple OSS Distributions upl_deallocate(ioplList[ioplIdx].fIOPL);
4427*699cd480SApple OSS Distributions }
4428*699cd480SApple OSS Distributions }
4429*699cd480SApple OSS Distributions _memoryEntries->setLength(computeDataSize(0, 0));
4430*699cd480SApple OSS Distributions }
4431*699cd480SApple OSS Distributions
4432*699cd480SApple OSS Distributions if (error == KERN_FAILURE) {
4433*699cd480SApple OSS Distributions error = kIOReturnCannotWire;
4434*699cd480SApple OSS Distributions } else if (error == KERN_MEMORY_ERROR) {
4435*699cd480SApple OSS Distributions error = kIOReturnNoResources;
4436*699cd480SApple OSS Distributions }
4437*699cd480SApple OSS Distributions
4438*699cd480SApple OSS Distributions return error;
4439*699cd480SApple OSS Distributions }
4440*699cd480SApple OSS Distributions
4441*699cd480SApple OSS Distributions bool
initMemoryEntries(size_t size,IOMapper * mapper)4442*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4443*699cd480SApple OSS Distributions {
4444*699cd480SApple OSS Distributions ioGMDData * dataP;
4445*699cd480SApple OSS Distributions
4446*699cd480SApple OSS Distributions if (size > UINT_MAX) {
4447*699cd480SApple OSS Distributions return false;
4448*699cd480SApple OSS Distributions }
4449*699cd480SApple OSS Distributions if (!_memoryEntries) {
4450*699cd480SApple OSS Distributions _memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4451*699cd480SApple OSS Distributions if (!_memoryEntries) {
4452*699cd480SApple OSS Distributions return false;
4453*699cd480SApple OSS Distributions }
4454*699cd480SApple OSS Distributions } else if (!_memoryEntries->initWithCapacity(size)) {
4455*699cd480SApple OSS Distributions return false;
4456*699cd480SApple OSS Distributions }
4457*699cd480SApple OSS Distributions
4458*699cd480SApple OSS Distributions _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4459*699cd480SApple OSS Distributions dataP = getDataP(_memoryEntries);
4460*699cd480SApple OSS Distributions
4461*699cd480SApple OSS Distributions if (mapper == kIOMapperWaitSystem) {
4462*699cd480SApple OSS Distributions IOMapper::checkForSystemMapper();
4463*699cd480SApple OSS Distributions mapper = IOMapper::gSystem;
4464*699cd480SApple OSS Distributions }
4465*699cd480SApple OSS Distributions dataP->fMapper = mapper;
4466*699cd480SApple OSS Distributions dataP->fPageCnt = 0;
4467*699cd480SApple OSS Distributions dataP->fMappedBase = 0;
4468*699cd480SApple OSS Distributions dataP->fDMAMapNumAddressBits = 64;
4469*699cd480SApple OSS Distributions dataP->fDMAMapAlignment = 0;
4470*699cd480SApple OSS Distributions dataP->fPreparationID = kIOPreparationIDUnprepared;
4471*699cd480SApple OSS Distributions dataP->fCompletionError = false;
4472*699cd480SApple OSS Distributions dataP->fMappedBaseValid = false;
4473*699cd480SApple OSS Distributions
4474*699cd480SApple OSS Distributions return true;
4475*699cd480SApple OSS Distributions }
4476*699cd480SApple OSS Distributions
4477*699cd480SApple OSS Distributions IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4478*699cd480SApple OSS Distributions IOMemoryDescriptor::dmaMap(
4479*699cd480SApple OSS Distributions IOMapper * mapper,
4480*699cd480SApple OSS Distributions IOMemoryDescriptor * memory,
4481*699cd480SApple OSS Distributions IODMACommand * command,
4482*699cd480SApple OSS Distributions const IODMAMapSpecification * mapSpec,
4483*699cd480SApple OSS Distributions uint64_t offset,
4484*699cd480SApple OSS Distributions uint64_t length,
4485*699cd480SApple OSS Distributions uint64_t * mapAddress,
4486*699cd480SApple OSS Distributions uint64_t * mapLength)
4487*699cd480SApple OSS Distributions {
4488*699cd480SApple OSS Distributions IOReturn err;
4489*699cd480SApple OSS Distributions uint32_t mapOptions;
4490*699cd480SApple OSS Distributions
4491*699cd480SApple OSS Distributions mapOptions = 0;
4492*699cd480SApple OSS Distributions mapOptions |= kIODMAMapReadAccess;
4493*699cd480SApple OSS Distributions if (!(kIOMemoryPreparedReadOnly & _flags)) {
4494*699cd480SApple OSS Distributions mapOptions |= kIODMAMapWriteAccess;
4495*699cd480SApple OSS Distributions }
4496*699cd480SApple OSS Distributions
4497*699cd480SApple OSS Distributions err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4498*699cd480SApple OSS Distributions mapSpec, command, NULL, mapAddress, mapLength);
4499*699cd480SApple OSS Distributions
4500*699cd480SApple OSS Distributions if (kIOReturnSuccess == err) {
4501*699cd480SApple OSS Distributions dmaMapRecord(mapper, command, *mapLength);
4502*699cd480SApple OSS Distributions }
4503*699cd480SApple OSS Distributions
4504*699cd480SApple OSS Distributions return err;
4505*699cd480SApple OSS Distributions }
4506*699cd480SApple OSS Distributions
4507*699cd480SApple OSS Distributions void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4508*699cd480SApple OSS Distributions IOMemoryDescriptor::dmaMapRecord(
4509*699cd480SApple OSS Distributions IOMapper * mapper,
4510*699cd480SApple OSS Distributions IODMACommand * command,
4511*699cd480SApple OSS Distributions uint64_t mapLength)
4512*699cd480SApple OSS Distributions {
4513*699cd480SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4514*699cd480SApple OSS Distributions kern_allocation_name_t alloc;
4515*699cd480SApple OSS Distributions int16_t prior;
4516*699cd480SApple OSS Distributions
4517*699cd480SApple OSS Distributions if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4518*699cd480SApple OSS Distributions kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4519*699cd480SApple OSS Distributions }
4520*699cd480SApple OSS Distributions
4521*699cd480SApple OSS Distributions if (!command) {
4522*699cd480SApple OSS Distributions return;
4523*699cd480SApple OSS Distributions }
4524*699cd480SApple OSS Distributions prior = OSAddAtomic16(1, &_dmaReferences);
4525*699cd480SApple OSS Distributions if (!prior) {
4526*699cd480SApple OSS Distributions if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4527*699cd480SApple OSS Distributions _mapName = alloc;
4528*699cd480SApple OSS Distributions mapLength = _length;
4529*699cd480SApple OSS Distributions kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4530*699cd480SApple OSS Distributions } else {
4531*699cd480SApple OSS Distributions _mapName = NULL;
4532*699cd480SApple OSS Distributions }
4533*699cd480SApple OSS Distributions }
4534*699cd480SApple OSS Distributions }
4535*699cd480SApple OSS Distributions
// Tear down a DMA mapping previously established via dmaMap(), reversing the
// accounting performed by dmaMapRecord().  Panics if the DMA reference count
// would underflow (unbalanced dmaMap/dmaUnmap).
IOReturn
IOMemoryDescriptor::dmaUnmap(
	IOMapper * mapper,
	IODMACommand * command,
	uint64_t offset,
	uint64_t mapAddress,
	uint64_t mapLength)
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
	IOReturn ret;
	kern_allocation_name_t alloc;
	kern_allocation_name_t mapName;
	int16_t prior;

	mapName = NULL;
	prior = 0;
	// Only command-driven mappings are reference counted (mirrors
	// dmaMapRecord()).  Capture the name/prior count before unmapping.
	if (command) {
		mapName = _mapName;
		if (_dmaReferences) {
			// prior is the value before the decrement.
			prior = OSAddAtomic16(-1, &_dmaReferences);
		} else {
			panic("_dmaReferences underflow");
		}
	}

	// Zero-length mappings established nothing; nothing to undo.
	if (!mapLength) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

	// Reverse the size charge; on the last reference (prior == 1) also
	// reverse the per-tag subtotal, which was charged as _length.
	if ((alloc = mapper->fAllocName)) {
		kern_allocation_update_size(alloc, -mapLength, NULL);
		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
			mapLength = _length;
			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
		}
	}

	traceInterval.setEndArg1(ret);
	return ret;
}
4579*699cd480SApple OSS Distributions
// DMA-map a general memory descriptor.  For physical-range descriptors, or
// partial-range requests, defer to the superclass.  For wired virtual memory
// with a prepared page list, build an IODMAMapPageList directly from the
// IOPL data and map it in one call.
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn err = kIOReturnSuccess;
	ioGMDData * dataP;
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	// Host-only memory is never exposed to DMA; succeed with no mapping.
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Physical descriptors, or requests that don't cover the whole
	// descriptor, take the generic path in the superclass.
	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t mapOptions = 0;

		// Build a local mapping specification from the constraints
		// recorded at prepare time; the caller's mapSpec is unused here.
		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// Whole pages, page-aligned start: the page list covers the
		// mapping with no partial first/last page.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		// fDMAAccess carries the read/write access bits set at prepare.
		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		// Mirror the accounting done by the superclass path.
		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4653*699cd480SApple OSS Distributions
4654*699cd480SApple OSS Distributions /*
4655*699cd480SApple OSS Distributions * prepare
4656*699cd480SApple OSS Distributions *
4657*699cd480SApple OSS Distributions * Prepare the memory for an I/O transfer. This involves paging in
4658*699cd480SApple OSS Distributions * the memory, if necessary, and wiring it down for the duration of
4659*699cd480SApple OSS Distributions * the transfer. The complete() method completes the processing of
4660*699cd480SApple OSS Distributions * the memory after the I/O transfer finishes. This method needn't
 * be called for non-pageable memory.
4662*699cd480SApple OSS Distributions */
4663*699cd480SApple OSS Distributions
4664*699cd480SApple OSS Distributions IOReturn
prepare(IODirection forDirection)4665*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4666*699cd480SApple OSS Distributions {
4667*699cd480SApple OSS Distributions IOReturn error = kIOReturnSuccess;
4668*699cd480SApple OSS Distributions IOOptionBits type = _flags & kIOMemoryTypeMask;
4669*699cd480SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4670*699cd480SApple OSS Distributions
4671*699cd480SApple OSS Distributions if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4672*699cd480SApple OSS Distributions traceInterval.setEndArg1(kIOReturnSuccess);
4673*699cd480SApple OSS Distributions return kIOReturnSuccess;
4674*699cd480SApple OSS Distributions }
4675*699cd480SApple OSS Distributions
4676*699cd480SApple OSS Distributions assert(!(kIOMemoryRemote & _flags));
4677*699cd480SApple OSS Distributions if (kIOMemoryRemote & _flags) {
4678*699cd480SApple OSS Distributions traceInterval.setEndArg1(kIOReturnNotAttached);
4679*699cd480SApple OSS Distributions return kIOReturnNotAttached;
4680*699cd480SApple OSS Distributions }
4681*699cd480SApple OSS Distributions
4682*699cd480SApple OSS Distributions if (_prepareLock) {
4683*699cd480SApple OSS Distributions IOLockLock(_prepareLock);
4684*699cd480SApple OSS Distributions }
4685*699cd480SApple OSS Distributions
4686*699cd480SApple OSS Distributions if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4687*699cd480SApple OSS Distributions if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4688*699cd480SApple OSS Distributions error = kIOReturnNotReady;
4689*699cd480SApple OSS Distributions goto finish;
4690*699cd480SApple OSS Distributions }
4691*699cd480SApple OSS Distributions error = wireVirtual(forDirection);
4692*699cd480SApple OSS Distributions }
4693*699cd480SApple OSS Distributions
4694*699cd480SApple OSS Distributions if (kIOReturnSuccess == error) {
4695*699cd480SApple OSS Distributions if (1 == ++_wireCount) {
4696*699cd480SApple OSS Distributions if (kIOMemoryClearEncrypt & _flags) {
4697*699cd480SApple OSS Distributions performOperation(kIOMemoryClearEncrypted, 0, _length);
4698*699cd480SApple OSS Distributions }
4699*699cd480SApple OSS Distributions
4700*699cd480SApple OSS Distributions ktraceEmitPhysicalSegments();
4701*699cd480SApple OSS Distributions }
4702*699cd480SApple OSS Distributions }
4703*699cd480SApple OSS Distributions
4704*699cd480SApple OSS Distributions finish:
4705*699cd480SApple OSS Distributions
4706*699cd480SApple OSS Distributions if (_prepareLock) {
4707*699cd480SApple OSS Distributions IOLockUnlock(_prepareLock);
4708*699cd480SApple OSS Distributions }
4709*699cd480SApple OSS Distributions traceInterval.setEndArg1(error);
4710*699cd480SApple OSS Distributions
4711*699cd480SApple OSS Distributions return error;
4712*699cd480SApple OSS Distributions }
4713*699cd480SApple OSS Distributions
4714*699cd480SApple OSS Distributions /*
4715*699cd480SApple OSS Distributions * complete
4716*699cd480SApple OSS Distributions *
4717*699cd480SApple OSS Distributions * Complete processing of the memory after an I/O transfer finishes.
4718*699cd480SApple OSS Distributions * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
4721*699cd480SApple OSS Distributions */
4722*699cd480SApple OSS Distributions
// Unwire the descriptor after I/O.  Decrements the wire count; on the last
// complete(), unmaps any DMA mapping, commits or aborts the IOPLs, and resets
// the preparation state.  Must balance a prior prepare().
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical memory was never wired by prepare(); nothing to undo.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// Unbalanced complete() is a caller bug; tolerate it in release.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		// Remember an error completion so the final complete() aborts
		// the UPLs instead of committing them.
		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		// Re-encrypt on the last complete if prepare() decrypted.
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				// Still wired: just mark the IOPL pages' data valid.
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Last complete(): fully tear down the preparation.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Truncate the memory-entries data back to its header.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4827*699cd480SApple OSS Distributions
// Map the descriptor into an address space.  With kIOMap64Bit, *__address is
// actually an IOMemoryMap describing the request.  Handles the identity-map
// fast path, lazy _memRef creation, the kIOMapReference|kIOMapUnique redirect
// (upl_transpose) path, and the normal memoryReferenceMap() path.
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t err;

	// Under kIOMap64Bit the "address" is really the IOMemoryMap object.
	IOMemoryMap * mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset = mapping->fOffset + __offset;
	mach_vm_size_t length = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t range0Len = 0;

	// Reject requests that fall outside the descriptor's extent.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
	}

	// mapping source == dest? (could be much better)
	// Fast path: a single-range descriptor mapped back into its own task
	// needs no new mapping — reuse the existing address as a static map.
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the backing memory reference on first map.
	if (!_memRef) {
		IOOptionBits createOptions = 0;
		if (!(kIOMapReadOnly & options)) {
			createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
			}
#endif
		}
		err = memoryReferenceCreate(createOptions, &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}

	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		do{
			upl_t redirUPL2;
			upl_size_t size;
			upl_control_flags_t flags;
			unsigned int lock_count;

			// Redirect requires sole ownership of the memory reference.
			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// Drop the (possibly recursive) memory lock around
			// upl_transpose, then reacquire to the same depth.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			// Transpose failure is logged but not fatal to the map.
			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: map the memory reference into the target map.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
#if IOTRACKING
		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
			// only dram maps in the default on development case
			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
		}
#endif /* IOTRACKING */
		if ((err == KERN_SUCCESS) && pager) {
			// Device memory: populate the pager; undo the map on failure.
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				// Inherit the buffer's cache mode when none was requested.
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
4997*699cd480SApple OSS Distributions
#if IOTRACKING
// Given an IOTrackingUser embedded in an IOMemoryMap (fTracking), recover the
// map and report the task, user address, and size of the mapping.
// Returns kIOReturnNotReady if the map is no longer attached to the task's
// current VM map (e.g. the task has terminated or the map was torn down).
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
	// Use the compiler builtin rather than a hand-rolled
	// ((type *)NULL)->field macro, which is formally undefined behavior.
	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking)
	    - __builtin_offsetof(IOMemoryMap, fTracking));

	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task = map->fAddressTask;
	*address = map->fAddress;
	*size = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5018*699cd480SApple OSS Distributions
5019*699cd480SApple OSS Distributions IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5020*699cd480SApple OSS Distributions IOGeneralMemoryDescriptor::doUnmap(
5021*699cd480SApple OSS Distributions vm_map_t addressMap,
5022*699cd480SApple OSS Distributions IOVirtualAddress __address,
5023*699cd480SApple OSS Distributions IOByteCount __length )
5024*699cd480SApple OSS Distributions {
5025*699cd480SApple OSS Distributions IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5026*699cd480SApple OSS Distributions IOReturn ret;
5027*699cd480SApple OSS Distributions ret = super::doUnmap(addressMap, __address, __length);
5028*699cd480SApple OSS Distributions traceInterval.setEndArg1(ret);
5029*699cd480SApple OSS Distributions return ret;
5030*699cd480SApple OSS Distributions }
5031*699cd480SApple OSS Distributions
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

// Register IOMemoryMap's OSMetaClass (with its own allocation zone) and
// reserve 8 unused vtable slots for future binary-compatible expansion.
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5048*699cd480SApple OSS Distributions /* ex-inline function implementation */
// Physical address backing the start of the mapping (segment at offset 0).
IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
5054*699cd480SApple OSS Distributions
5055*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5056*699cd480SApple OSS Distributions
5057*699cd480SApple OSS Distributions bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5058*699cd480SApple OSS Distributions IOMemoryMap::init(
5059*699cd480SApple OSS Distributions task_t intoTask,
5060*699cd480SApple OSS Distributions mach_vm_address_t toAddress,
5061*699cd480SApple OSS Distributions IOOptionBits _options,
5062*699cd480SApple OSS Distributions mach_vm_size_t _offset,
5063*699cd480SApple OSS Distributions mach_vm_size_t _length )
5064*699cd480SApple OSS Distributions {
5065*699cd480SApple OSS Distributions if (!intoTask) {
5066*699cd480SApple OSS Distributions return false;
5067*699cd480SApple OSS Distributions }
5068*699cd480SApple OSS Distributions
5069*699cd480SApple OSS Distributions if (!super::init()) {
5070*699cd480SApple OSS Distributions return false;
5071*699cd480SApple OSS Distributions }
5072*699cd480SApple OSS Distributions
5073*699cd480SApple OSS Distributions fAddressMap = get_task_map(intoTask);
5074*699cd480SApple OSS Distributions if (!fAddressMap) {
5075*699cd480SApple OSS Distributions return false;
5076*699cd480SApple OSS Distributions }
5077*699cd480SApple OSS Distributions vm_map_reference(fAddressMap);
5078*699cd480SApple OSS Distributions
5079*699cd480SApple OSS Distributions fAddressTask = intoTask;
5080*699cd480SApple OSS Distributions fOptions = _options;
5081*699cd480SApple OSS Distributions fLength = _length;
5082*699cd480SApple OSS Distributions fOffset = _offset;
5083*699cd480SApple OSS Distributions fAddress = toAddress;
5084*699cd480SApple OSS Distributions
5085*699cd480SApple OSS Distributions return true;
5086*699cd480SApple OSS Distributions }
5087*699cd480SApple OSS Distributions
5088*699cd480SApple OSS Distributions bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5089*699cd480SApple OSS Distributions IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5090*699cd480SApple OSS Distributions {
5091*699cd480SApple OSS Distributions if (!_memory) {
5092*699cd480SApple OSS Distributions return false;
5093*699cd480SApple OSS Distributions }
5094*699cd480SApple OSS Distributions
5095*699cd480SApple OSS Distributions if (!fSuperMap) {
5096*699cd480SApple OSS Distributions if ((_offset + fLength) > _memory->getLength()) {
5097*699cd480SApple OSS Distributions return false;
5098*699cd480SApple OSS Distributions }
5099*699cd480SApple OSS Distributions fOffset = _offset;
5100*699cd480SApple OSS Distributions }
5101*699cd480SApple OSS Distributions
5102*699cd480SApple OSS Distributions
5103*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5104*699cd480SApple OSS Distributions if (fMemory) {
5105*699cd480SApple OSS Distributions if (fMemory != _memory) {
5106*699cd480SApple OSS Distributions fMemory->removeMapping(this);
5107*699cd480SApple OSS Distributions }
5108*699cd480SApple OSS Distributions }
5109*699cd480SApple OSS Distributions fMemory = os::move(tempval);
5110*699cd480SApple OSS Distributions
5111*699cd480SApple OSS Distributions return true;
5112*699cd480SApple OSS Distributions }
5113*699cd480SApple OSS Distributions
/*
 * IOMemoryDescriptor::doMap
 *
 * Base-class stub: mapping is implemented by subclasses (e.g.
 * IOGeneralMemoryDescriptor); the base class always fails.
 */
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t __addressMap,
	IOVirtualAddress * __address,
	IOOptionBits options,
	IOByteCount __offset,
	IOByteCount __length )
{
	return kIOReturnUnsupported;
}
5124*699cd480SApple OSS Distributions
/*
 * IOMemoryDescriptor::handleFault
 *
 * Fault-time hook: if the descriptor is currently redirected
 * (kIOMemoryRedirected set, e.g. across system sleep), block the faulting
 * thread until redirect(false) clears the flag and issues WAKEUP;
 * otherwise succeed immediately.
 */
IOReturn
IOMemoryDescriptor::handleFault(
	void * _pager,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		// Sleep until the flag clears; re-check after every wakeup.
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5141*699cd480SApple OSS Distributions
/*
 * IOMemoryDescriptor::populateDevicePager
 *
 * Feed the physical pages backing [sourceOffset, sourceOffset + length)
 * of this descriptor into the device pager, so the VM object behind a
 * device mapping has pages to satisfy faults from. For a contiguous pager
 * (reserved->dp.pagerContig) the whole range is populated starting at
 * offset 0, one whole (rounded) segment per pager call; otherwise one
 * call per page. Kernel mappings are additionally pre-faulted with
 * vm_fault(), since device memory cannot be faulted in later from
 * interrupt level.
 */
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void * _pager,
	vm_map_t addressMap,
	mach_vm_address_t address,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length,
	IOOptionBits options )
{
	IOReturn err = kIOReturnSuccess;
	memory_object_t pager = (memory_object_t) _pager;
	mach_vm_size_t size;
	mach_vm_size_t bytes;
	mach_vm_size_t page;
	mach_vm_size_t pageOffset;
	mach_vm_size_t pagerOffset;
	IOPhysicalLength segLen, chunk;
	addr64_t physAddr;
	IOOptionBits type;

	type = _flags & kIOMemoryTypeMask;

	// A contiguous pager is always populated from its start, regardless
	// of the faulting offset. (pagerOffset is unconditionally reset to
	// sourceOffset below, so only the sourceOffset = 0 matters here.)
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Split the first segment's address into page base + intra-page offset.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		// Segments after the first must also start page-aligned.
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Diagnostic: report physical-address descriptors (non-UPL) whose
		// range covers VM-managed pages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Hand the segment to the pager: whole segment at once for a
		// contiguous pager, page by page otherwise.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next segment; only the first can start mid-page.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Ran out of physical segments before covering the requested range.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5244*699cd480SApple OSS Distributions
/*
 * IOMemoryDescriptor::doUnmap
 *
 * Tear down a mapping. Note the unusual calling convention: __address is
 * really the IOMemoryMap * being destroyed (callers pass
 * (IOVirtualAddress) mapping with __length == 0); the actual map,
 * address and length are read out of that object.
 */
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t addressMap,
	IOVirtualAddress __address,
	IOByteCount __length )
{
	IOReturn err;
	IOMemoryMap * mapping;
	mach_vm_address_t address;
	mach_vm_size_t length;

	// Enforce the calling convention described above.
	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address = mapping->fAddress;
	length = mapping->fLength;

	// kIOMapOverwrite mappings skip the deallocation entirely
	// (presumably the VA range is owned elsewhere — see map creation).
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffer memory lives in a pageable submap, not
		// directly in kernel_map.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	// The tracking entry is removed even when deallocation failed or was
	// skipped for an overwrite mapping.
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5289*699cd480SApple OSS Distributions
/*
 * IOMemoryDescriptor::redirect
 *
 * Set or clear kIOMemoryRedirected and apply the change to every
 * IOMemoryMap of this descriptor. While redirected, handleFault() blocks
 * fault servicing. Clearing redirection wakes those sleepers (WAKEUP)
 * and, when safeTask is NULL, repopulates the device pager for kernel
 * mappings.
 */
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;
	IOMemoryMap * mapping = NULL;
	OSSharedPtr<OSIterator> iter;

	LOCK;

	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// Global un-redirect: refill kernel device mappings.
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	if (!doRedirect) {
		// Unblock threads sleeping in handleFault().
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	// NOTE: this overwrites any error from populateDevicePager above.
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5344*699cd480SApple OSS Distributions
/*
 * IOMemoryMap::redirect
 *
 * Apply redirection to this single mapping. Non-static mappings that do
 * not belong to safeTask have their pmap entries removed so the next
 * access faults; write-combined mappings are instead toggled between
 * inhibit-cache (redirected) and write-combine (restored). Physical
 * descriptors mapped by safeTask additionally propagate the redirect to
 * the descriptor itself.
 */
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
		// Submaps are handled by their parent (historical code below).
//		err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing mapped, or the target map is gone: nothing to do.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				// Throw away translations; next touch will fault.
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// NOTE(review): fMemory is dereferenced without a NULL check here —
	// assumes a descriptor is always attached by the time redirect runs.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5388*699cd480SApple OSS Distributions
/*
 * IOMemoryMap::unmap
 *
 * Undo the VM mapping via the descriptor's doUnmap (which takes this
 * object itself as the address argument) and drop the reference on the
 * target vm_map. Static mappings, submaps, and maps with no address or
 * descriptor are left alone and report success.
 */
IOReturn
IOMemoryMap::unmap( void )
{
	IOReturn err;

	LOCK;

	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
	    && (0 == (kIOMapStatic & fOptions))) {
		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
	} else {
		err = kIOReturnSuccess;
	}

	// Release the vm_map reference taken in init().
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}

	fAddress = 0;

	UNLOCK;

	return err;
}
5414*699cd480SApple OSS Distributions
/*
 * IOMemoryMap::taskDied
 *
 * Called when the owning task terminates. If userClientUnmap() was
 * requested, fully unmap; otherwise (with IOTRACKING) just drop the
 * tracking entry. In both cases release the vm_map reference and forget
 * the task and address.
 */
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	// 'else' pairs with the 'if' above only when tracking is compiled in.
	else {
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress = 0;
	UNLOCK;
}
5436*699cd480SApple OSS Distributions
/*
 * IOMemoryMap::userClientUnmap
 *
 * Flag this mapping so that taskDied() performs a full unmap when the
 * owning task terminates.
 */
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5443*699cd480SApple OSS Distributions
// Overload the release mechanism. All mappings must be a member
// of a memory descriptors _mappings set. This means that we
// always have 2 references on a mapping. When either of these mappings
// are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// freeWhen = 2: free once only the _mappings-set references remain.
	// LOCK serializes the release against other map operations.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5455*699cd480SApple OSS Distributions
/*
 * IOMemoryMap::free
 *
 * Final teardown: unmap the range, detach from the descriptor's mapping
 * set, drop the submap reference, and commit + release any redirection
 * UPL still held.
 */
void
IOMemoryMap::free()
{
	unmap();

	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	if (fRedirUPL) {
		// Commit outstanding pages before releasing the UPL.
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5479*699cd480SApple OSS Distributions
// Size of the mapped range, in bytes.
IOByteCount
IOMemoryMap::getLength()
{
	return fLength;
}
5485*699cd480SApple OSS Distributions
/*
 * IOMemoryMap::getVirtualAddress
 *
 * Legacy accessor for the mapped address. On 32-bit kernels it warns when
 * used against a 64-bit task map, where IOVirtualAddress would truncate;
 * callers should use getAddress() instead.
 */
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// NOTE(review): return value discarded — presumably invoked only
		// for the parent's 64-bit-map warning side effect; this map's own
		// fAddress is returned below either way. Confirm intent.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5501*699cd480SApple OSS Distributions
#ifndef __LP64__
// Full-width accessors, only built for 32-bit kernels.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5515*699cd480SApple OSS Distributions
5516*699cd480SApple OSS Distributions
5517*699cd480SApple OSS Distributions task_t
getAddressTask()5518*699cd480SApple OSS Distributions IOMemoryMap::getAddressTask()
5519*699cd480SApple OSS Distributions {
5520*699cd480SApple OSS Distributions if (fSuperMap) {
5521*699cd480SApple OSS Distributions return fSuperMap->getAddressTask();
5522*699cd480SApple OSS Distributions } else {
5523*699cd480SApple OSS Distributions return fAddressTask;
5524*699cd480SApple OSS Distributions }
5525*699cd480SApple OSS Distributions }
5526*699cd480SApple OSS Distributions
// Option bits (kIOMap*) this mapping was created with.
IOOptionBits
IOMemoryMap::getMapOptions()
{
	return fOptions;
}
5532*699cd480SApple OSS Distributions
// Return the backing memory descriptor (borrowed pointer; no extra
// retain is taken — fMemory keeps its own reference).
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
5538*699cd480SApple OSS Distributions
// Try to satisfy the mapping request described by newMapping from this
// existing mapping. Returns:
//  - this (retained) when the request is for the identical range;
//  - newMapping, reconfigured as a submap of this, for a sub-range;
//  - NULL when the request is incompatible and a fresh mapping is needed.
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t task = newMapping->getAddressTask();
	mach_vm_address_t toAddress = newMapping->fAddress;
	IOOptionBits _options = newMapping->fOptions;
	mach_vm_size_t _offset = newMapping->fOffset;
	mach_vm_size_t _length = newMapping->fLength;

	// Must target the same VM map (i.e. the same task address space).
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// Read-only attribute must match exactly.
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	// Guarded attribute must match exactly.
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	// Cache mode must match, unless the request accepts the default mode.
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must match this mapping's address.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// The requested range must lie wholly within this mapping.
	if (_offset < fOffset) {
		return NULL;
	}

	// Rebase the request offset relative to this mapping's start.
	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		// Exact match: reuse this mapping; caller receives a retained ref.
		retain();
		newMapping = this;
	} else {
		// Sub-range: chain newMapping as a submap of this one.
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset = fOffset + _offset;
		newMapping->fAddress = fAddress + _offset;
	}

	return newMapping;
}
5588*699cd480SApple OSS Distributions
5589*699cd480SApple OSS Distributions IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5590*699cd480SApple OSS Distributions IOMemoryMap::wireRange(
5591*699cd480SApple OSS Distributions uint32_t options,
5592*699cd480SApple OSS Distributions mach_vm_size_t offset,
5593*699cd480SApple OSS Distributions mach_vm_size_t length)
5594*699cd480SApple OSS Distributions {
5595*699cd480SApple OSS Distributions IOReturn kr;
5596*699cd480SApple OSS Distributions mach_vm_address_t start = trunc_page_64(fAddress + offset);
5597*699cd480SApple OSS Distributions mach_vm_address_t end = round_page_64(fAddress + offset + length);
5598*699cd480SApple OSS Distributions vm_prot_t prot;
5599*699cd480SApple OSS Distributions
5600*699cd480SApple OSS Distributions prot = (kIODirectionOutIn & options);
5601*699cd480SApple OSS Distributions if (prot) {
5602*699cd480SApple OSS Distributions kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5603*699cd480SApple OSS Distributions } else {
5604*699cd480SApple OSS Distributions kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5605*699cd480SApple OSS Distributions }
5606*699cd480SApple OSS Distributions
5607*699cd480SApple OSS Distributions return kr;
5608*699cd480SApple OSS Distributions }
5609*699cd480SApple OSS Distributions
5610*699cd480SApple OSS Distributions
// Translate an offset within this mapping to a physical address by
// delegating to the backing descriptor at (fOffset + _offset).
// *_length returns the contiguous byte count at that address.
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress address;

	// Take the global memory lock around the descriptor query.
	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5630*699cd480SApple OSS Distributions
5631*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5632*699cd480SApple OSS Distributions
5633*699cd480SApple OSS Distributions #undef super
5634*699cd480SApple OSS Distributions #define super OSObject
5635*699cd480SApple OSS Distributions
5636*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5637*699cd480SApple OSS Distributions
5638*699cd480SApple OSS Distributions void
initialize(void)5639*699cd480SApple OSS Distributions IOMemoryDescriptor::initialize( void )
5640*699cd480SApple OSS Distributions {
5641*699cd480SApple OSS Distributions if (NULL == gIOMemoryLock) {
5642*699cd480SApple OSS Distributions gIOMemoryLock = IORecursiveLockAlloc();
5643*699cd480SApple OSS Distributions }
5644*699cd480SApple OSS Distributions
5645*699cd480SApple OSS Distributions gIOLastPage = IOGetLastPageNumber();
5646*699cd480SApple OSS Distributions }
5647*699cd480SApple OSS Distributions
5648*699cd480SApple OSS Distributions void
free(void)5649*699cd480SApple OSS Distributions IOMemoryDescriptor::free( void )
5650*699cd480SApple OSS Distributions {
5651*699cd480SApple OSS Distributions if (_mappings) {
5652*699cd480SApple OSS Distributions _mappings.reset();
5653*699cd480SApple OSS Distributions }
5654*699cd480SApple OSS Distributions
5655*699cd480SApple OSS Distributions if (reserved) {
5656*699cd480SApple OSS Distributions cleanKernelReserved(reserved);
5657*699cd480SApple OSS Distributions IOFreeType(reserved, IOMemoryDescriptorReserved);
5658*699cd480SApple OSS Distributions reserved = NULL;
5659*699cd480SApple OSS Distributions }
5660*699cd480SApple OSS Distributions super::free();
5661*699cd480SApple OSS Distributions }
5662*699cd480SApple OSS Distributions
5663*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5664*699cd480SApple OSS Distributions IOMemoryDescriptor::setMapping(
5665*699cd480SApple OSS Distributions task_t intoTask,
5666*699cd480SApple OSS Distributions IOVirtualAddress mapAddress,
5667*699cd480SApple OSS Distributions IOOptionBits options )
5668*699cd480SApple OSS Distributions {
5669*699cd480SApple OSS Distributions return createMappingInTask( intoTask, mapAddress,
5670*699cd480SApple OSS Distributions options | kIOMapStatic,
5671*699cd480SApple OSS Distributions 0, getLength());
5672*699cd480SApple OSS Distributions }
5673*699cd480SApple OSS Distributions
5674*699cd480SApple OSS Distributions OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5675*699cd480SApple OSS Distributions IOMemoryDescriptor::map(
5676*699cd480SApple OSS Distributions IOOptionBits options )
5677*699cd480SApple OSS Distributions {
5678*699cd480SApple OSS Distributions return createMappingInTask( kernel_task, 0,
5679*699cd480SApple OSS Distributions options | kIOMapAnywhere,
5680*699cd480SApple OSS Distributions 0, getLength());
5681*699cd480SApple OSS Distributions }
5682*699cd480SApple OSS Distributions
#ifndef __LP64__
// Legacy map() entry point. A fixed-address mapping in a 64-bit task
// cannot be expressed through the 32-bit IOVirtualAddress argument, so
// such requests are rejected with a logged backtrace; callers should
// use createMappingInTask() instead.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t intoTask,
	IOVirtualAddress atAddress,
	IOOptionBits options,
	IOByteCount offset,
	IOByteCount length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5701*699cd480SApple OSS Distributions
// Create a mapping of this descriptor in intoTask's address space.
// length == 0 maps the whole descriptor. A raw IOMemoryMap request
// object is allocated here and handed to makeMapping() (smuggled via
// the __address argument, flagged by kIOMap64Bit), which either adopts
// it, swaps it for a compatible existing mapping, or releases it on
// failure. The result comes back already retained (OSNoRetain).
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t intoTask,
	mach_vm_address_t atAddress,
	IOOptionBits options,
	mach_vm_size_t offset,
	mach_vm_size_t length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

	// If init fails, drop the half-constructed request object.
	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
5744*699cd480SApple OSS Distributions
#ifndef __LP64__ // there is only a 64 bit version for LP64
// Legacy 32-bit-offset variant; forwards to the mach_vm_size_t version.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    IOByteCount offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5754*699cd480SApple OSS Distributions
// Redirect this mapping onto a new backing memory descriptor, or — when
// newBackingMemory is NULL — just block access to the current pages via
// a UPL until a later call supplies the new backing memory.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    mach_vm_size_t offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	// Only meaningful for an established mapping.
	if (fAddress && fAddressMap) {
		do{
			// Note physically-addressed backing memory; its pages are
			// unmapped below while the redirect is in progress.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// With a sole memory-entry reference, take a blocking UPL
			// (UPL_BLOCK_ACCESS) so accesses stall until the redirect
			// completes and the UPL is committed below.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) {
						// Deliberately disabled path.
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// Rebuild this same map object over the new backing
					// memory (kIOMapReference | kIOMapUnique keep the
					// existing VM range). makeMapping should return 'this'.
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Release the blocking UPL, letting stalled accesses resume.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) {
					// Deliberately disabled path.
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5815*699cd480SApple OSS Distributions
// Central mapping factory. The request object (an already-init'ed
// IOMemoryMap) is smuggled in via __address, which requires kIOMap64Bit.
// Resolution order within the single-pass do/while:
//   1. kIOMapStatic: adopt the request as-is (mapping already exists);
//   2. kIOMapUnique on physical memory: substitute a temporary physical
//      sub-descriptor covering just the requested contiguous range;
//   3. otherwise: reuse a compatible existing mapping if one is found;
//   4. failing all of those: ask the descriptor's doMap() to create the
//      VM mapping.
// On failure the request object is released and NULL is returned; on
// success the returned mapping carries a reference for the caller.
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor * owner,
	task_t __intoTask,
	IOVirtualAddress __address,
	IOOptionBits options,
	IOByteCount __offset,
	IOByteCount __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	// __block: assigned from inside the iterateObjects block below.
	__block IOMemoryMap * result = NULL;

	IOMemoryMap * mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset = mapping->fOffset + __offset;
	mach_vm_size_t length = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Pre-existing (static) mapping; just record it.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount physLen;

			// if (owner != this) continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Require a single contiguous physical segment covering
				// the whole request, then map through a fresh physical
				// descriptor for just that range.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
						addMapping(result);
						result->setMemoryDescriptor(this, offset);
						return true; // stop iterating
					}
					return false;
				});
			}
			if (result || (options & kIOMapReference)) {
				// Reused an existing mapping (or a reference-only request
				// found none): drop the now-unneeded request object.
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		// Default to mapping through this descriptor itself.
		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
5913*699cd480SApple OSS Distributions
5914*699cd480SApple OSS Distributions void
addMapping(IOMemoryMap * mapping)5915*699cd480SApple OSS Distributions IOMemoryDescriptor::addMapping(
5916*699cd480SApple OSS Distributions IOMemoryMap * mapping )
5917*699cd480SApple OSS Distributions {
5918*699cd480SApple OSS Distributions if (mapping) {
5919*699cd480SApple OSS Distributions if (NULL == _mappings) {
5920*699cd480SApple OSS Distributions _mappings = OSSet::withCapacity(1);
5921*699cd480SApple OSS Distributions }
5922*699cd480SApple OSS Distributions if (_mappings) {
5923*699cd480SApple OSS Distributions _mappings->setObject( mapping );
5924*699cd480SApple OSS Distributions }
5925*699cd480SApple OSS Distributions }
5926*699cd480SApple OSS Distributions }
5927*699cd480SApple OSS Distributions
5928*699cd480SApple OSS Distributions void
removeMapping(IOMemoryMap * mapping)5929*699cd480SApple OSS Distributions IOMemoryDescriptor::removeMapping(
5930*699cd480SApple OSS Distributions IOMemoryMap * mapping )
5931*699cd480SApple OSS Distributions {
5932*699cd480SApple OSS Distributions if (_mappings) {
5933*699cd480SApple OSS Distributions _mappings->removeObject( mapping);
5934*699cd480SApple OSS Distributions }
5935*699cd480SApple OSS Distributions }
5936*699cd480SApple OSS Distributions
// Store IOMapper-related option bits for this descriptor.
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}
5942*699cd480SApple OSS Distributions
// Return the IOMapper-related option bits set via setMapperOptions().
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}
5948*699cd480SApple OSS Distributions
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
// These legacy entry points are kept only for 32-bit binary
// compatibility; they always fail (return false / NULL).
bool
IOMemoryDescriptor::initWithAddress(void * address,
    IOByteCount length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount length,
	IODirection direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32 withCount,
	IODirection direction,
	task_t task,
	bool asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
    UInt32 withCount,
    IODirection direction,
    bool asReference)
{
	return false;
}

// Obsolete accessor; always returns NULL.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
6005*699cd480SApple OSS Distributions
6006*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6007*699cd480SApple OSS Distributions
// Serialize this descriptor's ranges as an OSArray of {address, length}
// dictionaries. The range list is snapshotted under the lock so that no
// allocation happens while the lock is held.
bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	OSSharedPtr<OSSymbol const> keys[2] = {NULL};
	OSSharedPtr<OSObject> values[2] = {NULL};
	OSSharedPtr<OSArray> array;

	// Plain copy of one range, safe to fill while locked.
	struct SerData {
		user_addr_t address;
		user_size_t length;
	};

	unsigned int index;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
	if (!vcopy) {
		return false;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (vcopy.size() == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < vcopy.size(); index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index, _task);
			vcopy[index].address = addr;
			vcopy[index].length = len;
		}
	} else {
		// The descriptor changed out from under us. Give up.
		UNLOCK;
		return false;
	}
	UNLOCK;

	// Build one {address, length} dictionary per copied range.
	// OSSharedPtr cleans up on the early-return failure paths.
	for (index = 0; index < vcopy.size(); index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			return false;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			return false;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			return false;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	return array->serialize(s);
}
6082*699cd480SApple OSS Distributions /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6083*699cd480SApple OSS Distributions
6084*699cd480SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
6085*699cd480SApple OSS Distributions #ifdef __LP64__
6086*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
6087*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
6088*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
6089*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
6090*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
6091*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
6092*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
6093*699cd480SApple OSS Distributions #else /* !__LP64__ */
6094*699cd480SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
6095*699cd480SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
6096*699cd480SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
6097*699cd480SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
6098*699cd480SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
6099*699cd480SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
6100*699cd480SApple OSS Distributions OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
6101*699cd480SApple OSS Distributions #endif /* !__LP64__ */
6102*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
6103*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
6104*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
6105*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
6106*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
6107*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
6108*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
6109*699cd480SApple OSS Distributions OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6110*699cd480SApple OSS Distributions
6111*699cd480SApple OSS Distributions /* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
6112*699cd480SApple OSS Distributions KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
6113*699cd480SApple OSS Distributions struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6114*699cd480SApple OSS Distributions
6115*699cd480SApple OSS Distributions /* ex-inline function implementation */
6116*699cd480SApple OSS Distributions IOPhysicalAddress
getPhysicalAddress()6117*699cd480SApple OSS Distributions IOMemoryDescriptor::getPhysicalAddress()
6118*699cd480SApple OSS Distributions {
6119*699cd480SApple OSS Distributions return getPhysicalSegment( 0, NULL );
6120*699cd480SApple OSS Distributions }
6121*699cd480SApple OSS Distributions
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

/*
 * Factory: allocate a _IOMemoryDescriptorMixedData and initialize it
 * with the requested capacity.  Returns nullptr on allocation or
 * initialization failure.
 */
OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	OSSharedPtr<_IOMemoryDescriptorMixedData> instance = OSMakeShared<_IOMemoryDescriptorMixedData>();

	if (!instance) {
		return nullptr;
	}
	if (!instance->initWithCapacity(capacity)) {
		return nullptr;
	}
	return instance;
}
6133*699cd480SApple OSS Distributions
6134*699cd480SApple OSS Distributions bool
initWithCapacity(size_t capacity)6135*699cd480SApple OSS Distributions _IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
6136*699cd480SApple OSS Distributions {
6137*699cd480SApple OSS Distributions if (_data && (!capacity || (_capacity < capacity))) {
6138*699cd480SApple OSS Distributions freeMemory();
6139*699cd480SApple OSS Distributions }
6140*699cd480SApple OSS Distributions
6141*699cd480SApple OSS Distributions if (!OSObject::init()) {
6142*699cd480SApple OSS Distributions return false;
6143*699cd480SApple OSS Distributions }
6144*699cd480SApple OSS Distributions
6145*699cd480SApple OSS Distributions if (!_data && capacity) {
6146*699cd480SApple OSS Distributions _data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
6147*699cd480SApple OSS Distributions Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
6148*699cd480SApple OSS Distributions if (!_data) {
6149*699cd480SApple OSS Distributions return false;
6150*699cd480SApple OSS Distributions }
6151*699cd480SApple OSS Distributions _capacity = capacity;
6152*699cd480SApple OSS Distributions }
6153*699cd480SApple OSS Distributions
6154*699cd480SApple OSS Distributions _length = 0;
6155*699cd480SApple OSS Distributions
6156*699cd480SApple OSS Distributions return true;
6157*699cd480SApple OSS Distributions }
6158*699cd480SApple OSS Distributions
6159*699cd480SApple OSS Distributions void
free()6160*699cd480SApple OSS Distributions _IOMemoryDescriptorMixedData::free()
6161*699cd480SApple OSS Distributions {
6162*699cd480SApple OSS Distributions freeMemory();
6163*699cd480SApple OSS Distributions OSObject::free();
6164*699cd480SApple OSS Distributions }
6165*699cd480SApple OSS Distributions
6166*699cd480SApple OSS Distributions void
freeMemory()6167*699cd480SApple OSS Distributions _IOMemoryDescriptorMixedData::freeMemory()
6168*699cd480SApple OSS Distributions {
6169*699cd480SApple OSS Distributions kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
6170*699cd480SApple OSS Distributions _data = nullptr;
6171*699cd480SApple OSS Distributions _capacity = _length = 0;
6172*699cd480SApple OSS Distributions }
6173*699cd480SApple OSS Distributions
6174*699cd480SApple OSS Distributions bool
appendBytes(const void * bytes,size_t length)6175*699cd480SApple OSS Distributions _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6176*699cd480SApple OSS Distributions {
6177*699cd480SApple OSS Distributions const auto oldLength = getLength();
6178*699cd480SApple OSS Distributions size_t newLength;
6179*699cd480SApple OSS Distributions if (os_add_overflow(oldLength, length, &newLength)) {
6180*699cd480SApple OSS Distributions return false;
6181*699cd480SApple OSS Distributions }
6182*699cd480SApple OSS Distributions
6183*699cd480SApple OSS Distributions if (!setLength(newLength)) {
6184*699cd480SApple OSS Distributions return false;
6185*699cd480SApple OSS Distributions }
6186*699cd480SApple OSS Distributions
6187*699cd480SApple OSS Distributions unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6188*699cd480SApple OSS Distributions if (bytes) {
6189*699cd480SApple OSS Distributions bcopy(bytes, dest, length);
6190*699cd480SApple OSS Distributions }
6191*699cd480SApple OSS Distributions
6192*699cd480SApple OSS Distributions return true;
6193*699cd480SApple OSS Distributions }
6194*699cd480SApple OSS Distributions
6195*699cd480SApple OSS Distributions bool
setLength(size_t length)6196*699cd480SApple OSS Distributions _IOMemoryDescriptorMixedData::setLength(size_t length)
6197*699cd480SApple OSS Distributions {
6198*699cd480SApple OSS Distributions if (!_data || (length > _capacity)) {
6199*699cd480SApple OSS Distributions void *newData;
6200*699cd480SApple OSS Distributions
6201*699cd480SApple OSS Distributions newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
6202*699cd480SApple OSS Distributions length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
6203*699cd480SApple OSS Distributions NULL);
6204*699cd480SApple OSS Distributions if (!newData) {
6205*699cd480SApple OSS Distributions return false;
6206*699cd480SApple OSS Distributions }
6207*699cd480SApple OSS Distributions
6208*699cd480SApple OSS Distributions _data = newData;
6209*699cd480SApple OSS Distributions _capacity = length;
6210*699cd480SApple OSS Distributions }
6211*699cd480SApple OSS Distributions
6212*699cd480SApple OSS Distributions _length = length;
6213*699cd480SApple OSS Distributions return true;
6214*699cd480SApple OSS Distributions }
6215*699cd480SApple OSS Distributions
6216*699cd480SApple OSS Distributions const void *
getBytes() const6217*699cd480SApple OSS Distributions _IOMemoryDescriptorMixedData::getBytes() const
6218*699cd480SApple OSS Distributions {
6219*699cd480SApple OSS Distributions return _length ? _data : nullptr;
6220*699cd480SApple OSS Distributions }
6221*699cd480SApple OSS Distributions
6222*699cd480SApple OSS Distributions size_t
getLength() const6223*699cd480SApple OSS Distributions _IOMemoryDescriptorMixedData::getLength() const
6224*699cd480SApple OSS Distributions {
6225*699cd480SApple OSS Distributions return _data ? _length : 0;
6226*699cd480SApple OSS Distributions }
6227