// xref: /xnu-10063.101.15/iokit/IOKit/IOMemoryDescriptor.h (revision 94d3b452840153a99b38a3a9659680b2a006908e)
1 /*
2  * Copyright (c) 1998-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _IOMEMORYDESCRIPTOR_H
29 #define _IOMEMORYDESCRIPTOR_H
30 
31 #include <sys/cdefs.h>
32 
33 #include <IOKit/IOTypes.h>
34 #include <IOKit/IOLocks.h>
35 #include <libkern/c++/OSPtr.h>
36 #include <libkern/c++/OSContainers.h>
37 #include <DriverKit/IOMemoryDescriptor.h>
38 #include <DriverKit/IOMemoryMap.h>
39 #ifdef XNU_KERNEL_PRIVATE
40 #include <IOKit/IOKitDebug.h>
41 #endif
42 
43 #include <mach/memory_object_types.h>
44 
45 class IOMemoryDescriptor;
46 class IOMemoryMap;
47 class IOMapper;
48 class IOService;
49 class IODMACommand;
50 class _IOMemoryDescriptorMixedData;
51 
/*
 * Direction of transfer, with respect to the described memory.
 *
 * On LP64 kernels the enumerators are anonymous and IODirection is a plain
 * IOOptionBits typedef (declared below); on 32-bit kernels the enum itself
 * is the IODirection type.  The low three bits intentionally match the
 * VM_PROT_* values, as noted per-enumerator.
 *
 * The IODIRECTION*DEFINED macros allow clients to test with #if whether
 * this SDK declares the corresponding flag.
 */
#ifdef __LP64__
enum
#else /* !__LP64__ */
enum IODirection
#endif /* !__LP64__ */
{
	kIODirectionNone  = 0x0,//                    same as VM_PROT_NONE
	kIODirectionIn    = 0x1,// User land 'read',  same as VM_PROT_READ
	kIODirectionOut   = 0x2,// User land 'write', same as VM_PROT_WRITE
	kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
	kIODirectionInOut = kIODirectionIn  | kIODirectionOut,

	// these flags are valid for the prepare() method only
	kIODirectionPrepareToPhys32   = 0x00000004,
	kIODirectionPrepareNoFault    = 0x00000008,
	kIODirectionPrepareReserved1  = 0x00000010,
#define IODIRECTIONPREPARENONCOHERENTDEFINED    1
	kIODirectionPrepareNonCoherent = 0x00000020,
#if KERNEL_PRIVATE
#define IODIRECTIONPREPAREAVOIDTHROTTLING       1
	kIODirectionPrepareAvoidThrottling = 0x00000100,
#endif

	// these flags are valid for the complete() method only
#define IODIRECTIONCOMPLETEWITHERRORDEFINED             1
	kIODirectionCompleteWithError = 0x00000040,
#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
	kIODirectionCompleteWithDataValid = 0x00000080,
};

#ifdef __LP64__
/* On LP64, IODirection is carried in the options word; see kIOMemoryDirectionMask. */
typedef IOOptionBits IODirection;
#endif /* __LP64__ */
88 
/*
 * IOOptionBits used in the withOptions variant
 */
enum {
	// Low nibble: the IODirection bits (kIODirectionIn/Out) for the descriptor.
	kIOMemoryDirectionMask      = 0x00000007,
#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryAutoPrepare        = 0x00000008,// Shared with Buffer MD
#endif

	// options:type — selects the flavor of backing description handed to
	// withOptions()/initWithOptions(); extract with kIOMemoryTypeMask.
	kIOMemoryTypeVirtual        = 0x00000010,
	kIOMemoryTypePhysical       = 0x00000020,
	kIOMemoryTypeUPL            = 0x00000030,
	kIOMemoryTypePersistentMD   = 0x00000040,// Persistent Memory Descriptor
	kIOMemoryTypeUIO            = 0x00000050,
#ifdef __LP64__
	// On LP64 kernels the 64-bit range types are the native ones, so the
	// legacy names alias them rather than getting distinct codes.
	kIOMemoryTypeVirtual64      = kIOMemoryTypeVirtual,
	kIOMemoryTypePhysical64     = kIOMemoryTypePhysical,
#else /* !__LP64__ */
	kIOMemoryTypeVirtual64      = 0x00000060,
	kIOMemoryTypePhysical64     = 0x00000070,
#endif /* !__LP64__ */
	kIOMemoryTypeMask           = 0x000000f0,

	// Behavioral flags.
	kIOMemoryAsReference        = 0x00000100,// don't copy the ranges array; caller keeps it alive
	kIOMemoryBufferPageable     = 0x00000400,
	kIOMemoryMapperNone         = 0x00000800,// Shared with Buffer MD
	kIOMemoryHostOnly           = 0x00001000,// Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryRedirected         = 0x00004000,
	kIOMemoryPreparedReadOnly   = 0x00008000,
#endif
	kIOMemoryPersistent         = 0x00010000,// required for withPersistentMemoryDescriptor()
	kIOMemoryMapCopyOnWrite     = 0x00020000,
	kIOMemoryRemote             = 0x00040000,
	kIOMemoryThreadSafe         = 0x00100000,// Shared with Buffer MD
	kIOMemoryClearEncrypt       = 0x00200000,// Shared with Buffer MD
	kIOMemoryUseReserve         = 0x00800000,// Shared with Buffer MD
#define IOMEMORYUSERESERVEDEFINED       1

#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryBufferPurgeable    = 0x00400000,
	kIOMemoryBufferCacheMask    = 0x70000000,// cache mode bits for Buffer MD
	kIOMemoryBufferCacheShift   = 28,
#endif
};
134 
/* Pass as the mapper argument to request the system (default) IOMapper. */
#define kIOMapperSystem ((IOMapper *) NULL)

/* Ledger tags for setOwnership(); values mirror the VM_LEDGER_TAG_* constants. */
enum{
	kIOMemoryLedgerTagDefault       = VM_LEDGER_TAG_DEFAULT,
	/* NOTE(review): lowercase 'm' is a historical typo; the name is kept
	 * as-is for source compatibility with existing clients. */
	kIOmemoryLedgerTagNetwork       = VM_LEDGER_TAG_NETWORK,
	kIOMemoryLedgerTagMedia         = VM_LEDGER_TAG_MEDIA,
	kIOMemoryLedgerTagGraphics      = VM_LEDGER_TAG_GRAPHICS,
	kIOMemoryLedgerTagNeural        = VM_LEDGER_TAG_NEURAL,
};
/* Ledger options for setOwnership(); mirrors VM_LEDGER_FLAG_*. */
enum{
	kIOMemoryLedgerFlagNoFootprint  = VM_LEDGER_FLAG_NO_FOOTPRINT,
};
147 
/* States and modifiers for setPurgeable(); see that method's discussion
 * for the exact semantics of each state and of the returned old state. */
enum{
	kIOMemoryPurgeableKeepCurrent = 1,   // query only: make no state change

	kIOMemoryPurgeableNonVolatile = 2,   // contents preserved; pageable as usual
	kIOMemoryPurgeableVolatile    = 3,   // VM may reclaim without writing to backing store
	kIOMemoryPurgeableEmpty       = 4,   // volatile, and discard any pages now

	// modifiers for kIOMemoryPurgeableVolatile behavior
	// (values mirror the VM_VOLATILE_GROUP_* / VM_PURGABLE_* constants)
	kIOMemoryPurgeableVolatileGroup0           = VM_VOLATILE_GROUP_0,
	kIOMemoryPurgeableVolatileGroup1           = VM_VOLATILE_GROUP_1,
	kIOMemoryPurgeableVolatileGroup2           = VM_VOLATILE_GROUP_2,
	kIOMemoryPurgeableVolatileGroup3           = VM_VOLATILE_GROUP_3,
	kIOMemoryPurgeableVolatileGroup4           = VM_VOLATILE_GROUP_4,
	kIOMemoryPurgeableVolatileGroup5           = VM_VOLATILE_GROUP_5,
	kIOMemoryPurgeableVolatileGroup6           = VM_VOLATILE_GROUP_6,
	kIOMemoryPurgeableVolatileGroup7           = VM_VOLATILE_GROUP_7,
	kIOMemoryPurgeableVolatileBehaviorFifo     = VM_PURGABLE_BEHAVIOR_FIFO,
	kIOMemoryPurgeableVolatileBehaviorLifo     = VM_PURGABLE_BEHAVIOR_LIFO,
	kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
	kIOMemoryPurgeableVolatileOrderingNormal   = VM_PURGABLE_ORDERING_NORMAL,
	kIOMemoryPurgeableFaultOnAccess            = VM_PURGABLE_DEBUG_FAULT,
};
/* Operations for performOperation(); the flush/store cache operations are
 * documented on that method.  The encryption operations presumably go
 * through the same entry point — confirm against the implementation. */
enum{
	kIOMemoryIncoherentIOFlush   = 1,   // store + flush processor cache for the range
	kIOMemoryIncoherentIOStore   = 2,   // store processor cache contents for the range

	kIOMemoryClearEncrypted      = 50,
	kIOMemorySetEncrypted        = 51,
};
177 
#define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND  1

/* Constraints a device places on a DMA mapping request (see dmaMap() and
 * the kIOMDAddDMAMapSpec / kIOMDDMAMap operations). */
struct IODMAMapSpecification {
	uint64_t    alignment;       // required alignment of the mapped address
	IOService * device;          // device the mapping is made on behalf of
	uint32_t    options;
	uint8_t     numAddressBits;  // DMA addressing width supported by the device
	uint8_t     resvA[3];        // reserved / pad
	uint32_t    resvB[4];        // reserved
};
188 
/* A UPL page list to be DMA-mapped: pageListCount entries in pageList,
 * with the described data starting pageOffset bytes into the first page. */
struct IODMAMapPageList {
	uint32_t                pageOffset;     // byte offset into the first page
	uint32_t                pageListCount;  // number of entries in pageList
	const upl_page_info_t * pageList;       // not owned by this struct
};
194 
// mapOptions for iovmMapMemory
enum{
	kIODMAMapReadAccess           = 0x00000001,// device may read through the mapping
	kIODMAMapWriteAccess          = 0x00000002,// device may write through the mapping
	kIODMAMapPhysicallyContiguous = 0x00000010,
	kIODMAMapDeviceMemory         = 0x00000020,
	kIODMAMapPagingPath           = 0x00000040,
	kIODMAMapIdentityMap          = 0x00000080,

	kIODMAMapPageListFullyOccupied = 0x00000100,
	kIODMAMapFixedAddress          = 0x00000200,
};
207 
// Options used by IOMapper. example IOMappers are DART and VT-d
enum {
	kIOMapperUncached      = 0x0001,
};
212 
213 #ifdef KERNEL_PRIVATE
214 
// Used for dmaCommandOperation communications for IODMACommand and mappers

/* Operation codes for dmaCommandOperation().  The top byte
 * (kIOMDDMACommandOperationMask) selects the operation; the low bits
 * select a sub-variant (e.g. kIOMDFirstSegment = kIOMDWalkSegments | 1). */
enum  {
	kIOMDWalkSegments             = 0x01000000,// walk segments; vData is IOMDDMAWalkSegmentArgs
	kIOMDFirstSegment             = 1 | kIOMDWalkSegments,// restart walk at the first segment
	kIOMDGetCharacteristics       = 0x02000000,// vData is IOMDDMACharacteristics
	kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
	kIOMDDMAActive                = 0x03000000,
	kIOMDSetDMAActive             = 1 | kIOMDDMAActive,
	kIOMDSetDMAInactive           = kIOMDDMAActive,// note: same code as kIOMDDMAActive
	kIOMDAddDMAMapSpec            = 0x04000000,// vData is IODMAMapSpecification
	kIOMDDMAMap                   = 0x05000000,// vData is IOMDDMAMapArgs
	kIOMDDMAUnmap                 = 0x06000000,
	kIOMDDMACommandOperationMask  = 0xFF000000,
};
/* Result block for the kIOMDGetCharacteristics operation. */
struct IOMDDMACharacteristics {
	UInt64 fLength;        // total length of the descriptor
	UInt32 fSGCount;       // presumably scatter/gather segment count — confirm in implementation
	UInt32 fPages;
	UInt32 fPageAlign;
	ppnum_t fHighestPage;  // highest physical page number touched
	IODirection fDirection;
	UInt8 fIsPrepared;     // nonzero once prepare() has been completed
};
239 
/* Argument block for the kIOMDDMAMap / kIOMDDMAUnmap operations
 * (see also dmaMap()/dmaUnmap(), which take the same quantities). */
struct IOMDDMAMapArgs {
	IOMapper            * fMapper;       // mapper to allocate IOVA space from
	IODMACommand        * fCommand;
	IODMAMapSpecification fMapSpec;      // device constraints on the mapping
	uint64_t              fOffset;       // byte offset into the descriptor
	uint64_t              fLength;       // byte length to map
	uint64_t              fAlloc;        // out: mapped (I/O space) address
	uint64_t              fAllocLength;  // out: length actually mapped
};
249 
/* Argument block for the kIOMDWalkSegments operation: advances through the
 * descriptor's physically contiguous segments one call at a time. */
struct IOMDDMAWalkSegmentArgs {
	UInt64 fOffset;                 // Input/Output offset
	UInt64 fIOVMAddr, fLength;      // Output variables
	UInt8 fMapped;                  // Input Variable, Require mapped IOVMA
	UInt64 fMappedBase;             // Input base of mapping
};
/* Opaque per-walk state kept by the descriptor between WalkSegments calls. */
typedef UInt8 IOMDDMAWalkSegmentState[128];
257 
258 #endif /* KERNEL_PRIVATE */
259 
/* Distinguished values returned by getPreparationID(); real preparation IDs
 * are above this range. */
enum{
	kIOPreparationIDUnprepared = 0,      // descriptor has not been prepared
	kIOPreparationIDUnsupported = 1,     // descriptor type does not support preparation IDs
	kIOPreparationIDAlwaysPrepared = 2,  // descriptor never needs explicit prepare()
};

#ifdef KERNEL_PRIVATE
/* Sentinel for getDescriptorID()/setDescriptorID(). */
#define kIODescriptorIDInvalid (0)
#endif

#ifdef XNU_KERNEL_PRIVATE
struct IOMemoryReference;
#endif
273 
274 
275 /*! @class IOMemoryDescriptor : public OSObject
276  *   @abstract An abstract base class defining common methods for describing physical or virtual memory.
277  *   @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */
278 
279 class IOMemoryDescriptor : public OSObject
280 {
281 	friend class IOMemoryMap;
282 	friend class IOMultiMemoryDescriptor;
283 
284 	OSDeclareDefaultStructorsWithDispatch(IOMemoryDescriptor);
285 
286 protected:
287 
288 /*! @var reserved
289  *   Reserved for future use.  (Internal use only)  */
290 	struct IOMemoryDescriptorReserved * reserved;
291 
292 protected:
293 	OSPtr<OSSet>        _mappings;
294 	IOOptionBits        _flags;
295 
296 
297 #ifdef XNU_KERNEL_PRIVATE
298 public:
299 	struct IOMemoryReference *  _memRef;
300 	vm_tag_t _kernelTag;
301 	vm_tag_t _userTag;
302 	int16_t _dmaReferences;
303 	uint16_t _internalFlags;
304 	kern_allocation_name_t _mapName;
305 protected:
306 #else /* XNU_KERNEL_PRIVATE */
307 	void *              __iomd_reserved5;
308 	uint16_t            __iomd_reserved1[4];
309 	uintptr_t           __iomd_reserved2;
310 #endif /* XNU_KERNEL_PRIVATE */
311 
312 	uint16_t            _iomapperOptions;
313 #ifdef __LP64__
314 	uint16_t            __iomd_reserved3[3];
315 #else
316 	uint16_t            __iomd_reserved3;
317 #endif
318 	uintptr_t           __iomd_reserved4;
319 
320 #ifndef __LP64__
321 	IODirection         _direction;    /* use _flags instead */
322 #endif /* !__LP64__ */
323 	IOByteCount         _length;       /* length of all ranges */
324 	IOOptionBits        _tag;
325 
326 public:
327 	typedef IOOptionBits DMACommandOps;
328 #ifndef __LP64__
329 	virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
330 	    IOByteCount * length ) APPLE_KEXT_DEPRECATED;
331 #endif /* !__LP64__ */
332 
333 /*! @function initWithOptions
334  *   @abstract Master initialiser for all variants of memory descriptors.  For a more complete description see IOMemoryDescriptor::withOptions.
335  *   @discussion Note this function can be used to re-init a previously created memory descriptor.
336  *   @result true on success, false on failure. */
337 	virtual bool initWithOptions(void *         buffers,
338 	    UInt32         count,
339 	    UInt32         offset,
340 	    task_t         task,
341 	    IOOptionBits   options,
342 	    IOMapper *     mapper = kIOMapperSystem);
343 
344 #ifndef __LP64__
345 	virtual addr64_t getPhysicalSegment64( IOByteCount offset,
346 	    IOByteCount * length ) APPLE_KEXT_DEPRECATED;                                 /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
347 #endif /* !__LP64__ */
348 
349 /*! @function setPurgeable
350  *   @abstract Control the purgeable status of a memory descriptors memory.
351  *   @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again.  If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
352  *   @param newState - the desired new purgeable state of the memory:<br>
353  *   kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
354  *   kIOMemoryPurgeableVolatile    - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
355  *   kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
356  *   kIOMemoryPurgeableEmpty       - make the memory volatile, and discard any pages allocated to it.
357  *   @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
358  *   kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
359  *   kIOMemoryPurgeableVolatile    - the memory was volatile but its content has not been discarded by the VM system.<br>
360  *   kIOMemoryPurgeableEmpty       - the memory was volatile and has been discarded by the VM system.<br>
361  *   @result An IOReturn code. */
362 
363 	virtual IOReturn setPurgeable( IOOptionBits newState,
364 	    IOOptionBits * oldState );
365 
366 /*! @function setOwnership
367  *   @abstract Control the ownership of a memory descriptors memory.
368  *   @discussion IOBufferMemoryDescriptor are owned by a specific task. The ownership of such a buffer may be controlled with setOwnership().
369  *   @param newOwner - the task to be the new owner of the memory.
370  *   @param newLedgerTag - the ledger this memory should be accounted in.
371  *   @param newLedgerOptions - accounting options
372  *   @result An IOReturn code. */
373 
374 	IOReturn setOwnership( task_t newOwner,
375 	    int newLedgerTag,
376 	    IOOptionBits newLedgerOptions );
377 
378 /*! @function getPageCounts
379  *   @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
380  *   @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
381  *   @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
382  *   @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
383  *   @result An IOReturn code. */
384 
385 	IOReturn getPageCounts( IOByteCount * residentPageCount,
386 	    IOByteCount * dirtyPageCount);
387 
388 /*! @function performOperation
389  *   @abstract Perform an operation on the memory descriptor's memory.
390  *   @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
391  *   @param options The operation to perform on the memory:<br>
392  *   kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
393  *   kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
394  *   @param offset A byte offset into the memory descriptor's memory.
395  *   @param length The length of the data range.
396  *   @result An IOReturn code. */
397 
398 	virtual IOReturn performOperation( IOOptionBits options,
399 	    IOByteCount offset, IOByteCount length );
400 
401 // Used for dedicated communications for IODMACommand
402 	virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;
403 
404 /*! @function getPhysicalSegment
405  *   @abstract Break a memory descriptor into its physically contiguous segments.
406  *   @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
407  *   @param offset A byte offset into the memory whose physical address to return.
408  *   @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset.
409  *   @result A physical address, or zero if the offset is beyond the length of the memory. */
410 
411 #ifdef __LP64__
412 	virtual addr64_t getPhysicalSegment( IOByteCount   offset,
413 	    IOByteCount * length,
414 	    IOOptionBits  options = 0 ) = 0;
415 #else /* !__LP64__ */
416 	virtual addr64_t getPhysicalSegment( IOByteCount   offset,
417 	    IOByteCount * length,
418 	    IOOptionBits  options );
419 #endif /* !__LP64__ */
420 
421 	virtual uint64_t getPreparationID( void );
422 	void             setPreparationID( void );
423 
424 	void     setVMTags(uint32_t kernelTag, uint32_t userTag);
425 	uint32_t getVMTag(vm_map_t map);
426 
427 #ifdef KERNEL_PRIVATE
428 	uint64_t getDescriptorID( void );
429 	void     setDescriptorID( void );
430 
431 	IOReturn ktraceEmitPhysicalSegments( void );
432 #endif
433 
434 #ifdef XNU_KERNEL_PRIVATE
435 	IOMemoryDescriptorReserved * getKernelReserved( void );
436 	void                         cleanKernelReserved(IOMemoryDescriptorReserved * reserved);
437 	IOReturn dmaMap(
438 		IOMapper                    * mapper,
439 		IOMemoryDescriptor          * memory,
440 		IODMACommand                * command,
441 		const IODMAMapSpecification * mapSpec,
442 		uint64_t                      offset,
443 		uint64_t                      length,
444 		uint64_t                    * mapAddress,
445 		uint64_t                    * mapLength);
446 	IOReturn dmaUnmap(
447 		IOMapper                    * mapper,
448 		IODMACommand                * command,
449 		uint64_t                      offset,
450 		uint64_t                      mapAddress,
451 		uint64_t                      mapLength);
452 	void dmaMapRecord(
453 		IOMapper                    * mapper,
454 		IODMACommand                * command,
455 		uint64_t                      mapLength);
456 #endif
457 
458 private:
459 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 0);
460 #ifdef __LP64__
461 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
462 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
463 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
464 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
465 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
466 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
467 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
468 #else /* !__LP64__ */
469 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 1);
470 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 2);
471 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 3);
472 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 4);
473 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 5);
474 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 6);
475 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 7);
476 #endif /* !__LP64__ */
477 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
478 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
479 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
480 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
481 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
482 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
483 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
484 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);
485 
486 protected:
487 	virtual void free(void) APPLE_KEXT_OVERRIDE;
488 public:
489 	static void initialize( void );
490 
491 public:
492 /*! @function withAddress
493  *   @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
494  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map.  This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
495  *   @param address The virtual address of the first byte in the memory.
496  *   @param withLength The length of memory.
497  *   @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
498  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
499 
500 	static OSPtr<IOMemoryDescriptor>  withAddress(void *       address,
501 	    IOByteCount  withLength,
502 	    IODirection  withDirection);
503 
504 #ifndef __LP64__
505 	static OSPtr<IOMemoryDescriptor>  withAddress(IOVirtualAddress address,
506 	    IOByteCount  withLength,
507 	    IODirection  withDirection,
508 	    task_t       withTask) APPLE_KEXT_DEPRECATED;                                 /* use withAddressRange() and prepare() instead */
509 #endif /* !__LP64__ */
510 
511 /*! @function withPhysicalAddress
512  *   @abstract Create an IOMemoryDescriptor to describe one physical range.
513  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
514  *   @param address The physical address of the first byte in the memory.
515  *   @param withLength The length of memory.
516  *   @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
517  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
518 
519 	static OSPtr<IOMemoryDescriptor>  withPhysicalAddress(
520 		IOPhysicalAddress       address,
521 		IOByteCount             withLength,
522 		IODirection             withDirection );
523 
524 #ifndef __LP64__
525 	static OSPtr<IOMemoryDescriptor>  withRanges(IOVirtualRange * ranges,
526 	    UInt32           withCount,
527 	    IODirection      withDirection,
528 	    task_t           withTask,
529 	    bool             asReference = false) APPLE_KEXT_DEPRECATED;                                 /* use withAddressRanges() instead */
530 #endif /* !__LP64__ */
531 
532 /*! @function withAddressRange
533  *   @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
534  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map.  This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
535  *   @param address The virtual address of the first byte in the memory.
536  *   @param length The length of memory.
537  *   @param options
538  *       kIOMemoryDirectionMask (options:direction)	This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
539  *   @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
540  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
541 
542 	static OSPtr<IOMemoryDescriptor>  withAddressRange(
543 		mach_vm_address_t address,
544 		mach_vm_size_t    length,
545 		IOOptionBits      options,
546 		task_t            task);
547 
548 /*! @function withAddressRanges
549  *   @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
550  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task.  This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
551  *   @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
552  *   @param rangeCount The member count of the ranges array.
553  *   @param options
554  *       kIOMemoryDirectionMask (options:direction)	This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
555  *       kIOMemoryAsReference	For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory.  This is an optimisation to try to minimise unnecessary allocations.
556  *   @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
557  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
558 
559 	static OSPtr<IOMemoryDescriptor>  withAddressRanges(
560 		IOAddressRange * ranges,
561 		UInt32           rangeCount,
562 		IOOptionBits     options,
563 		task_t           task);
564 
565 /*! @function withOptions
566  *   @abstract Master initialiser for all variants of memory descriptors.
567  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory it has three main variants: Virtual, Physical & mach UPL.  These variants are selected with the options parameter, see below.  This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
568  *
569  *
570  *   @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.
571  *
572  *   @param count options:type = Virtual or Physical count contains a count of the number of entires in the buffers array.  For options:type = UPL this field contains a total length.
573  *
574  *   @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.
575  *
576  *   @param task Only used options:type = Virtual, The task each of the virtual ranges are mapped into.
577  *
578  *   @param options
579  *       kIOMemoryDirectionMask (options:direction)	This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
580  *       kIOMemoryTypeMask (options:type)	kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates that what type of memory basic memory descriptor to use.  This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
581  *       kIOMemoryAsReference	For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory.  This is an optimisation to try to minimise unnecessary allocations.
582  *       kIOMemoryBufferPageable	Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.
583  *
584  *   @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses.  Defaults to 0 which indicates that the system mapper is to be used, if present.
585  *
586  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
587 
588 	static OSPtr<IOMemoryDescriptor> withOptions(void *       buffers,
589 	    UInt32       count,
590 	    UInt32       offset,
591 	    task_t       task,
592 	    IOOptionBits options,
593 	    IOMapper *   mapper = kIOMapperSystem);
594 
595 #ifndef __LP64__
596 	static OSPtr<IOMemoryDescriptor>  withPhysicalRanges(
597 		IOPhysicalRange *   ranges,
598 		UInt32              withCount,
599 		IODirection         withDirection,
600 		bool                asReference = false) APPLE_KEXT_DEPRECATED;                             /* use withOptions() and kIOMemoryTypePhysical instead */
601 #endif /* !__LP64__ */
602 
603 #ifndef __LP64__
604 	static OSPtr<IOMemoryDescriptor>  withSubRange(IOMemoryDescriptor *of,
605 	    IOByteCount offset,
606 	    IOByteCount length,
607 	    IODirection withDirection) APPLE_KEXT_DEPRECATED;                                  /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
608 #endif /* !__LP64__ */
609 
/*! @function withPersistentMemoryDescriptor
 *   @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
 *   @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and then reallocated memory at the same address, then the original memory descriptor is returned with an additional reference.  Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm.  Note it is not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
 *   @param originalMD The memory descriptor to be duplicated.
 *   @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. */
	static OSPtr<IOMemoryDescriptor>
	withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);
617 
618 #ifndef __LP64__
619 // obsolete initializers
620 // - initWithOptions is the designated initializer
621 	virtual bool initWithAddress(void *       address,
622 	    IOByteCount  withLength,
623 	    IODirection  withDirection) APPLE_KEXT_DEPRECATED;                      /* use initWithOptions() instead */
624 	virtual bool initWithAddress(IOVirtualAddress address,
625 	    IOByteCount  withLength,
626 	    IODirection  withDirection,
627 	    task_t       withTask) APPLE_KEXT_DEPRECATED;                      /* use initWithOptions() instead */
628 	virtual bool initWithPhysicalAddress(
629 		IOPhysicalAddress      address,
630 		IOByteCount            withLength,
631 		IODirection            withDirection ) APPLE_KEXT_DEPRECATED;                  /* use initWithOptions() instead */
632 	virtual bool initWithRanges(IOVirtualRange * ranges,
633 	    UInt32           withCount,
634 	    IODirection      withDirection,
635 	    task_t           withTask,
636 	    bool             asReference = false) APPLE_KEXT_DEPRECATED;                     /* use initWithOptions() instead */
637 	virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
638 	    UInt32           withCount,
639 	    IODirection      withDirection,
640 	    bool             asReference = false) APPLE_KEXT_DEPRECATED;                             /* use initWithOptions() instead */
641 #endif /* __LP64__ */
642 
/*! @function getDirection
 *   @abstract Accessor to get the direction the memory descriptor was created with.
 *   @discussion This method returns the direction the memory descriptor was created with.
 *   @result The direction. */

	virtual IODirection getDirection() const;

/*! @function getLength
 *   @abstract Accessor to get the length of the memory descriptor (over all its ranges).
 *   @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
 *   @result The byte count. */

	virtual IOByteCount getLength() const;

#define IOMEMORYDESCRIPTOR_SUPPORTS_GETDMAMAPLENGTH
/*! @function getDMAMapLength
 *   @abstract Accessor for the length of the descriptor as it will be mapped for DMA.
 *   @discussion Returns the DMA map length of the descriptor; if offset is non-NULL, *offset also receives the corresponding DMA map offset. (NOTE(review): semantics inferred from the name and signature — confirm against the implementation in IOMemoryDescriptor.cpp.)
 *   @param offset Optional out parameter receiving the DMA map offset.
 *   @result The DMA map length in bytes. */
	uint64_t getDMAMapLength(uint64_t * offset = NULL);
659 
/*! @function setTag
 *   @abstract Set the tag for the memory descriptor.
 *   @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
 *   @param tag The tag. */

	virtual void setTag( IOOptionBits tag );

/*! @function getTag
 *   @abstract Accessor to retrieve the tag for the memory descriptor.
 *   @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
 *   @result The tag. */

	virtual IOOptionBits getTag( void );

/*! @function getFlags
 *   @abstract Accessor to retrieve the options the memory descriptor was created with.
 *   @discussion Accessor to retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum.
 *   @result The flags bitfield. */

	uint64_t getFlags(void);
680 
/*! @function readBytes
 *   @abstract Copy data from the memory descriptor's buffer to the specified buffer.
 *   @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer.  The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared.  kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
 *   @param offset A byte offset into the memory descriptor's memory.
 *   @param bytes The caller supplied buffer to copy the data to.
 *   @param withLength The length of the data to copy.
 *   @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

	virtual IOByteCount readBytes(IOByteCount offset,
	    void * bytes, IOByteCount withLength);

/*! @function writeBytes
 *   @abstract Copy data to the memory descriptor's buffer from the specified buffer.
 *   @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer.  The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared.  kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
 *   @param offset A byte offset into the memory descriptor's memory.
 *   @param bytes The caller supplied buffer to copy the data from.
 *   @param withLength The length of the data to copy.
 *   @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

	virtual IOByteCount writeBytes(IOByteCount offset,
	    const void * bytes, IOByteCount withLength);
702 
#ifndef __LP64__
	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 *   @abstract Return the physical address of the first byte in the memory.
 *   @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
 *   @result A physical address. */

	IOPhysicalAddress getPhysicalAddress();
714 
715 #ifndef __LP64__
716 	virtual void * getVirtualSegment(IOByteCount offset,
717 	    IOByteCount * length) APPLE_KEXT_DEPRECATED;                             /* use map() and getVirtualAddress() instead */
718 #endif /* !__LP64__ */
719 
/*! @function prepare
 *   @abstract Prepare the memory for an I/O transfer.
 *   @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer.  The complete() method completes the processing of the memory after the I/O transfer finishes.  Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor.
 *   @param forDirection The direction of the upcoming I/O, or kIODirectionNone for the direction specified by the memory descriptor.
 *   @result An IOReturn code. */

	virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;

/*! @function complete
 *   @abstract Complete processing of the memory after an I/O transfer finishes.
 *   @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory.  In 10.3 or greater systems the direction argument to complete is no longer respected.  The direction is totally determined at prepare() time.
 *   @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
 *   @result An IOReturn code. */

	virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
735 
736 /*
737  * Mapping functions.
738  */
739 
/*! @function createMappingInTask
 *   @abstract Maps an IOMemoryDescriptor into a task.
 *   @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to an IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
 *   @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
 *   @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
 *   @param options Mapping options are defined in IOTypes.h,<br>
 *       kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
 *       kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
 *       kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
 *       kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.<br>
 *       kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
 *       kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
 *       kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
 *   @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
 *   @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
 *   @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */

	OSPtr<IOMemoryMap>        createMappingInTask(
		task_t                  intoTask,
		mach_vm_address_t       atAddress,
		IOOptionBits            options,
		mach_vm_size_t          offset = 0,
		mach_vm_size_t          length = 0 );
763 
764 #ifndef __LP64__
765 	virtual OSPtr<IOMemoryMap>       map(
766 		task_t          intoTask,
767 		IOVirtualAddress        atAddress,
768 		IOOptionBits            options,
769 		IOByteCount             offset = 0,
770 		IOByteCount             length = 0 ) APPLE_KEXT_DEPRECATED;/* use createMappingInTask() instead */
771 #endif /* !__LP64__ */
772 
/*! @function map
 *   @abstract Maps an IOMemoryDescriptor into the kernel map.
 *   @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
 *   @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
 *   @result See the full version of the createMappingInTask method. */

	virtual OSPtr<IOMemoryMap>       map(
		IOOptionBits            options = 0 );

/*! @function setMapping
 *   @abstract Establishes an already existing mapping.
 *   @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
 *   @param task Address space in which the mapping exists.
 *   @param mapAddress Virtual address of the mapping.
 *   @param options Caching and read-only attributes of the mapping.
 *   @result An IOMemoryMap object created to represent the mapping. */

	virtual OSPtr<IOMemoryMap>       setMapping(
		task_t          task,
		IOVirtualAddress        mapAddress,
		IOOptionBits            options = 0 );
794 
795 /*! @function setMapperOptions
796  *   @abstract Set the IOMapper options
797  *   @discussion This method sets the IOMapper options
798  *   @param options  IOMapper options to be set. */
799 
800 	void setMapperOptions( uint16_t options );
801 
802 /*! @function getMapperOptions
803  *   @abstract return IOMapper Options
804  *   @discussion This method returns IOMapper Options set earlier using setMapperOptions
805  *   @result IOMapper options set. */
806 
807 	uint16_t getMapperOptions( void );
808 
809 // Following methods are private implementation
810 
811 #ifdef __LP64__
812 	virtual
813 #endif /* __LP64__ */
814 	IOReturn redirect( task_t safeTask, bool redirect );
815 
816 	IOReturn handleFault(
817 		void *                  _pager,
818 		mach_vm_size_t          sourceOffset,
819 		mach_vm_size_t          length);
820 
821 	IOReturn populateDevicePager(
822 		void *                  pager,
823 		vm_map_t                addressMap,
824 		mach_vm_address_t       address,
825 		mach_vm_size_t          sourceOffset,
826 		mach_vm_size_t          length,
827 		IOOptionBits            options );
828 
829 	virtual LIBKERN_RETURNS_NOT_RETAINED IOMemoryMap *      makeMapping(
830 		IOMemoryDescriptor *    owner,
831 		task_t                  intoTask,
832 		IOVirtualAddress        atAddress,
833 		IOOptionBits            options,
834 		IOByteCount             offset,
835 		IOByteCount             length );
836 
837 #if KERNEL_PRIVATE
838 /*! @function copyContext
839  *   @abstract Accessor to the retrieve the context previously set for the memory descriptor.
840  *   @discussion This method returns the context for the memory descriptor. The context is not interpreted by IOMemoryDescriptor.
841  *   @result The context, returned with an additional retain to be released by the caller. */
842 	OSObject * copyContext(void) const;
843 
844 /*! @function setContext
845  *   @abstract Set a context object for the memory descriptor. The context is not interpreted by IOMemoryDescriptor.
846  *   @discussion The context is retained, and will be released when the memory descriptor is freed or when a new context object is set.
847  */
848 	void setContext(OSObject * context);
849 #endif
850 
protected:
// Register an IOMemoryMap with this descriptor.
	virtual void                addMapping(
		IOMemoryMap *           mapping );

// Unregister a previously added IOMemoryMap.
	virtual void                removeMapping(
		IOMemoryMap *           mapping );

// Low-level map primitive; overridden by subclasses (e.g. IOGeneralMemoryDescriptor).
	virtual IOReturn doMap(
		vm_map_t                addressMap,
		IOVirtualAddress *      atAddress,
		IOOptionBits            options,
		IOByteCount             sourceOffset = 0,
		IOByteCount             length = 0 );

// Low-level unmap primitive paired with doMap().
	virtual IOReturn doUnmap(
		vm_map_t                addressMap,
		IOVirtualAddress        logical,
		IOByteCount             length );
869 };
870 
871 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
872 
/*! @class IOMemoryMap : public OSObject
 *   @abstract A class defining common methods for describing a memory mapping.
 *   @discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */

class IOMemoryMap : public OSObject
{
	OSDeclareDefaultStructorsWithDispatch(IOMemoryMap);
#ifdef XNU_KERNEL_PRIVATE
public:
	IOOptionBits         fOptions;
	OSPtr<IOMemoryDescriptor>  fMemory;
	OSPtr<IOMemoryMap>         fSuperMap;
	mach_vm_size_t       fOffset;
	mach_vm_address_t    fAddress;
	mach_vm_size_t       fLength;
	task_t               fAddressTask;
	vm_map_t             fAddressMap;
	upl_t                fRedirUPL;
	uint8_t              fUserClientUnmap;
#if IOTRACKING
	IOTrackingUser       fTracking;
#endif
#endif /* XNU_KERNEL_PRIVATE */

protected:
	virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
	virtual void free(void) APPLE_KEXT_OVERRIDE;

public:
/*! @function getVirtualAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
 *   @result A virtual address. */

	virtual IOVirtualAddress    getVirtualAddress(void);

/*! @function getPhysicalSegment
 *   @abstract Break a mapping into its physically contiguous segments.
 *   @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
 *   @param offset A byte offset into the mapping whose physical address to return.
 *   @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
 *   @result A physical address, or zero if the offset is beyond the length of the mapping. */

#ifdef __LP64__
	virtual IOPhysicalAddress   getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length,
	    IOOptionBits  options = 0);
#else /* !__LP64__ */
	virtual IOPhysicalAddress   getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 *   @abstract Return the physical address of the first byte in the mapping.
 *   @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
 *   @result A physical address. */

	IOPhysicalAddress getPhysicalAddress(void);

/*! @function getLength
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */

	virtual IOByteCount         getLength(void);

/*! @function getAddressTask
 *   @abstract Accessor to the task of the mapping.
 *   @discussion This method returns the mach task the mapping exists in.
 *   @result A mach task_t. */

	virtual task_t              getAddressTask();

/*! @function getMemoryDescriptor
 *   @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
 *   @discussion This method returns the IOMemoryDescriptor the mapping was created from.
 *   @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */

	virtual IOMemoryDescriptor * getMemoryDescriptor();

/*! @function getMapOptions
 *   @abstract Accessor to the options the mapping was created with.
 *   @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
 *   @result Options for the mapping, including cache settings. */

	virtual IOOptionBits        getMapOptions();

/*! @function unmap
 *   @abstract Force the IOMemoryMap to unmap, without destroying the object.
 *   @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
 *   @result An IOReturn code. */

	virtual IOReturn            unmap();

	virtual void                taskDied();

/*! @function redirect
 *   @abstract Replace the memory mapped in a process with new backing memory.
 *   @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
 *   @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
 *   @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
 *   @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
 *   @result An IOReturn code. */

#ifndef __LP64__
// For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
// for 64 bit, these fall together on the 64 bit one.
	virtual IOReturn            redirect(IOMemoryDescriptor * newBackingMemory,
	    IOOptionBits         options,
	    IOByteCount          offset = 0);
#endif
	virtual IOReturn            redirect(IOMemoryDescriptor * newBackingMemory,
	    IOOptionBits         options,
	    mach_vm_size_t       offset = 0);

#ifdef __LP64__
/*! @function getAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping.
 *   @result A virtual address. */
	inline mach_vm_address_t    getAddress() __attribute__((always_inline));
/*! @function getSize
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */
	inline mach_vm_size_t       getSize() __attribute__((always_inline));
#else /* !__LP64__ */
/*! @function getAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping.
 *   @result A virtual address. */
	virtual mach_vm_address_t   getAddress();
/*! @function getSize
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */
	virtual mach_vm_size_t      getSize();
#endif /* !__LP64__ */

#ifdef XNU_KERNEL_PRIVATE
// for IOMemoryDescriptor use
	IOMemoryMap *  copyCompatible( IOMemoryMap * newMapping );

	bool init(
		task_t                  intoTask,
		mach_vm_address_t       toAddress,
		IOOptionBits            options,
		mach_vm_size_t          offset,
		mach_vm_size_t          length );

	bool    setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);

	IOReturn redirect(
		task_t                  intoTask, bool redirect );

	IOReturn userClientUnmap();
#endif /* XNU_KERNEL_PRIVATE */

	IOReturn wireRange(
		uint32_t                options,
		mach_vm_size_t          offset,
		mach_vm_size_t          length);

	OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
};
1045 
1046 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef XNU_KERNEL_PRIVATE
// Also these flags should not overlap with the options to
//	IOMemoryDescriptor::initWithRanges(... IOOptionBits options);
enum {
	_kIOMemorySourceSegment     = 0x00002000
};
#endif /* XNU_KERNEL_PRIVATE */
1054 
1055 // The following classes are private implementation of IOMemoryDescriptor - they
1056 // should not be referenced directly, just through the public API's in the
1057 // IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
1058 // might be created by IOMemoryDescriptor::withAddressRange(), but there should be
1059 // no need to reference as anything but a generic IOMemoryDescriptor *.
1060 
1061 class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
1062 {
1063 	OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);
1064 
1065 public:
1066 	union Ranges {
1067 		IOVirtualRange   *v;
1068 		IOAddressRange   *v64;
1069 		IOPhysicalRange  *p;
1070 		void             *uio;
1071 	};
1072 protected:
1073 	Ranges              _ranges;
1074 	unsigned            _rangesCount;   /* number of address ranges in list */
1075 #ifndef __LP64__
1076 	bool                _rangesIsAllocated;/* is list allocated by us? */
1077 #endif /* !__LP64__ */
1078 
1079 	task_t              _task;           /* task where all ranges are mapped to */
1080 
1081 	union {
1082 		IOVirtualRange  v;
1083 		IOPhysicalRange p;
1084 	}                   _singleRange;  /* storage space for a single range */
1085 
1086 	unsigned            _wireCount;    /* number of outstanding wires */
1087 
1088 #ifndef __LP64__
1089 	uintptr_t _cachedVirtualAddress;
1090 
1091 	IOPhysicalAddress   _cachedPhysicalAddress;
1092 #endif /* !__LP64__ */
1093 
1094 	bool                _initialized;  /* has superclass been initialized? */
1095 
1096 public:
1097 	virtual void free() APPLE_KEXT_OVERRIDE;
1098 
1099 	virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;
1100 
1101 	virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;
1102 
1103 #ifdef XNU_KERNEL_PRIVATE
1104 // Internal APIs may be made virtual at some time in the future.
1105 	IOReturn wireVirtual(IODirection forDirection);
1106 	IOReturn dmaMap(
1107 		IOMapper                    * mapper,
1108 		IOMemoryDescriptor          * memory,
1109 		IODMACommand                * command,
1110 		const IODMAMapSpecification * mapSpec,
1111 		uint64_t                      offset,
1112 		uint64_t                      length,
1113 		uint64_t                    * mapAddress,
1114 		uint64_t                    * mapLength);
1115 	bool initMemoryEntries(size_t size, IOMapper * mapper);
1116 
1117 	IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
1118 	    IOMemoryReference * realloc);
1119 	void memoryReferenceFree(IOMemoryReference * ref);
1120 	void memoryReferenceRelease(IOMemoryReference * ref);
1121 
1122 	IOReturn memoryReferenceCreate(
1123 		IOOptionBits         options,
1124 		IOMemoryReference ** reference);
1125 
1126 	IOReturn memoryReferenceMap(IOMemoryReference * ref,
1127 	    vm_map_t            map,
1128 	    mach_vm_size_t      inoffset,
1129 	    mach_vm_size_t      size,
1130 	    IOOptionBits        options,
1131 	    mach_vm_address_t * inaddr);
1132 
1133 	IOReturn memoryReferenceMapNew(IOMemoryReference * ref,
1134 	    vm_map_t            map,
1135 	    mach_vm_size_t      inoffset,
1136 	    mach_vm_size_t      size,
1137 	    IOOptionBits        options,
1138 	    mach_vm_address_t * inaddr);
1139 
1140 	static IOReturn memoryReferenceSetPurgeable(
1141 		IOMemoryReference * ref,
1142 		IOOptionBits newState,
1143 		IOOptionBits * oldState);
1144 	static IOReturn memoryReferenceSetOwnership(
1145 		IOMemoryReference * ref,
1146 		task_t newOwner,
1147 		int newLedgerTag,
1148 		IOOptionBits newLedgerOptions);
1149 	static IOReturn memoryReferenceGetPageCounts(
1150 		IOMemoryReference * ref,
1151 		IOByteCount       * residentPageCount,
1152 		IOByteCount       * dirtyPageCount);
1153 
1154 	static uint64_t memoryReferenceGetDMAMapLength(
1155 		IOMemoryReference * ref,
1156 		uint64_t * offset);
1157 
1158 #endif
1159 
1160 private:
1161 
1162 #ifndef __LP64__
1163 	virtual void setPosition(IOByteCount position);
1164 	virtual void mapIntoKernel(unsigned rangeIndex);
1165 	virtual void unmapFromKernel();
1166 #endif /* !__LP64__ */
1167 
1168 // Internal
1169 	OSPtr<_IOMemoryDescriptorMixedData> _memoryEntries;
1170 	unsigned int    _pages;
1171 	ppnum_t         _highestPage;
1172 	uint32_t        __iomd_reservedA;
1173 	uint32_t        __iomd_reservedB;
1174 
1175 	IOLock *        _prepareLock;
1176 
1177 public:
1178 /*
1179  * IOMemoryDescriptor required methods
1180  */
1181 
1182 // Master initaliser
1183 	virtual bool initWithOptions(void *         buffers,
1184 	    UInt32         count,
1185 	    UInt32         offset,
1186 	    task_t         task,
1187 	    IOOptionBits   options,
1188 	    IOMapper *     mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;
1189 
#ifndef __LP64__
// Secondary initialisers
// Legacy 32-bit-only entry points. All are deprecated overrides kept for
// binary compatibility with pre-LP64 kexts; new code goes through
// initWithOptions() above.
	virtual bool initWithAddress(void *         address,
	    IOByteCount    withLength,
	    IODirection    withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	// Same as above, but the range lives in `withTask`'s address space.
	virtual bool initWithAddress(IOVirtualAddress address,
	    IOByteCount    withLength,
	    IODirection    withDirection,
	    task_t         withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithPhysicalAddress(
		IOPhysicalAddress      address,
		IOByteCount            withLength,
		IODirection            withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	// `asReference == true` keeps a pointer to the caller's range array
	// instead of copying it; the caller must keep it alive.
	virtual bool initWithRanges(        IOVirtualRange * ranges,
	    UInt32           withCount,
	    IODirection      withDirection,
	    task_t           withTask,
	    bool             asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
	    UInt32           withCount,
	    IODirection      withDirection,
	    bool             asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	// 64-bit-wide physical segment lookup for 32-bit builds; superseded on
	// LP64 by the three-argument getPhysicalSegment() below.
	virtual addr64_t getPhysicalSegment64( IOByteCount offset,
	    IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	// Two-argument legacy overload (not deprecated: still the primary
	// 32-bit API). Returns a 32-bit physical address.
	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE;

	virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual void * getVirtualSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */
1229 
	// Change the purgeability state of the described memory; the previous
	// state is returned through `oldState` when non-NULL. Override of the
	// IOMemoryDescriptor base method.
	virtual IOReturn setPurgeable( IOOptionBits newState,
	    IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;

	// Reassign accounting of the described memory to `newOwner`'s ledger
	// with the given tag/options. Non-virtual (not part of the base
	// class's virtual interface).
	IOReturn setOwnership( task_t newOwner,
	    int newLedgerTag,
	    IOOptionBits newLedgerOptions );
1236 
	// Return the 64-bit physical address of the segment containing
	// `offset`, and its remaining length through `length`. On LP64 the
	// `options` argument defaults to 0; on 32-bit builds no default is
	// provided because the two-argument legacy overload above would make
	// the call ambiguous.
	virtual addr64_t getPhysicalSegment( IOByteCount   offset,
	    IOByteCount * length,
#ifdef __LP64__
	    IOOptionBits  options = 0 ) APPLE_KEXT_OVERRIDE;
#else /* !__LP64__ */
	    IOOptionBits  options)APPLE_KEXT_OVERRIDE;
#endif /* !__LP64__ */
1244 
	// Prepare the memory for an I/O transfer in the given direction
	// (kIODirectionNone uses the descriptor's own direction). Must be
	// balanced by complete(). Overrides the base implementation.
	virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

	// Undo a matching prepare() once the transfer has finished.
	virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;
1248 
	// Internal mapping hook: establish a mapping of [sourceOffset,
	// sourceOffset + length) into `addressMap`, returning the chosen
	// address through `atAddress`. length == 0 follows the base-class
	// convention (see IOMemoryDescriptor::doMap).
	virtual IOReturn doMap(
		vm_map_t                addressMap,
		IOVirtualAddress *      atAddress,
		IOOptionBits            options,
		IOByteCount             sourceOffset = 0,
		IOByteCount             length = 0 ) APPLE_KEXT_OVERRIDE;

	// Internal unmapping hook: tear down a mapping previously made by
	// doMap() at `logical` in `addressMap`.
	virtual IOReturn doUnmap(
		vm_map_t                addressMap,
		IOVirtualAddress        logical,
		IOByteCount             length ) APPLE_KEXT_OVERRIDE;
1260 
	// OSObject serialization override.
	virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;

// Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
	static OSPtr<IOMemoryDescriptor>
	withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);
1266 };
1267 
1268 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1269 
1270 #ifdef __LP64__
1271 mach_vm_address_t
getAddress()1272 IOMemoryMap::getAddress()
1273 {
1274 	return getVirtualAddress();
1275 }
1276 
1277 mach_vm_size_t
getSize()1278 IOMemoryMap::getSize()
1279 {
1280 	return getLength();
1281 }
1282 #else /* !__LP64__ */
1283 #include <IOKit/IOSubMemoryDescriptor.h>
1284 #endif /* !__LP64__ */
1285 
1286 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1287 
// Global enable flag consulted by the setOwnership() support.
// NOTE(review): defined elsewhere (IOMemoryDescriptor.cpp); presumably
// toggled at boot/init time — confirm where it is set before relying on it.
extern bool iokit_iomd_setownership_enabled;
1289 
1290 #endif /* !_IOMEMORYDESCRIPTOR_H */
1291