xref: /xnu-12377.1.9/iokit/IOKit/IOMemoryDescriptor.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 1998-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _IOMEMORYDESCRIPTOR_H
29 #define _IOMEMORYDESCRIPTOR_H
30 
31 #include <sys/cdefs.h>
32 
33 #include <IOKit/IOTypes.h>
34 #include <IOKit/IOLocks.h>
35 #include <libkern/c++/OSPtr.h>
36 #include <libkern/c++/OSContainers.h>
37 #include <DriverKit/IOMemoryDescriptor.h>
38 #include <DriverKit/IOMemoryMap.h>
39 #ifdef XNU_KERNEL_PRIVATE
40 #include <IOKit/IOKitDebug.h>
41 #endif
42 
43 #include <mach/memory_object_types.h>
44 
45 class IOMemoryDescriptor;
46 class IOMemoryMap;
47 class IOMapper;
48 class IOService;
49 class IODMACommand;
50 class _IOMemoryDescriptorMixedData;
51 
52 /*
53  * Direction of transfer, with respect to the described memory.
54  */
/*
 * On LP64 this enum is anonymous and the IODirection type is instead a
 * typedef of IOOptionBits (see below, after the enum); on ILP32 the enum
 * itself is the IODirection type.
 */
#ifdef __LP64__
enum
#else /* !__LP64__ */
enum IODirection
#endif /* !__LP64__ */
{
	kIODirectionNone  = 0x0,//                    same as VM_PROT_NONE
	kIODirectionIn    = 0x1,// User land 'read',  same as VM_PROT_READ
	kIODirectionOut   = 0x2,// User land 'write', same as VM_PROT_WRITE
	// kIODirectionOutIn and kIODirectionInOut are the same value (0x3);
	// both spellings are kept for source compatibility.
	kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
	kIODirectionInOut = kIODirectionIn  | kIODirectionOut,

	// these flags are valid for the prepare() method only
	kIODirectionPrepareToPhys32   = 0x00000004,
	kIODirectionPrepareNoFault    = 0x00000008,
	kIODirectionPrepareReserved1  = 0x00000010,
// Feature-test macro so clients can detect availability of the enumerator below.
#define IODIRECTIONPREPARENONCOHERENTDEFINED    1
	kIODirectionPrepareNonCoherent = 0x00000020,
#if KERNEL_PRIVATE
#define IODIRECTIONPREPAREAVOIDTHROTTLING       1
	kIODirectionPrepareAvoidThrottling = 0x00000100,
#endif

	// these flags are valid for the complete() method only
#define IODIRECTIONCOMPLETEWITHERRORDEFINED             1
	kIODirectionCompleteWithError = 0x00000040,
#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
	kIODirectionCompleteWithDataValid = 0x00000080,
};

#ifdef __LP64__
typedef IOOptionBits IODirection;
#endif /* __LP64__ */
88 
89 /*
90  * IOOptionBits used in the withOptions variant
91  */
enum {
	// Low nibble carries the IODirection bits (kIODirectionIn/Out).
	kIOMemoryDirectionMask      = 0x00000007,
#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryAutoPrepare        = 0x00000008,// Shared with Buffer MD
#endif

	// options:type field — selects the backing representation of the
	// ranges handed to initWithOptions()/withOptions().
	kIOMemoryTypeVirtual        = 0x00000010,
	kIOMemoryTypePhysical       = 0x00000020,
	kIOMemoryTypeUPL            = 0x00000030,
	kIOMemoryTypePersistentMD   = 0x00000040,// Persistent Memory Descriptor
	kIOMemoryTypeUIO            = 0x00000050,
#ifdef __LP64__
	// On LP64 the 32-bit and 64-bit range types are one and the same.
	kIOMemoryTypeVirtual64      = kIOMemoryTypeVirtual,
	kIOMemoryTypePhysical64     = kIOMemoryTypePhysical,
#else /* !__LP64__ */
	kIOMemoryTypeVirtual64      = 0x00000060,
	kIOMemoryTypePhysical64     = 0x00000070,
#endif /* !__LP64__ */
	kIOMemoryTypeMask           = 0x000000f0,

	// kIOMemoryAsReference: do not copy the caller's ranges array into
	// descriptor-local storage (caller keeps the array alive).
	kIOMemoryAsReference        = 0x00000100,
	kIOMemoryBufferPageable     = 0x00000400,
	kIOMemoryMapperNone         = 0x00000800,// Shared with Buffer MD
	kIOMemoryHostOnly           = 0x00001000,// Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryRedirected         = 0x00004000,
	kIOMemoryPreparedReadOnly   = 0x00008000,
#endif
	kIOMemoryPersistent         = 0x00010000,
	kIOMemoryMapCopyOnWrite     = 0x00020000,
	kIOMemoryRemote             = 0x00040000,
	kIOMemoryThreadSafe         = 0x00100000,// Shared with Buffer MD
	kIOMemoryClearEncrypt       = 0x00200000,// Shared with Buffer MD
	kIOMemoryUseReserve         = 0x00800000,// Shared with Buffer MD
// Feature-test macro so clients can detect availability of kIOMemoryUseReserve.
#define IOMEMORYUSERESERVEDEFINED       1

#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryBufferPurgeable    = 0x00400000,
	// Cache mode for buffer MDs is packed into the top nibble.
	kIOMemoryBufferCacheMask    = 0x70000000,
	kIOMemoryBufferCacheShift   = 28,
#endif
};
134 
// Passing NULL as the IOMapper argument selects the system mapper
// (see the mapper parameter documentation of withOptions()).
#define kIOMapperSystem ((IOMapper *) NULL)
136 
137 enum{
138 	kIOMemoryLedgerTagDefault       = VM_LEDGER_TAG_DEFAULT,
139 	kIOmemoryLedgerTagNetwork       = VM_LEDGER_TAG_NETWORK,
140 	kIOMemoryLedgerTagMedia         = VM_LEDGER_TAG_MEDIA,
141 	kIOMemoryLedgerTagGraphics      = VM_LEDGER_TAG_GRAPHICS,
142 	kIOMemoryLedgerTagNeural        = VM_LEDGER_TAG_NEURAL,
143 };
// Ledger options for setOwnership(); mirrors VM_LEDGER_FLAG_* constants.
enum{
	kIOMemoryLedgerFlagNoFootprint  = VM_LEDGER_FLAG_NO_FOOTPRINT,
};
147 
// States/arguments for setPurgeable() — see its doc comment on the class
// for the full semantics of each state.
enum{
	// Query-only: leave the purgeable state unchanged, just report it.
	kIOMemoryPurgeableKeepCurrent = 1,

	kIOMemoryPurgeableNonVolatile = 2,
	kIOMemoryPurgeableVolatile    = 3,
	// Volatile and immediately discard any pages already allocated.
	kIOMemoryPurgeableEmpty       = 4,

	// modifiers for kIOMemoryPurgeableVolatile behavior
	// (values mirror the mach VM_VOLATILE_* / VM_PURGABLE_* constants)
	kIOMemoryPurgeableVolatileGroup0           = VM_VOLATILE_GROUP_0,
	kIOMemoryPurgeableVolatileGroup1           = VM_VOLATILE_GROUP_1,
	kIOMemoryPurgeableVolatileGroup2           = VM_VOLATILE_GROUP_2,
	kIOMemoryPurgeableVolatileGroup3           = VM_VOLATILE_GROUP_3,
	kIOMemoryPurgeableVolatileGroup4           = VM_VOLATILE_GROUP_4,
	kIOMemoryPurgeableVolatileGroup5           = VM_VOLATILE_GROUP_5,
	kIOMemoryPurgeableVolatileGroup6           = VM_VOLATILE_GROUP_6,
	kIOMemoryPurgeableVolatileGroup7           = VM_VOLATILE_GROUP_7,
	kIOMemoryPurgeableVolatileBehaviorFifo     = VM_PURGABLE_BEHAVIOR_FIFO,
	kIOMemoryPurgeableVolatileBehaviorLifo     = VM_PURGABLE_BEHAVIOR_LIFO,
	kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
	kIOMemoryPurgeableVolatileOrderingNormal   = VM_PURGABLE_ORDERING_NORMAL,
	kIOMemoryPurgeableFaultOnAccess            = VM_PURGABLE_DEBUG_FAULT,
};
// Operation selectors for performOperation() — see its doc comment on the
// class for what flush vs. store means for the processor caches.
enum{
	kIOMemoryIncoherentIOFlush   = 1,
	kIOMemoryIncoherentIOStore   = 2,

	kIOMemoryClearEncrypted      = 50,
	kIOMemorySetEncrypted        = 51,
};
177 
// Feature-test macro: this header declares the dmaCommandOperation() hook
// used by IODMACommand.
#define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND  1
179 
// Constraints a DMA mapping must satisfy (alignment, addressable bits,
// originating device). The resv* fields pad the struct for future use —
// field layout must not change.
struct IODMAMapSpecification {
	uint64_t    alignment;
	IOService * device;
	uint32_t    options;
	uint8_t     numAddressBits;
	uint8_t     resvA[3];
	uint32_t    resvB[4];
};
188 
// A (possibly offset) list of physical pages to be mapped for DMA.
struct IODMAMapPageList {
	uint32_t                pageOffset;     // byte offset into the first page
	uint32_t                pageListCount;  // number of entries in pageList
	const upl_page_info_t * pageList;
};
194 
195 // mapOptions for iovmMapMemory
enum{
	kIODMAMapReadAccess           = 0x00000001,
	kIODMAMapWriteAccess          = 0x00000002,
	kIODMAMapPhysicallyContiguous = 0x00000010,
	kIODMAMapDeviceMemory         = 0x00000020,
	kIODMAMapPagingPath           = 0x00000040,
	kIODMAMapIdentityMap          = 0x00000080,

	kIODMAMapPageListFullyOccupied = 0x00000100,
	kIODMAMapFixedAddress          = 0x00000200,
};
207 
208 // Options used by IOMapper. example IOMappers are DART and VT-d
enum {
	kIOMapperUncached      = 0x0001,
#ifdef KERNEL_PRIVATE
	kIOMapperTransient     = 0x0002,
#endif
};
215 
216 #ifdef KERNEL_PRIVATE
217 
218 // Used for dmaCommandOperation communications for IODMACommand and mappers
219 
// Operation codes for dmaCommandOperation(). The top byte (see
// kIOMDDMACommandOperationMask) selects the operation; the low bits carry
// per-operation sub-options.
enum  {
	kIOMDWalkSegments             = 0x01000000,
	kIOMDFirstSegment             = 1 | kIOMDWalkSegments,
	kIOMDGetCharacteristics       = 0x02000000,
	kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
	kIOMDDMAActive                = 0x03000000,
	kIOMDSetDMAActive             = 1 | kIOMDDMAActive,
	// NOTE: deliberately the same value as kIOMDDMAActive — "inactive" is
	// the bare operation, "active" sets the low bit.
	kIOMDSetDMAInactive           = kIOMDDMAActive,
	kIOMDAddDMAMapSpec            = 0x04000000,
	kIOMDDMAMap                   = 0x05000000,
	kIOMDDMAUnmap                 = 0x06000000,
	kIOMDDMACommandOperationMask  = 0xFF000000,
};
// Out-parameter block for the kIOMDGetCharacteristics operation.
struct IOMDDMACharacteristics {
	UInt64 fLength;
	UInt32 fSGCount;
	UInt32 fPages;
	UInt32 fPageAlign;
	ppnum_t fHighestPage;
	IODirection fDirection;
	UInt8 fIsPrepared;
};
242 
// Argument block for the kIOMDDMAMap / kIOMDDMAUnmap operations.
struct IOMDDMAMapArgs {
	IOMapper            * fMapper;
	IODMACommand        * fCommand;
	IODMAMapSpecification fMapSpec;
	uint64_t              fOffset;       // byte offset into the descriptor
	uint64_t              fLength;       // byte length to (un)map
	uint64_t              fAlloc;        // resulting/target IOVM address
	uint64_t              fAllocLength;  // resulting/target IOVM length
};
252 
// Argument block for kIOMDWalkSegments: the caller sets fOffset (and the
// fMapped/fMappedBase inputs); the walk returns the segment's address and
// length and advances fOffset.
struct IOMDDMAWalkSegmentArgs {
	UInt64 fOffset;                 // Input/Output offset
	/* Output variables.
	 * Note to reader: fIOVMAddr is (sometimes?) a DART-mapped device address.
	 */
	UInt64 fIOVMAddr, fLength;
	UInt8 fMapped;                  // Input Variable, Require mapped IOVMA
	UInt64 fMappedBase;             // Input base of mapping
};
// Opaque per-walk scratch state carried between successive segment walks.
typedef UInt8 IOMDDMAWalkSegmentState[128];
263 
264 #endif /* KERNEL_PRIVATE */
265 
// Distinguished values returned by getPreparationID() (see below); real
// preparation IDs start above these sentinels.
enum{
	kIOPreparationIDUnprepared = 0,
	kIOPreparationIDUnsupported = 1,
	kIOPreparationIDAlwaysPrepared = 2,
};
271 
#ifdef KERNEL_PRIVATE
// Sentinel for a descriptor that has not been assigned an ID
// (see getDescriptorID()/setDescriptorID()).
#define kIODescriptorIDInvalid (0)
#endif
275 
276 #ifdef XNU_KERNEL_PRIVATE
277 struct IOMemoryReference;
278 #endif
279 
280 
281 /*! @class IOMemoryDescriptor : public OSObject
282  *   @abstract An abstract base class defining common methods for describing physical or virtual memory.
283  *   @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */
284 
285 class IOMemoryDescriptor : public OSObject
286 {
287 	friend class IOMemoryMap;
288 	friend class IOMultiMemoryDescriptor;
289 
290 	OSDeclareDefaultStructorsWithDispatch(IOMemoryDescriptor);
291 
292 protected:
293 
294 /*! @var reserved
295  *   Reserved for future use.  (Internal use only)  */
296 	struct IOMemoryDescriptorReserved * reserved;
297 
298 protected:
299 	OSPtr<OSSet>        _mappings;
300 	IOOptionBits        _flags;
301 
302 
303 #ifdef XNU_KERNEL_PRIVATE
304 public:
305 	struct IOMemoryReference *  _memRef;
306 	vm_tag_t _kernelTag;
307 	vm_tag_t _userTag;
308 	int16_t _dmaReferences;
309 	uint16_t _internalIOMDFlags;
310 	kern_allocation_name_t _mapName;
311 protected:
312 #else /* XNU_KERNEL_PRIVATE */
313 	void *              __iomd_reserved5;
314 	uint16_t            __iomd_reserved1[4];
315 	uintptr_t           __iomd_reserved2;
316 #endif /* XNU_KERNEL_PRIVATE */
317 
318 	uint16_t            _iomapperOptions;
319 #ifdef __LP64__
320 	uint16_t            __iomd_reserved3[3];
321 #else
322 	uint16_t            __iomd_reserved3;
323 #endif
324 	uintptr_t           __iomd_reserved4;
325 
326 #ifndef __LP64__
327 	IODirection         _direction;    /* use _flags instead */
328 #endif /* !__LP64__ */
329 	IOByteCount         _length;       /* length of all ranges */
330 	IOOptionBits        _tag;
331 
332 public:
333 	typedef IOOptionBits DMACommandOps;
334 #ifndef __LP64__
335 	virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
336 	    IOByteCount * length ) APPLE_KEXT_DEPRECATED;
337 #endif /* !__LP64__ */
338 
339 /*! @function initWithOptions
340  *   @abstract Master initialiser for all variants of memory descriptors.  For a more complete description see IOMemoryDescriptor::withOptions.
341  *   @discussion Note this function can be used to re-init a previously created memory descriptor.
342  *   @result true on success, false on failure. */
343 	virtual bool initWithOptions(void *         buffers,
344 	    UInt32         count,
345 	    UInt32         offset,
346 	    task_t         task,
347 	    IOOptionBits   options,
348 	    IOMapper *     mapper = kIOMapperSystem);
349 
350 #ifndef __LP64__
351 	virtual addr64_t getPhysicalSegment64( IOByteCount offset,
352 	    IOByteCount * length ) APPLE_KEXT_DEPRECATED;                                 /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
353 #endif /* !__LP64__ */
354 
355 /*! @function setPurgeable
356  *   @abstract Control the purgeable status of a memory descriptors memory.
357  *   @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again.  If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
358  *   @param newState - the desired new purgeable state of the memory:<br>
359  *   kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
360  *   kIOMemoryPurgeableVolatile    - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
361  *   kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
362  *   kIOMemoryPurgeableEmpty       - make the memory volatile, and discard any pages allocated to it.
363  *   @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
364  *   kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
365  *   kIOMemoryPurgeableVolatile    - the memory was volatile but its content has not been discarded by the VM system.<br>
366  *   kIOMemoryPurgeableEmpty       - the memory was volatile and has been discarded by the VM system.<br>
367  *   @result An IOReturn code. */
368 
369 	virtual IOReturn setPurgeable( IOOptionBits newState,
370 	    IOOptionBits * oldState );
371 
372 /*! @function setOwnership
373  *   @abstract Control the ownership of a memory descriptors memory.
374  *   @discussion IOBufferMemoryDescriptor are owned by a specific task. The ownership of such a buffer may be controlled with setOwnership().
375  *   @param newOwner - the task to be the new owner of the memory.
376  *   @param newLedgerTag - the ledger this memory should be accounted in.
377  *   @param newLedgerOptions - accounting options
378  *   @result An IOReturn code. */
379 
380 	IOReturn setOwnership( task_t newOwner,
381 	    int newLedgerTag,
382 	    IOOptionBits newLedgerOptions );
383 
384 /*! @function getPageCounts
385  *   @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
386  *   @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
387  *   @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
388  *   @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
389  *   @result An IOReturn code. */
390 
391 	IOReturn getPageCounts( IOByteCount * residentPageCount,
392 	    IOByteCount * dirtyPageCount);
393 
394 #if KERNEL_PRIVATE
395 #define IOMEMORYDESCRIPTOR_GETPAGECOUNTS_SUPPORTS_SWAPPED 1
396 #endif
397 /*! @function getPageCounts
398  *   @abstract Retrieve the number of resident, dirty, and swapped pages encompassed by an IOMemoryDescriptor.
399  *   @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
400  *   @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of resident, dirty pages encompassed by this IOMemoryDescriptor.
401  *   @param swappedPageCount - If non-null, a pointer to a byte count that will return the number of swapped pages encompassed by this IOMemoryDescriptor.
402  *   @result An IOReturn code. */
403 
404 	IOReturn getPageCounts( IOByteCount * residentPageCount,
405 	    IOByteCount * dirtyPageCount,
406 	    IOByteCount * swappedPageCount );
407 
408 /*! @function performOperation
409  *   @abstract Perform an operation on the memory descriptor's memory.
410  *   @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
411  *   @param options The operation to perform on the memory:<br>
412  *   kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
413  *   kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
414  *   @param offset A byte offset into the memory descriptor's memory.
415  *   @param length The length of the data range.
416  *   @result An IOReturn code. */
417 
418 	virtual IOReturn performOperation( IOOptionBits options,
419 	    IOByteCount offset, IOByteCount length );
420 
421 // Used for dedicated communications for IODMACommand
422 	virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;
423 
424 /*! @function getPhysicalSegment
425  *   @abstract Break a memory descriptor into its physically contiguous segments.
426  *   @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
427  *   @param offset A byte offset into the memory whose physical address to return.
428  *   @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset.
429  *   @result A physical address, or zero if the offset is beyond the length of the memory. */
430 
431 #ifdef __LP64__
432 	virtual addr64_t getPhysicalSegment( IOByteCount   offset,
433 	    IOByteCount * length,
434 	    IOOptionBits  options = 0 ) = 0;
435 #else /* !__LP64__ */
436 	virtual addr64_t getPhysicalSegment( IOByteCount   offset,
437 	    IOByteCount * length,
438 	    IOOptionBits  options );
439 #endif /* !__LP64__ */
440 
441 	virtual uint64_t getPreparationID( void );
442 	void             setPreparationID( void );
443 
444 	void     setVMTags(uint32_t kernelTag, uint32_t userTag);
445 	uint32_t getVMTag(vm_map_t map);
446 
447 #ifdef KERNEL_PRIVATE
448 	uint64_t getDescriptorID( void );
449 	void     setDescriptorID( void );
450 
451 	IOReturn ktraceEmitPhysicalSegments( void );
452 #endif
453 
454 #ifdef XNU_KERNEL_PRIVATE
455 	IOMemoryDescriptorReserved * getKernelReserved( void );
456 	void                         cleanKernelReserved(IOMemoryDescriptorReserved * reserved);
457 	IOReturn dmaMap(
458 		IOMapper                    * mapper,
459 		IOMemoryDescriptor          * memory,
460 		IODMACommand                * command,
461 		const IODMAMapSpecification * mapSpec,
462 		uint64_t                      offset,
463 		uint64_t                      length,
464 		uint64_t                    * mapAddress,
465 		uint64_t                    * mapLength);
466 	IOReturn dmaUnmap(
467 		IOMapper                    * mapper,
468 		IODMACommand                * command,
469 		uint64_t                      offset,
470 		uint64_t                      mapAddress,
471 		uint64_t                      mapLength);
472 	void dmaMapRecord(
473 		IOMapper                    * mapper,
474 		IODMACommand                * command,
475 		uint64_t                      mapLength);
476 #endif
477 
478 private:
479 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 0);
480 #ifdef __LP64__
481 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
482 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
483 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
484 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
485 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
486 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
487 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
488 #else /* !__LP64__ */
489 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 1);
490 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 2);
491 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 3);
492 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 4);
493 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 5);
494 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 6);
495 	OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 7);
496 #endif /* !__LP64__ */
497 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
498 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
499 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
500 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
501 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
502 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
503 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
504 	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);
505 
506 protected:
507 	virtual void free(void) APPLE_KEXT_OVERRIDE;
508 public:
509 	static void initialize( void );
510 
511 public:
512 /*! @function withAddress
513  *   @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
514  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map.  This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
515  *   @param address The virtual address of the first byte in the memory.
516  *   @param withLength The length of memory.
517  *   @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
518  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
519 
520 	static OSPtr<IOMemoryDescriptor>  withAddress(void *       address,
521 	    IOByteCount  withLength,
522 	    IODirection  withDirection);
523 
524 #ifndef __LP64__
525 	static OSPtr<IOMemoryDescriptor>  withAddress(IOVirtualAddress address,
526 	    IOByteCount  withLength,
527 	    IODirection  withDirection,
528 	    task_t       withTask) APPLE_KEXT_DEPRECATED;                                 /* use withAddressRange() and prepare() instead */
529 #endif /* !__LP64__ */
530 
531 /*! @function withPhysicalAddress
532  *   @abstract Create an IOMemoryDescriptor to describe one physical range.
533  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
534  *   @param address The physical address of the first byte in the memory.
535  *   @param withLength The length of memory.
536  *   @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
537  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
538 
539 	static OSPtr<IOMemoryDescriptor>  withPhysicalAddress(
540 		IOPhysicalAddress       address,
541 		IOByteCount             withLength,
542 		IODirection             withDirection );
543 
544 #ifndef __LP64__
545 	static OSPtr<IOMemoryDescriptor>  withRanges(IOVirtualRange * ranges,
546 	    UInt32           withCount,
547 	    IODirection      withDirection,
548 	    task_t           withTask,
549 	    bool             asReference = false) APPLE_KEXT_DEPRECATED;                                 /* use withAddressRanges() instead */
550 #endif /* !__LP64__ */
551 
552 /*! @function withAddressRange
553  *   @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
554  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map.  This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
555  *   @param address The virtual address of the first byte in the memory.
556  *   @param length The length of memory.
557  *   @param options
558  *       kIOMemoryDirectionMask (options:direction)	This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
559  *   @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
560  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
561 
562 	static OSPtr<IOMemoryDescriptor>  withAddressRange(
563 		mach_vm_address_t address,
564 		mach_vm_size_t    length,
565 		IOOptionBits      options,
566 		task_t            task);
567 
568 /*! @function withAddressRanges
569  *   @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
570  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task.  This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
571  *   @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
572  *   @param rangeCount The member count of the ranges array.
573  *   @param options
574  *       kIOMemoryDirectionMask (options:direction)	This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
575  *       kIOMemoryAsReference	For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory.  This is an optimisation to try to minimise unnecessary allocations.
576  *   @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
577  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
578 
579 	static OSPtr<IOMemoryDescriptor>  withAddressRanges(
580 		IOAddressRange * ranges,
581 		UInt32           rangeCount,
582 		IOOptionBits     options,
583 		task_t           task);
584 
585 /*! @function withOptions
586  *   @abstract Master initialiser for all variants of memory descriptors.
587  *   @discussion This method creates and initializes an IOMemoryDescriptor for memory it has three main variants: Virtual, Physical & mach UPL.  These variants are selected with the options parameter, see below.  This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
588  *
589  *
590  *   @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.
591  *
592  *   @param count options:type = Virtual or Physical count contains a count of the number of entires in the buffers array.  For options:type = UPL this field contains a total length.
593  *
594  *   @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.
595  *
596  *   @param task Only used options:type = Virtual, The task each of the virtual ranges are mapped into.
597  *
598  *   @param options
599  *       kIOMemoryDirectionMask (options:direction)	This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
600  *       kIOMemoryTypeMask (options:type)	kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates that what type of memory basic memory descriptor to use.  This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
601  *       kIOMemoryAsReference	For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory.  This is an optimisation to try to minimise unnecessary allocations.
602  *       kIOMemoryBufferPageable	Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.
603  *
604  *   @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses.  Defaults to 0 which indicates that the system mapper is to be used, if present.
605  *
606  *   @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
607 
608 	static OSPtr<IOMemoryDescriptor> withOptions(void *       buffers,
609 	    UInt32       count,
610 	    UInt32       offset,
611 	    task_t       task,
612 	    IOOptionBits options,
613 	    IOMapper *   mapper = kIOMapperSystem);
614 
#ifndef __LP64__
	// Legacy 32-bit constructor taking an array of physical ranges.
	static OSPtr<IOMemoryDescriptor>  withPhysicalRanges(
		IOPhysicalRange *   ranges,
		UInt32              withCount,
		IODirection         withDirection,
		bool                asReference = false) APPLE_KEXT_DEPRECATED;                             /* use withOptions() and kIOMemoryTypePhysical instead */
#endif /* !__LP64__ */
622 
#ifndef __LP64__
	// Legacy 32-bit constructor describing a sub-range of another descriptor.
	static OSPtr<IOMemoryDescriptor>  withSubRange(IOMemoryDescriptor *of,
	    IOByteCount offset,
	    IOByteCount length,
	    IODirection withDirection) APPLE_KEXT_DEPRECATED;                                  /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
#endif /* !__LP64__ */
629 
/*! @function withPersistentMemoryDescriptor
 *   @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
 *   @discussion If the original memory descriptor's address and length are still backed by the same real memory, i.e. the user hasn't deallocated and then reallocated memory at the same address, then the original memory descriptor is returned with an additional reference.  Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm.  Note it is not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
 *   @param originalMD The memory descriptor to be duplicated.
 *   @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. */
	static OSPtr<IOMemoryDescriptor>
	withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);
637 
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
	virtual bool initWithAddress(void *       address,
	    IOByteCount  withLength,
	    IODirection  withDirection) APPLE_KEXT_DEPRECATED;                      /* use initWithOptions() instead */
	virtual bool initWithAddress(IOVirtualAddress address,
	    IOByteCount  withLength,
	    IODirection  withDirection,
	    task_t       withTask) APPLE_KEXT_DEPRECATED;                      /* use initWithOptions() instead */
	virtual bool initWithPhysicalAddress(
		IOPhysicalAddress      address,
		IOByteCount            withLength,
		IODirection            withDirection ) APPLE_KEXT_DEPRECATED;                  /* use initWithOptions() instead */
	virtual bool initWithRanges(IOVirtualRange * ranges,
	    UInt32           withCount,
	    IODirection      withDirection,
	    task_t           withTask,
	    bool             asReference = false) APPLE_KEXT_DEPRECATED;                     /* use initWithOptions() instead */
	virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
	    UInt32           withCount,
	    IODirection      withDirection,
	    bool             asReference = false) APPLE_KEXT_DEPRECATED;                             /* use initWithOptions() instead */
#endif /* !__LP64__ */
662 
/*! @function getDirection
 *   @abstract Accessor to get the direction the memory descriptor was created with.
 *   @discussion This method returns the direction the memory descriptor was created with.
 *   @result The direction. */

	virtual IODirection getDirection() const;

/*! @function getLength
 *   @abstract Accessor to get the length of the memory descriptor (over all its ranges).
 *   @discussion This method returns the total length of the memory described by the descriptor, i.e. the sum of its ranges' lengths.
 *   @result The byte count. */

	virtual IOByteCount getLength() const;

// Feature macro so clients can detect the availability of getDMAMapLength() at compile time.
#define IOMEMORYDESCRIPTOR_SUPPORTS_GETDMAMAPLENGTH
/*! @function getDMAMapLength
 *   @abstract Return the length the descriptor occupies when DMA mapped.
 *   @param offset If non-NULL, presumably receives the offset of the DMA mapping — confirm against the implementation.
 *   @result The DMA map length in bytes. */
	uint64_t getDMAMapLength(uint64_t * offset = NULL);
679 
/*! @function setTag
 *   @abstract Set the tag for the memory descriptor.
 *   @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
 *   @param tag The tag. */

	virtual void setTag( IOOptionBits tag );

/*! @function getTag
 *   @abstract Accessor to retrieve the tag for the memory descriptor.
 *   @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
 *   @result The tag. */

	virtual IOOptionBits getTag( void );

/*! @function getFlags
 *   @abstract Accessor to retrieve the options the memory descriptor was created with.
 *   @discussion Accessor to retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum.
 *   @result The flags bitfield. */

	uint64_t getFlags(void);
700 
/*! @function readBytes
 *   @abstract Copy data from the memory descriptor's buffer to the specified buffer.
 *   @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer.  The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared.  kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
 *   @param offset A byte offset into the memory descriptor's memory.
 *   @param bytes The caller supplied buffer to copy the data to.
 *   @param withLength The length of the data to copy.
 *   @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

	virtual IOByteCount readBytes(IOByteCount offset,
	    void * bytes, IOByteCount withLength);

/*! @function writeBytes
 *   @abstract Copy data to the memory descriptor's buffer from the specified buffer.
 *   @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer.  The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared.  kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
 *   @param offset A byte offset into the memory descriptor's memory.
 *   @param bytes The caller supplied buffer to copy the data from.
 *   @param withLength The length of the data to copy.
 *   @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

	virtual IOByteCount writeBytes(IOByteCount offset,
	    const void * bytes, IOByteCount withLength);
722 
#ifndef __LP64__
	// Legacy 32-bit physical segment accessor.
	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 *   @abstract Return the physical address of the first byte in the memory.
 *   @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
 *   @result A physical address. */

	IOPhysicalAddress getPhysicalAddress();

#ifndef __LP64__
	virtual void * getVirtualSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_DEPRECATED;                             /* use map() and getVirtualAddress() instead */
#endif /* !__LP64__ */
739 
/*! @function prepare
 *   @abstract Prepare the memory for an I/O transfer.
 *   @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer.  The complete() method completes the processing of the memory after the I/O transfer finishes.  Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor.
 *   @param forDirection The direction of the I/O to be prepared for, or kIODirectionNone for the direction specified by the memory descriptor.
 *   @result An IOReturn code. */

	virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;

/*! @function complete
 *   @abstract Complete processing of the memory after an I/O transfer finishes.
 *   @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory.  In 10.3 or greater systems the direction argument to complete is no longer respected.  The direction is totally determined at prepare() time.
 *   @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
 *   @result An IOReturn code. */

	virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
755 
/*
 * Mapping functions.
 */

/*! @function createMappingInTask
 *   @abstract Maps an IOMemoryDescriptor into a task.
 *   @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to an IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
 *   @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
 *   @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
 *   @param options Mapping options are defined in IOTypes.h,<br>
 *       kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
 *       kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
 *       kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
 *       kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.<br>
 *       kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
 *       kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
 *       kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
 *   @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
 *   @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
 *   @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */

	OSPtr<IOMemoryMap>        createMappingInTask(
		task_t                  intoTask,
		mach_vm_address_t       atAddress,
		IOOptionBits            options,
		mach_vm_size_t          offset = 0,
		mach_vm_size_t          length = 0 );
783 
#ifndef __LP64__
	// Legacy 32-bit mapping interface.
	virtual OSPtr<IOMemoryMap>       map(
		task_t          intoTask,
		IOVirtualAddress        atAddress,
		IOOptionBits            options,
		IOByteCount             offset = 0,
		IOByteCount             length = 0 ) APPLE_KEXT_DEPRECATED; /* use createMappingInTask() instead */
#endif /* !__LP64__ */
792 
/*! @function map
 *   @abstract Maps an IOMemoryDescriptor into the kernel map.
 *   @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
 *   @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
 *   @result See the full version of the createMappingInTask method. */

	virtual OSPtr<IOMemoryMap>       map(
		IOOptionBits            options = 0 );
801 
/*! @function setMapping
 *   @abstract Establishes an already existing mapping.
 *   @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
 *   @param task Address space in which the mapping exists.
 *   @param mapAddress Virtual address of the mapping.
 *   @param options Caching and read-only attributes of the mapping.
 *   @result An IOMemoryMap object created to represent the mapping. */

	virtual OSPtr<IOMemoryMap>       setMapping(
		task_t          task,
		IOVirtualAddress        mapAddress,
		IOOptionBits            options = 0 );
814 
/*! @function setMapperOptions
 *   @abstract Set the IOMapper options.
 *   @discussion This method sets the IOMapper options.
 *   @param options IOMapper options to be set. */

	void setMapperOptions( uint16_t options );

/*! @function getMapperOptions
 *   @abstract Return the IOMapper options.
 *   @discussion This method returns the IOMapper options set earlier using setMapperOptions().
 *   @result The IOMapper options set. */

	uint16_t getMapperOptions( void );
828 
// Following methods are private implementation

	// Redirect access to mappings of this descriptor for the given task.
	// NOTE(review): semantics inferred from IOMemoryMap::redirect() — confirm.
#ifdef __LP64__
	virtual
#endif /* __LP64__ */
	IOReturn redirect( task_t safeTask, bool redirect );

	// Handle a fault for [sourceOffset, sourceOffset + length) of a
	// pager-backed mapping of this descriptor.
	// NOTE(review): inferred from the name and populateDevicePager() below — confirm.
	IOReturn handleFault(
		void *                  _pager,
		mach_vm_size_t          sourceOffset,
		mach_vm_size_t          length);

	// Enter this descriptor's pages covering [sourceOffset, sourceOffset + length)
	// into the device pager at the given address in addressMap.
	// NOTE(review): inferred from the signature — confirm against implementation.
	IOReturn populateDevicePager(
		void *                  pager,
		vm_map_t                addressMap,
		mach_vm_address_t       address,
		mach_vm_size_t          sourceOffset,
		mach_vm_size_t          length,
		IOOptionBits            options );

	// Create an IOMemoryMap for this descriptor (possibly shared, per the
	// createMappingInTask() discussion above). The returned object is not
	// retained for the caller (LIBKERN_RETURNS_NOT_RETAINED).
	virtual LIBKERN_RETURNS_NOT_RETAINED IOMemoryMap *      makeMapping(
		IOMemoryDescriptor *    owner,
		task_t                  intoTask,
		IOVirtualAddress        atAddress,
		IOOptionBits            options,
		IOByteCount             offset,
		IOByteCount             length );
856 
#if KERNEL_PRIVATE
/*! @function copyContext
 *   @abstract Accessor to retrieve the context previously set for the memory descriptor.
 *   @discussion This method returns the context for the memory descriptor. The context is not interpreted by IOMemoryDescriptor.
 *   @result The context, returned with an additional retain to be released by the caller. */
	OSObject * copyContext(void) const;
#ifdef XNU_KERNEL_PRIVATE
	// Keyed variants of copyContext().
	// NOTE(review): keyed-context semantics inferred from the signatures — confirm.
	OSObject * copyContext(const OSSymbol * key) const;
	OSObject * copyContext(const char * key) const;
	OSObject * copySharingContext(const char * key) const;
#endif /* XNU_KERNEL_PRIVATE */

/*! @function setContext
 *   @abstract Set a context object for the memory descriptor. The context is not interpreted by IOMemoryDescriptor.
 *   @discussion The context is retained, and will be released when the memory descriptor is freed or when a new context object is set.
 */
	void setContext(OSObject * context);
#ifdef XNU_KERNEL_PRIVATE
	// Keyed variants of setContext().
	void setContext(const OSSymbol * key, OSObject * context);
	void setContext(const char * key, OSObject * context);
	void setSharingContext(const char * key, OSObject * context);
	bool hasSharingContext(void);

#endif /* XNU_KERNEL_PRIVATE */
#endif /* KERNEL_PRIVATE */
882 
protected:
	// Add a mapping to this descriptor's list of mappings.
	virtual void                addMapping(
		IOMemoryMap *           mapping );

	// Remove a mapping from this descriptor's list of mappings.
	virtual void                removeMapping(
		IOMemoryMap *           mapping );

	// Subclass hook: create the mapping in addressMap.
	// NOTE(review): caller relationship to map()/makeMapping() inferred — confirm.
	virtual IOReturn doMap(
		vm_map_t                addressMap,
		IOVirtualAddress *      atAddress,
		IOOptionBits            options,
		IOByteCount             sourceOffset = 0,
		IOByteCount             length = 0 );

	// Subclass hook: destroy the mapping at logical in addressMap.
	virtual IOReturn doUnmap(
		vm_map_t                addressMap,
		IOVirtualAddress        logical,
		IOByteCount             length );
901 };
902 
903 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
904 
905 /*! @class IOMemoryMap : public OSObject
906  *   @abstract A class defining common methods for describing a memory mapping.
 *   @discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */
908 
class IOMemoryMap : public OSObject
{
	OSDeclareDefaultStructorsWithDispatch(IOMemoryMap);
#ifdef XNU_KERNEL_PRIVATE
public:
	IOOptionBits         fOptions;          // kIOMap* options the mapping was created with
	OSPtr<IOMemoryDescriptor>  fMemory;     // descriptor this map was created from
	OSPtr<IOMemoryMap>         fSuperMap;   // enclosing map, presumably when this is a sub-mapping — confirm
	mach_vm_size_t       fOffset;           // offset into fMemory where the mapping starts
	mach_vm_address_t    fAddress;          // virtual address of the first byte of the mapping
	mach_vm_size_t       fLength;           // length of the mapping in bytes
	task_t               fAddressTask;      // task the mapping exists in
	vm_map_t             fAddressMap;       // vm_map the mapping exists in
	upl_t                fRedirUPL;         // UPL held while redirected — TODO confirm lifetime
	uint8_t              fUserClientUnmap;  // flag used by userClientUnmap() — TODO confirm
#if IOTRACKING
	IOTrackingUser       fTracking;
#endif
#endif /* XNU_KERNEL_PRIVATE */

protected:
	virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
	// IOMemoryMap instances unmap themselves upon free (see the unmap() discussion below).
	virtual void free(void) APPLE_KEXT_OVERRIDE;

public:
/*! @function getVirtualAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
 *   @result A virtual address. */

	virtual IOVirtualAddress    getVirtualAddress(void);

/*! @function getPhysicalSegment
 *   @abstract Break a mapping into its physically contiguous segments.
 *   @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
 *   @param offset A byte offset into the mapping whose physical address to return.
 *   @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
 *   @result A physical address, or zero if the offset is beyond the length of the mapping. */

#ifdef __LP64__
	virtual IOPhysicalAddress   getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length,
	    IOOptionBits  options = 0);
#else /* !__LP64__ */
	virtual IOPhysicalAddress   getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 *   @abstract Return the physical address of the first byte in the mapping.
 *   @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
 *   @result A physical address. */

	IOPhysicalAddress getPhysicalAddress(void);

/*! @function getLength
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */

	virtual IOByteCount         getLength(void);

/*! @function getAddressTask
 *   @abstract Accessor to the task of the mapping.
 *   @discussion This method returns the mach task the mapping exists in.
 *   @result A mach task_t. */

	virtual task_t              getAddressTask();

/*! @function getMemoryDescriptor
 *   @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
 *   @discussion This method returns the IOMemoryDescriptor the mapping was created from.
 *   @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */

	virtual IOMemoryDescriptor * getMemoryDescriptor();

/*! @function getMapOptions
 *   @abstract Accessor to the options the mapping was created with.
 *   @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
 *   @result Options for the mapping, including cache settings. */

	virtual IOOptionBits        getMapOptions();

/*! @function unmap
 *   @abstract Force the IOMemoryMap to unmap, without destroying the object.
 *   @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
 *   @result An IOReturn code. */

	virtual IOReturn            unmap();

	// Notification that the mapping's task has died.
	// NOTE(review): semantics inferred from the name — confirm with implementation.
	virtual void                taskDied();

/*! @function redirect
 *   @abstract Replace the memory mapped in a process with new backing memory.
 *   @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
 *   @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
 *   @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
 *   @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
 *   @result An IOReturn code. */

#ifndef __LP64__
// For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
// for 64 bit, these fall together on the 64 bit one.
	virtual IOReturn            redirect(IOMemoryDescriptor * newBackingMemory,
	    IOOptionBits         options,
	    IOByteCount          offset = 0);
#endif
	virtual IOReturn            redirect(IOMemoryDescriptor * newBackingMemory,
	    IOOptionBits         options,
	    mach_vm_size_t       offset = 0);

#ifdef __LP64__
/*! @function getAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping.
 *   @result A virtual address. */
	inline mach_vm_address_t    getAddress() __attribute__((always_inline));
/*! @function getSize
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */
	inline mach_vm_size_t       getSize() __attribute__((always_inline));
#else /* !__LP64__ */
/*! @function getAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping.
 *   @result A virtual address. */
	virtual mach_vm_address_t   getAddress();
/*! @function getSize
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */
	virtual mach_vm_size_t      getSize();
#endif /* !__LP64__ */

#ifdef XNU_KERNEL_PRIVATE
// for IOMemoryDescriptor use
	IOMemoryMap *  copyCompatible( IOMemoryMap * newMapping );

	bool init(
		task_t                  intoTask,
		mach_vm_address_t       toAddress,
		IOOptionBits            options,
		mach_vm_size_t          offset,
		mach_vm_size_t          length );

	bool    setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);

	IOReturn redirect(
		task_t                  intoTask, bool redirect );

	IOReturn userClientUnmap();
#endif /* XNU_KERNEL_PRIVATE */

	// Wire down part of the mapping's memory.
	// NOTE(review): inferred from the name and [offset, length) parameters — confirm.
	IOReturn wireRange(
		uint32_t                options,
		mach_vm_size_t          offset,
		mach_vm_size_t          length);

	OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
};
1077 
1078 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef XNU_KERNEL_PRIVATE
// Internal flag bits. These flags must not overlap with the options to
//	IOMemoryDescriptor::initWithRanges(... IOOptionBits options);
enum {
	_kIOMemorySourceSegment     = 0x00002000
};
#endif /* XNU_KERNEL_PRIVATE */
1086 
1087 // The following classes are private implementation of IOMemoryDescriptor - they
1088 // should not be referenced directly, just through the public API's in the
1089 // IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
1090 // might be created by IOMemoryDescriptor::withAddressRange(), but there should be
// no need to reference it as anything but a generic IOMemoryDescriptor *.
1092 
1093 class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
1094 {
1095 	OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);
1096 
1097 public:
1098 	union Ranges {
1099 		IOVirtualRange   *v;
1100 		IOAddressRange   *v64;
1101 		IOPhysicalRange  *p;
1102 		void             *uio;
1103 	};
1104 protected:
1105 	Ranges              _ranges;
1106 	unsigned            _rangesCount;   /* number of address ranges in list */
1107 #ifndef __LP64__
1108 	bool                _rangesIsAllocated;/* is list allocated by us? */
1109 #endif /* !__LP64__ */
1110 
1111 	task_t              _task;           /* task where all ranges are mapped to */
1112 
1113 	union {
1114 		IOVirtualRange  v;
1115 		IOPhysicalRange p;
1116 	}                   _singleRange;  /* storage space for a single range */
1117 
1118 	unsigned            _wireCount;    /* number of outstanding wires */
1119 
1120 #ifndef __LP64__
1121 	uintptr_t _cachedVirtualAddress;
1122 
1123 	IOPhysicalAddress   _cachedPhysicalAddress;
1124 #endif /* !__LP64__ */
1125 
1126 	bool                _initialized;  /* has superclass been initialized? */
1127 
1128 public:
1129 	virtual void free() APPLE_KEXT_OVERRIDE;
1130 
1131 	virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;
1132 
1133 	virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;
1134 
1135 #ifdef XNU_KERNEL_PRIVATE
1136 // Internal APIs may be made virtual at some time in the future.
1137 	IOReturn wireVirtual(IODirection forDirection);
1138 	IOReturn dmaMap(
1139 		IOMapper                    * mapper,
1140 		IOMemoryDescriptor          * memory,
1141 		IODMACommand                * command,
1142 		const IODMAMapSpecification * mapSpec,
1143 		uint64_t                      offset,
1144 		uint64_t                      length,
1145 		uint64_t                    * mapAddress,
1146 		uint64_t                    * mapLength);
1147 	bool initMemoryEntries(size_t size, IOMapper * mapper);
1148 
1149 	IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
1150 	    IOMemoryReference * realloc);
1151 	void memoryReferenceFree(IOMemoryReference * ref);
1152 	void memoryReferenceRelease(IOMemoryReference * ref);
1153 
1154 	IOReturn memoryReferenceCreate(
1155 		IOOptionBits         options,
1156 		IOMemoryReference ** reference);
1157 
1158 	IOReturn memoryReferenceMap(IOMemoryReference * ref,
1159 	    vm_map_t            map,
1160 	    mach_vm_size_t      inoffset,
1161 	    mach_vm_size_t      size,
1162 	    IOOptionBits        options,
1163 	    mach_vm_address_t * inaddr);
1164 
1165 	IOReturn memoryReferenceMapNew(IOMemoryReference * ref,
1166 	    vm_map_t            map,
1167 	    mach_vm_size_t      inoffset,
1168 	    mach_vm_size_t      size,
1169 	    IOOptionBits        options,
1170 	    mach_vm_address_t * inaddr);
1171 
1172 	static IOReturn memoryReferenceSetPurgeable(
1173 		IOMemoryReference * ref,
1174 		IOOptionBits newState,
1175 		IOOptionBits * oldState);
1176 	static IOReturn memoryReferenceSetOwnership(
1177 		IOMemoryReference * ref,
1178 		task_t newOwner,
1179 		int newLedgerTag,
1180 		IOOptionBits newLedgerOptions);
1181 	static IOReturn memoryReferenceGetPageCounts(
1182 		IOMemoryReference * ref,
1183 		IOByteCount       * residentPageCount,
1184 		IOByteCount       * dirtyPageCount,
1185 		IOByteCount       * swappedPageCount);
1186 
1187 	static uint64_t memoryReferenceGetDMAMapLength(
1188 		IOMemoryReference * ref,
1189 		uint64_t * offset);
1190 
1191 	IOByteCount readBytes(IOByteCount offset,
1192 	    void * bytes, IOByteCount withLength) override;
1193 	IOByteCount writeBytes(IOByteCount offset,
1194 	    const void * bytes, IOByteCount withLength) override;
1195 
1196 #endif
1197 
private:

#ifndef __LP64__
	// Legacy ILP32-only internals; compiled out on LP64 kernels.
	virtual void setPosition(IOByteCount position);
	virtual void mapIntoKernel(unsigned rangeIndex);
	virtual void unmapFromKernel();
#endif /* !__LP64__ */

// Internal
	OSPtr<_IOMemoryDescriptorMixedData> _memoryEntries; // per-range bookkeeping; presumably populated during prepare() — confirm in the .cpp
	unsigned int    _pages;           // page count covered by the descriptor (by name; confirm)
	ppnum_t         _highestPage;     // highest physical page number referenced (by name; confirm)
	uint32_t        __iomd_reservedA; // reserved for future use
	uint32_t        __iomd_reservedB; // reserved for future use

	IOLock *        _prepareLock;     // presumably serializes prepare()/complete() — confirm in the .cpp
1214 
public:
/*
 * IOMemoryDescriptor required methods
 */

// Master initialiser: 'buffers' and 'count' describe the memory ranges
// (their interpretation depends on 'options'), 'task' is the address space
// they live in, and 'mapper' selects the DMA mapper (system mapper by default).
	virtual bool initWithOptions(void *         buffers,
	    UInt32         count,
	    UInt32         offset,
	    task_t         task,
	    IOOptionBits   options,
	    IOMapper *     mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;

#ifndef __LP64__
// Secondary initialisers — deprecated (APPLE_KEXT_DEPRECATED) and compiled
// only on 32-bit kernels; new code should use initWithOptions().
	virtual bool initWithAddress(void *         address,
	    IOByteCount    withLength,
	    IODirection    withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithAddress(IOVirtualAddress address,
	    IOByteCount    withLength,
	    IODirection    withDirection,
	    task_t         withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithPhysicalAddress(
		IOPhysicalAddress      address,
		IOByteCount            withLength,
		IODirection            withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithRanges(        IOVirtualRange * ranges,
	    UInt32           withCount,
	    IODirection      withDirection,
	    task_t           withTask,
	    bool             asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
	    UInt32           withCount,
	    IODirection      withDirection,
	    bool             asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

// Deprecated 32-bit-only segment accessors; the LP64 getPhysicalSegment()
// overload below subsumes them.
	virtual addr64_t getPhysicalSegment64( IOByteCount offset,
	    IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE;

	virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual void * getVirtualSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */
1267 
	// Set the purgeable state of the described memory to 'newState';
	// the previous state is returned through 'oldState'.
	virtual IOReturn setPurgeable( IOOptionBits newState,
	    IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;

	// Attribute the described memory to 'newOwner' for ledger accounting.
	IOReturn setOwnership( task_t newOwner,
	    int newLedgerTag,
	    IOOptionBits newLedgerOptions );

	// Return the physical address of the segment containing 'offset';
	// '*length' receives the contiguous byte count remaining in that segment.
	virtual addr64_t getPhysicalSegment( IOByteCount   offset,
	    IOByteCount * length,
#ifdef __LP64__
	    IOOptionBits  options = 0 ) APPLE_KEXT_OVERRIDE;
#else /* !__LP64__ */
	    IOOptionBits  options)APPLE_KEXT_OVERRIDE;
#endif /* !__LP64__ */

	// Wire down (prepare) / unwire (complete) the memory for I/O.
	virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

	virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

	// Build a mapping of 'owner' into 'intoTask'; the returned object is
	// not retained on behalf of the caller (LIBKERN_RETURNS_NOT_RETAINED).
	virtual LIBKERN_RETURNS_NOT_RETAINED IOMemoryMap *      makeMapping(
		IOMemoryDescriptor *    owner,
		task_t                  intoTask,
		IOVirtualAddress        atAddress,
		IOOptionBits            options,
		IOByteCount             offset,
		IOByteCount             length ) APPLE_KEXT_OVERRIDE;

	// Low-level map/unmap operating directly on a vm_map.
	virtual IOReturn doMap(
		vm_map_t                addressMap,
		IOVirtualAddress *      atAddress,
		IOOptionBits            options,
		IOByteCount             sourceOffset = 0,
		IOByteCount             length = 0 ) APPLE_KEXT_OVERRIDE;

	virtual IOReturn doUnmap(
		vm_map_t                addressMap,
		IOVirtualAddress        logical,
		IOByteCount             length ) APPLE_KEXT_OVERRIDE;

	virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;

// Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
	static OSPtr<IOMemoryDescriptor>
	withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);

	// Compute the option bits to use when creating a memory reference
	// on behalf of 'map'.
	IOOptionBits memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * map);
};
1315 
1316 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1317 
1318 #ifdef __LP64__
mach_vm_address_t
IOMemoryMap::getAddress()
{
	// LP64 inline: getAddress() is an alias for getVirtualAddress().
	return getVirtualAddress();
}
1324 
mach_vm_size_t
IOMemoryMap::getSize()
{
	// LP64 inline: getSize() is an alias for getLength().
	return getLength();
}
1330 #else /* !__LP64__ */
1331 #include <IOKit/IOSubMemoryDescriptor.h>
1332 #endif /* !__LP64__ */
1333 
1334 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1335 
// Kernel-wide enable flag for IOMemoryDescriptor ownership transfer
// (setOwnership); defined in the IOKit implementation — presumably
// controllable via a boot-arg, confirm in IOMemoryDescriptor.cpp.
extern bool iokit_iomd_setownership_enabled;
1337 
1338 #endif /* !_IOMEMORYDESCRIPTOR_H */
1339