1 /*
2 * Copyright (c) 1998-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifndef _IOMEMORYDESCRIPTOR_H
29 #define _IOMEMORYDESCRIPTOR_H
30
31 #include <sys/cdefs.h>
32
33 #include <IOKit/IOTypes.h>
34 #include <IOKit/IOLocks.h>
35 #include <libkern/c++/OSPtr.h>
36 #include <libkern/c++/OSContainers.h>
37 #include <DriverKit/IOMemoryDescriptor.h>
38 #include <DriverKit/IOMemoryMap.h>
39 #ifdef XNU_KERNEL_PRIVATE
40 #include <IOKit/IOKitDebug.h>
41 #endif
42
43 #include <mach/memory_object_types.h>
44
45 class IOMemoryDescriptor;
46 class IOMemoryMap;
47 class IOMapper;
48 class IOService;
49 class IODMACommand;
50 class _IOMemoryDescriptorMixedData;
51
/*
 * Direction of transfer, with respect to the described memory.
 * On LP64 kernels the historical IODirection enum was folded into
 * IOOptionBits (see the typedef after the enum); the enumerators remain
 * as the low bits of the memory descriptor options.
 */
#ifdef __LP64__
enum
#else /* !__LP64__ */
enum IODirection
#endif /* !__LP64__ */
{
	kIODirectionNone  = 0x0,//                    same as VM_PROT_NONE
	kIODirectionIn    = 0x1,// User land 'read',  same as VM_PROT_READ
	kIODirectionOut   = 0x2,// User land 'write', same as VM_PROT_WRITE
	kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
	kIODirectionInOut = kIODirectionIn | kIODirectionOut,

	// these flags are valid for the prepare() method only
	kIODirectionPrepareToPhys32   = 0x00000004,
	kIODirectionPrepareNoFault    = 0x00000008,
	kIODirectionPrepareReserved1  = 0x00000010,
	// The ALL-CAPS #defines below let clients feature-test for the
	// presence of the corresponding enumerator at preprocessing time.
#define IODIRECTIONPREPARENONCOHERENTDEFINED 1
	kIODirectionPrepareNonCoherent = 0x00000020,
#if KERNEL_PRIVATE
#define IODIRECTIONPREPAREAVOIDTHROTTLING 1
	kIODirectionPrepareAvoidThrottling = 0x00000100,
#endif

	// these flags are valid for the complete() method only
#define IODIRECTIONCOMPLETEWITHERRORDEFINED 1
	kIODirectionCompleteWithError    = 0x00000040,
#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
	kIODirectionCompleteWithDataValid = 0x00000080,
};

#ifdef __LP64__
// LP64: IODirection values are carried inside the IOOptionBits options word.
typedef IOOptionBits IODirection;
#endif /* __LP64__ */
88
/*
 * IOOptionBits used in the withOptions variant.
 * The low byte is split into two sub-fields: bits 0-2 carry the
 * IODirection (kIOMemoryDirectionMask) and bits 4-7 select the
 * descriptor type (kIOMemoryTypeMask).
 */
enum {
	kIOMemoryDirectionMask      = 0x00000007,// options:direction (IODirection bits)
#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryAutoPrepare        = 0x00000008,// Shared with Buffer MD
#endif

	// options:type — which flavor of backing the descriptor describes;
	// extracted with kIOMemoryTypeMask.
	kIOMemoryTypeVirtual        = 0x00000010,
	kIOMemoryTypePhysical       = 0x00000020,
	kIOMemoryTypeUPL            = 0x00000030,
	kIOMemoryTypePersistentMD   = 0x00000040,// Persistent Memory Descriptor
	kIOMemoryTypeUIO            = 0x00000050,
#ifdef __LP64__
	// LP64: all ranges are 64-bit, so the 64-bit type codes alias the base ones.
	kIOMemoryTypeVirtual64      = kIOMemoryTypeVirtual,
	kIOMemoryTypePhysical64     = kIOMemoryTypePhysical,
#else /* !__LP64__ */
	kIOMemoryTypeVirtual64      = 0x00000060,
	kIOMemoryTypePhysical64     = 0x00000070,
#endif /* !__LP64__ */
	kIOMemoryTypeMask           = 0x000000f0,

	// behavior flags
	kIOMemoryAsReference        = 0x00000100,// don't copy the ranges array; caller keeps it alive
	kIOMemoryBufferPageable     = 0x00000400,// IOBufferMemoryDescriptor: allocate from the pageable submap
	kIOMemoryMapperNone         = 0x00000800,// Shared with Buffer MD
	kIOMemoryHostOnly           = 0x00001000,// Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryRedirected         = 0x00004000,
	kIOMemoryPreparedReadOnly   = 0x00008000,
#endif
	kIOMemoryPersistent         = 0x00010000,// required for withPersistentMemoryDescriptor()
	kIOMemoryMapCopyOnWrite     = 0x00020000,
	kIOMemoryRemote             = 0x00040000,
	kIOMemoryThreadSafe         = 0x00100000,// Shared with Buffer MD
	kIOMemoryClearEncrypt       = 0x00200000,// Shared with Buffer MD
	kIOMemoryUseReserve         = 0x00800000,// Shared with Buffer MD
	// Feature-test macro for the kIOMemoryUseReserve enumerator.
#define IOMEMORYUSERESERVEDEFINED 1

#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryBufferPurgeable    = 0x00400000,
	kIOMemoryBufferCacheMask    = 0x70000000,// cache mode, shifted by kIOMemoryBufferCacheShift
	kIOMemoryBufferCacheShift   = 28,
#endif
};
134
// NULL mapper selects the system mapper, if one is present.
#define kIOMapperSystem ((IOMapper *) NULL)

/*
 * Ledger tags for setOwnership() — direct aliases of the VM ledger tags.
 * NOTE(review): kIOmemoryLedgerTagNetwork has a lowercase 'm'; the name is
 * inconsistent with its siblings but is part of the published API, so it is
 * kept as-is for source compatibility.
 */
enum{
	kIOMemoryLedgerTagDefault       = VM_LEDGER_TAG_DEFAULT,
	kIOmemoryLedgerTagNetwork       = VM_LEDGER_TAG_NETWORK,
	kIOMemoryLedgerTagMedia         = VM_LEDGER_TAG_MEDIA,
	kIOMemoryLedgerTagGraphics      = VM_LEDGER_TAG_GRAPHICS,
	kIOMemoryLedgerTagNeural        = VM_LEDGER_TAG_NEURAL,
};
// Ledger accounting options for setOwnership().
enum{
	kIOMemoryLedgerFlagNoFootprint  = VM_LEDGER_FLAG_NO_FOOTPRINT,
};
147
/*
 * States and modifiers for setPurgeable(); see the method's documentation
 * on IOMemoryDescriptor for the exact semantics of each state.
 */
enum{
	kIOMemoryPurgeableKeepCurrent = 1,// query only; leave the state unchanged

	kIOMemoryPurgeableNonVolatile = 2,
	kIOMemoryPurgeableVolatile    = 3,
	kIOMemoryPurgeableEmpty       = 4,

	// modifiers for kIOMemoryPurgeableVolatile behavior —
	// direct aliases of the VM purgeable group/behavior/ordering constants
	kIOMemoryPurgeableVolatileGroup0           = VM_VOLATILE_GROUP_0,
	kIOMemoryPurgeableVolatileGroup1           = VM_VOLATILE_GROUP_1,
	kIOMemoryPurgeableVolatileGroup2           = VM_VOLATILE_GROUP_2,
	kIOMemoryPurgeableVolatileGroup3           = VM_VOLATILE_GROUP_3,
	kIOMemoryPurgeableVolatileGroup4           = VM_VOLATILE_GROUP_4,
	kIOMemoryPurgeableVolatileGroup5           = VM_VOLATILE_GROUP_5,
	kIOMemoryPurgeableVolatileGroup6           = VM_VOLATILE_GROUP_6,
	kIOMemoryPurgeableVolatileGroup7           = VM_VOLATILE_GROUP_7,
	kIOMemoryPurgeableVolatileBehaviorFifo     = VM_PURGABLE_BEHAVIOR_FIFO,
	kIOMemoryPurgeableVolatileBehaviorLifo     = VM_PURGABLE_BEHAVIOR_LIFO,
	kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
	kIOMemoryPurgeableVolatileOrderingNormal   = VM_PURGABLE_ORDERING_NORMAL,
	kIOMemoryPurgeableFaultOnAccess            = VM_PURGABLE_DEBUG_FAULT,
};
/*
 * Operation codes for performOperation(); see the method's documentation
 * for the cache-maintenance semantics of the flush/store operations.
 */
enum{
	kIOMemoryIncoherentIOFlush = 1,
	kIOMemoryIncoherentIOStore = 2,

	kIOMemoryClearEncrypted    = 50,
	kIOMemorySetEncrypted      = 51,
};
177
178 #define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1
179
/*
 * Constraints for a DMA mapping request (see dmaMap()).
 * Reserved fields pad the struct for future growth; layout is ABI.
 */
struct IODMAMapSpecification {
	uint64_t alignment;         // required alignment of the IOVM allocation
	IOService * device;         // device the mapping is made for
	uint32_t options;
	uint8_t  numAddressBits;    // addressability limit of the device (0 = no limit implied here)
	uint8_t  resvA[3];          // reserved
	uint32_t resvB[4];          // reserved
};
188
/*
 * A page list handed to the mapper: pageListCount entries starting at
 * pageList, with the first page used from byte offset pageOffset.
 */
struct IODMAMapPageList {
	uint32_t pageOffset;
	uint32_t pageListCount;
	const upl_page_info_t * pageList;   // not owned; caller retains the UPL page info
};
194
// mapOptions for iovmMapMemory
enum{
	// access protection of the mapping
	kIODMAMapReadAccess           = 0x00000001,
	kIODMAMapWriteAccess          = 0x00000002,
	kIODMAMapPhysicallyContiguous = 0x00000010,// allocation must be one contiguous IOVM range
	kIODMAMapDeviceMemory         = 0x00000020,
	kIODMAMapPagingPath           = 0x00000040,
	kIODMAMapIdentityMap          = 0x00000080,

	kIODMAMapPageListFullyOccupied = 0x00000100,
	kIODMAMapFixedAddress          = 0x00000200,// map at a caller-supplied IOVM address
};
207
208 #ifdef KERNEL_PRIVATE
209
// Used for dmaCommandOperation communications for IODMACommand and mappers.
// The high byte (kIOMDDMACommandOperationMask) selects the base operation;
// the low bits carry per-operation variants (e.g. "first segment").

enum {
	kIOMDWalkSegments             = 0x01000000,
	kIOMDFirstSegment             = 1 | kIOMDWalkSegments,// walk variant: restart at the first segment
	kIOMDGetCharacteristics       = 0x02000000,
	kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
	kIOMDDMAActive                = 0x03000000,
	kIOMDSetDMAActive             = 1 | kIOMDDMAActive,
	kIOMDSetDMAInactive           = kIOMDDMAActive, // intentionally aliases the base op (low bit clear = deactivate)
	kIOMDAddDMAMapSpec            = 0x04000000,
	kIOMDDMAMap                   = 0x05000000,
	kIOMDDMAUnmap                 = 0x06000000,
	kIOMDDMACommandOperationMask  = 0xFF000000,
};
// Result buffer for the kIOMDGetCharacteristics operation.
struct IOMDDMACharacteristics {
	UInt64 fLength;       // total byte length of the descriptor
	UInt32 fSGCount;      // scatter/gather segment count
	UInt32 fPages;
	UInt32 fPageAlign;
	ppnum_t fHighestPage; // highest physical page number touched
	IODirection fDirection;
	UInt8 fIsPrepared;    // non-zero when the descriptor is prepared
};
234
// Argument buffer for the kIOMDDMAMap / kIOMDDMAUnmap operations.
struct IOMDDMAMapArgs {
	IOMapper            * fMapper;      // mapper performing the IOVM mapping
	IODMACommand        * fCommand;
	IODMAMapSpecification fMapSpec;     // constraints for the allocation
	uint64_t              fOffset;      // byte offset into the descriptor
	uint64_t              fLength;      // byte length to map
	uint64_t              fAlloc;       // out: IOVM address of the mapping
	uint64_t              fAllocLength; // out: length actually mapped
};
244
// Argument buffer for the kIOMDWalkSegments operation.
struct IOMDDMAWalkSegmentArgs {
	UInt64 fOffset;            // Input/Output offset
	UInt64 fIOVMAddr, fLength; // Output variables
	UInt8 fMapped;             // Input Variable, Require mapped IOVMA
	UInt64 fMappedBase;        // Input base of mapping
};
// Opaque per-walk state kept by the descriptor between segment calls.
typedef UInt8 IOMDDMAWalkSegmentState[128];
252
253 #endif /* KERNEL_PRIVATE */
254
// Distinguished values returned by getPreparationID(); real preparation IDs
// are allocated above these sentinels.
enum{
	kIOPreparationIDUnprepared     = 0,// descriptor has not been prepared
	kIOPreparationIDUnsupported    = 1,// descriptor class does not track preparation IDs
	kIOPreparationIDAlwaysPrepared = 2,// descriptor is permanently prepared (e.g. physical)
};
260
261 #ifdef KERNEL_PRIVATE
262 #define kIODescriptorIDInvalid (0)
263 #endif
264
265 #ifdef XNU_KERNEL_PRIVATE
266 struct IOMemoryReference;
267 #endif
268
269
270 /*! @class IOMemoryDescriptor : public OSObject
271 * @abstract An abstract base class defining common methods for describing physical or virtual memory.
272 * @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */
273
274 class IOMemoryDescriptor : public OSObject
275 {
276 friend class IOMemoryMap;
277 friend class IOMultiMemoryDescriptor;
278
279 OSDeclareDefaultStructorsWithDispatch(IOMemoryDescriptor);
280
281 protected:
282
283 /*! @var reserved
284 * Reserved for future use. (Internal use only) */
285 struct IOMemoryDescriptorReserved * reserved;
286
287 protected:
288 OSPtr<OSSet> _mappings;
289 IOOptionBits _flags;
290
291
292 #ifdef XNU_KERNEL_PRIVATE
293 public:
294 struct IOMemoryReference * _memRef;
295 vm_tag_t _kernelTag;
296 vm_tag_t _userTag;
297 int16_t _dmaReferences;
298 uint16_t _internalFlags;
299 kern_allocation_name_t _mapName;
300 protected:
301 #else /* XNU_KERNEL_PRIVATE */
302 void * __iomd_reserved5;
303 uint16_t __iomd_reserved1[4];
304 uintptr_t __iomd_reserved2;
305 #endif /* XNU_KERNEL_PRIVATE */
306
307 uintptr_t __iomd_reserved3;
308 uintptr_t __iomd_reserved4;
309
310 #ifndef __LP64__
311 IODirection _direction; /* use _flags instead */
312 #endif /* !__LP64__ */
313 IOByteCount _length; /* length of all ranges */
314 IOOptionBits _tag;
315
316 public:
317 typedef IOOptionBits DMACommandOps;
318 #ifndef __LP64__
319 virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
320 IOByteCount * length ) APPLE_KEXT_DEPRECATED;
321 #endif /* !__LP64__ */
322
323 /*! @function initWithOptions
324 * @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions.
325 * @discussion Note this function can be used to re-init a previously created memory descriptor.
326 * @result true on success, false on failure. */
327 virtual bool initWithOptions(void * buffers,
328 UInt32 count,
329 UInt32 offset,
330 task_t task,
331 IOOptionBits options,
332 IOMapper * mapper = kIOMapperSystem);
333
334 #ifndef __LP64__
335 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
336 IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
337 #endif /* !__LP64__ */
338
339 /*! @function setPurgeable
340 * @abstract Control the purgeable status of a memory descriptors memory.
341 * @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
342 * @param newState - the desired new purgeable state of the memory:<br>
343 * kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
344 * kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
345 * kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
346 * kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it.
347 * @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
348 * kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
349 * kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.<br>
350 * kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.<br>
351 * @result An IOReturn code. */
352
353 virtual IOReturn setPurgeable( IOOptionBits newState,
354 IOOptionBits * oldState );
355
356 /*! @function setOwnership
357 * @abstract Control the ownership of a memory descriptors memory.
358 * @discussion IOBufferMemoryDescriptor are owned by a specific task. The ownership of such a buffer may be controlled with setOwnership().
359 * @param newOwner - the task to be the new owner of the memory.
360 * @param newLedgerTag - the ledger this memory should be accounted in.
361 * @param newLedgerOptions - accounting options
362 * @result An IOReturn code. */
363
364 IOReturn setOwnership( task_t newOwner,
365 int newLedgerTag,
366 IOOptionBits newLedgerOptions );
367
368 /*! @function getPageCounts
369 * @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
370 * @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
371 * @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
372 * @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
373 * @result An IOReturn code. */
374
375 IOReturn getPageCounts( IOByteCount * residentPageCount,
376 IOByteCount * dirtyPageCount);
377
378 /*! @function performOperation
379 * @abstract Perform an operation on the memory descriptor's memory.
380 * @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
381 * @param options The operation to perform on the memory:<br>
382 * kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
383 * kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
384 * @param offset A byte offset into the memory descriptor's memory.
385 * @param length The length of the data range.
386 * @result An IOReturn code. */
387
388 virtual IOReturn performOperation( IOOptionBits options,
389 IOByteCount offset, IOByteCount length );
390
391 // Used for dedicated communications for IODMACommand
392 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;
393
394 /*! @function getPhysicalSegment
395 * @abstract Break a memory descriptor into its physically contiguous segments.
396 * @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
397 * @param offset A byte offset into the memory whose physical address to return.
 * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
399 * @result A physical address, or zero if the offset is beyond the length of the memory. */
400
401 #ifdef __LP64__
402 virtual addr64_t getPhysicalSegment( IOByteCount offset,
403 IOByteCount * length,
404 IOOptionBits options = 0 ) = 0;
405 #else /* !__LP64__ */
406 virtual addr64_t getPhysicalSegment( IOByteCount offset,
407 IOByteCount * length,
408 IOOptionBits options );
409 #endif /* !__LP64__ */
410
411 virtual uint64_t getPreparationID( void );
412 void setPreparationID( void );
413
414 void setVMTags(uint32_t kernelTag, uint32_t userTag);
415 uint32_t getVMTag(vm_map_t map);
416
417 #ifdef KERNEL_PRIVATE
418 uint64_t getDescriptorID( void );
419 void setDescriptorID( void );
420
421 IOReturn ktraceEmitPhysicalSegments( void );
422 #endif
423
424 #ifdef XNU_KERNEL_PRIVATE
425 IOMemoryDescriptorReserved * getKernelReserved( void );
426 void cleanKernelReserved(IOMemoryDescriptorReserved * reserved);
427 IOReturn dmaMap(
428 IOMapper * mapper,
429 IOMemoryDescriptor * memory,
430 IODMACommand * command,
431 const IODMAMapSpecification * mapSpec,
432 uint64_t offset,
433 uint64_t length,
434 uint64_t * mapAddress,
435 uint64_t * mapLength);
436 IOReturn dmaUnmap(
437 IOMapper * mapper,
438 IODMACommand * command,
439 uint64_t offset,
440 uint64_t mapAddress,
441 uint64_t mapLength);
442 void dmaMapRecord(
443 IOMapper * mapper,
444 IODMACommand * command,
445 uint64_t mapLength);
446 #endif
447
448 private:
449 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 0);
450 #ifdef __LP64__
451 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
452 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
453 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
454 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
455 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
456 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
457 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
458 #else /* !__LP64__ */
459 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 1);
460 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 2);
461 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 3);
462 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 4);
463 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 5);
464 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 6);
465 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 7);
466 #endif /* !__LP64__ */
467 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
468 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
469 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
470 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
471 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
472 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
473 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
474 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);
475
476 protected:
477 virtual void free(void) APPLE_KEXT_OVERRIDE;
478 public:
479 static void initialize( void );
480
481 public:
482 /*! @function withAddress
483 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
484 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
485 * @param address The virtual address of the first byte in the memory.
486 * @param withLength The length of memory.
487 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
488 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
489
490 static OSPtr<IOMemoryDescriptor> withAddress(void * address,
491 IOByteCount withLength,
492 IODirection withDirection);
493
494 #ifndef __LP64__
495 static OSPtr<IOMemoryDescriptor> withAddress(IOVirtualAddress address,
496 IOByteCount withLength,
497 IODirection withDirection,
498 task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */
499 #endif /* !__LP64__ */
500
501 /*! @function withPhysicalAddress
502 * @abstract Create an IOMemoryDescriptor to describe one physical range.
503 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
504 * @param address The physical address of the first byte in the memory.
505 * @param withLength The length of memory.
506 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
507 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
508
509 static OSPtr<IOMemoryDescriptor> withPhysicalAddress(
510 IOPhysicalAddress address,
511 IOByteCount withLength,
512 IODirection withDirection );
513
514 #ifndef __LP64__
515 static OSPtr<IOMemoryDescriptor> withRanges(IOVirtualRange * ranges,
516 UInt32 withCount,
517 IODirection withDirection,
518 task_t withTask,
519 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */
520 #endif /* !__LP64__ */
521
522 /*! @function withAddressRange
523 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
524 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
525 * @param address The virtual address of the first byte in the memory.
526 * @param length The length of memory.
527 * @param options
528 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
529 * @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
530 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
531
532 static OSPtr<IOMemoryDescriptor> withAddressRange(
533 mach_vm_address_t address,
534 mach_vm_size_t length,
535 IOOptionBits options,
536 task_t task);
537
538 /*! @function withAddressRanges
539 * @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
540 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
541 * @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
542 * @param rangeCount The member count of the ranges array.
543 * @param options
544 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
545 * kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
546 * @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
547 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
548
549 static OSPtr<IOMemoryDescriptor> withAddressRanges(
550 IOAddressRange * ranges,
551 UInt32 rangeCount,
552 IOOptionBits options,
553 task_t task);
554
555 /*! @function withOptions
556 * @abstract Master initialiser for all variants of memory descriptors.
557 * @discussion This method creates and initializes an IOMemoryDescriptor for memory it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
558 *
559 *
560 * @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.
561 *
 * @param count options:type = Virtual or Physical count contains a count of the number of entries in the buffers array. For options:type = UPL this field contains a total length.
563 *
564 * @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.
565 *
566 * @param task Only used options:type = Virtual, The task each of the virtual ranges are mapped into.
567 *
568 * @param options
569 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
570 * kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates that what type of memory basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
571 * kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
572 * kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.
573 *
574 * @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present.
575 *
576 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
577
578 static OSPtr<IOMemoryDescriptor> withOptions(void * buffers,
579 UInt32 count,
580 UInt32 offset,
581 task_t task,
582 IOOptionBits options,
583 IOMapper * mapper = kIOMapperSystem);
584
585 #ifndef __LP64__
586 static OSPtr<IOMemoryDescriptor> withPhysicalRanges(
587 IOPhysicalRange * ranges,
588 UInt32 withCount,
589 IODirection withDirection,
590 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */
591 #endif /* !__LP64__ */
592
593 #ifndef __LP64__
594 static OSPtr<IOMemoryDescriptor> withSubRange(IOMemoryDescriptor *of,
595 IOByteCount offset,
596 IOByteCount length,
597 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
598 #endif /* !__LP64__ */
599
600 /*! @function withPersistentMemoryDescriptor
601 * @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
602 * @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and the reallocated memory at the same address then the original memory descriptor is returned with a additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
603 * @param originalMD The memory descriptor to be duplicated.
604 * @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. */
605 static OSPtr<IOMemoryDescriptor>
606 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);
607
608 #ifndef __LP64__
609 // obsolete initializers
610 // - initWithOptions is the designated initializer
611 virtual bool initWithAddress(void * address,
612 IOByteCount withLength,
613 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
614 virtual bool initWithAddress(IOVirtualAddress address,
615 IOByteCount withLength,
616 IODirection withDirection,
617 task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
618 virtual bool initWithPhysicalAddress(
619 IOPhysicalAddress address,
620 IOByteCount withLength,
621 IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
622 virtual bool initWithRanges(IOVirtualRange * ranges,
623 UInt32 withCount,
624 IODirection withDirection,
625 task_t withTask,
626 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
627 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
628 UInt32 withCount,
629 IODirection withDirection,
630 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
631 #endif /* __LP64__ */
632
/*! @function getDirection
 * @abstract Accessor to get the direction the memory descriptor was created with.
 * @discussion This method returns the direction the memory descriptor was created with.
 * @result The direction. */

virtual IODirection getDirection() const;

/*! @function getLength
 * @abstract Accessor to get the length of the memory descriptor (over all its ranges).
 * @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
 * @result The byte count. */

virtual IOByteCount getLength() const;

// Clients may test this macro at compile time to detect availability of getDMAMapLength().
#define IOMEMORYDESCRIPTOR_SUPPORTS_GETDMAMAPLENGTH

/*! @function getDMAMapLength
 * @abstract Accessor to the length of the memory as it would be DMA mapped.
 * @param offset If non-NULL, receives the map offset of the memory. (NOTE(review): presumably the offset of the memory within its first mapped page - confirm with the implementation.)
 * @result The DMA map length in bytes. */
uint64_t getDMAMapLength(uint64_t * offset = NULL);

/*! @function setTag
 * @abstract Set the tag for the memory descriptor.
 * @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
 * @param tag The tag. */

virtual void setTag( IOOptionBits tag );

/*! @function getTag
 * @abstract Accessor to retrieve the tag for the memory descriptor.
 * @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
 * @result The tag. */

virtual IOOptionBits getTag( void );

/*! @function getFlags
 * @abstract Accessor to retrieve the options the memory descriptor was created with.
 * @discussion Accessor to retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum.
 * @result The flags bitfield. */

uint64_t getFlags(void);
670
/*! @function readBytes
 * @abstract Copy data from the memory descriptor's buffer to the specified buffer.
 * @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
 * @param offset A byte offset into the memory descriptor's memory.
 * @param bytes The caller supplied buffer to copy the data to.
 * @param withLength The length of the data to copy.
 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

virtual IOByteCount readBytes(IOByteCount offset,
    void * bytes, IOByteCount withLength);

/*! @function writeBytes
 * @abstract Copy data to the memory descriptor's buffer from the specified buffer.
 * @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
 * @param offset A byte offset into the memory descriptor's memory.
 * @param bytes The caller supplied buffer to copy the data from.
 * @param withLength The length of the data to copy.
 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

virtual IOByteCount writeBytes(IOByteCount offset,
    const void * bytes, IOByteCount withLength);

#ifndef __LP64__
// 32-bit kernels only; superseded by the 64-bit getPhysicalSegment() overload.
virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 * @abstract Return the physical address of the first byte in the memory.
 * @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
 * @result A physical address. */

IOPhysicalAddress getPhysicalAddress();

#ifndef __LP64__
virtual void * getVirtualSegment(IOByteCount offset,
    IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */
#endif /* !__LP64__ */

/*! @function prepare
 * @abstract Prepare the memory for an I/O transfer.
 * @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor.
 * @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
 * @result An IOReturn code. */

virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;

/*! @function complete
 * @abstract Complete processing of the memory after an I/O transfer finishes.
 * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is no longer respected. The direction is totally determined at prepare() time.
 * @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
 * @result An IOReturn code. */

virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
725
/*
 * Mapping functions.
 */

/*! @function createMappingInTask
 * @abstract Maps an IOMemoryDescriptor into a task.
 * @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
 * @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
 * @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
 * @param options Mapping options are defined in IOTypes.h,<br>
 * kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
 * kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
 * kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
 * kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.<br>
 * kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
 * kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
 * kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
 * @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
 * @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
 * @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */

OSPtr<IOMemoryMap> createMappingInTask(
	task_t intoTask,
	mach_vm_address_t atAddress,
	IOOptionBits options,
	mach_vm_size_t offset = 0,
	mach_vm_size_t length = 0 );

#ifndef __LP64__
virtual OSPtr<IOMemoryMap> map(
	task_t intoTask,
	IOVirtualAddress atAddress,
	IOOptionBits options,
	IOByteCount offset = 0,
	IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED;/* use createMappingInTask() instead */
#endif /* !__LP64__ */

/*! @function map
 * @abstract Maps an IOMemoryDescriptor into the kernel map.
 * @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
 * @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
 * @result See the full version of the createMappingInTask method. */

virtual OSPtr<IOMemoryMap> map(
	IOOptionBits options = 0 );

/*! @function setMapping
 * @abstract Establishes an already existing mapping.
 * @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
 * @param task Address space in which the mapping exists.
 * @param mapAddress Virtual address of the mapping.
 * @param options Caching and read-only attributes of the mapping.
 * @result A IOMemoryMap object created to represent the mapping. */

virtual OSPtr<IOMemoryMap> setMapping(
	task_t task,
	IOVirtualAddress mapAddress,
	IOOptionBits options = 0 );
784
// Following methods are private implementation

#ifdef __LP64__
virtual
#endif /* __LP64__ */
// NOTE(review): appears to suspend/resume existing mappings of this descriptor
// for safeTask - confirm with the implementation.
IOReturn redirect( task_t safeTask, bool redirect );

// Device pager fault handling (implementation detail; _pager is opaque here).
IOReturn handleFault(
	void * _pager,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length);

// Populate a device pager's backing for the given address range
// (implementation detail used by the mapping machinery).
IOReturn populateDevicePager(
	void * pager,
	vm_map_t addressMap,
	mach_vm_address_t address,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length,
	IOOptionBits options );

// Create (or find a compatible) IOMemoryMap for this descriptor. The result
// is returned without an additional retain (LIBKERN_RETURNS_NOT_RETAINED).
virtual LIBKERN_RETURNS_NOT_RETAINED IOMemoryMap * makeMapping(
	IOMemoryDescriptor * owner,
	task_t intoTask,
	IOVirtualAddress atAddress,
	IOOptionBits options,
	IOByteCount offset,
	IOByteCount length );

#if KERNEL_PRIVATE
/*! @function copyContext
 * @abstract Accessor to retrieve the context previously set for the memory descriptor.
 * @discussion This method returns the context for the memory descriptor. The context is not interpreted by IOMemoryDescriptor.
 * @result The context, returned with an additional retain to be released by the caller. */
OSObject * copyContext(void) const;

/*! @function setContext
 * @abstract Set a context object for the memory descriptor. The context is not interpreted by IOMemoryDescriptor.
 * @discussion The context is retained, and will be released when the memory descriptor is freed or when a new context object is set.
 */
void setContext(OSObject * context);
#endif

protected:
// Bookkeeping hooks: register/unregister an IOMemoryMap created for this
// descriptor (called by the mapping machinery, not by clients).
virtual void addMapping(
	IOMemoryMap * mapping );

virtual void removeMapping(
	IOMemoryMap * mapping );

// Back-end for map(): performs the actual mapping into addressMap;
// overridden by subclasses.
virtual IOReturn doMap(
	vm_map_t addressMap,
	IOVirtualAddress * atAddress,
	IOOptionBits options,
	IOByteCount sourceOffset = 0,
	IOByteCount length = 0 );

// Back-end for unmapping: removes the mapping at the given logical address.
virtual IOReturn doUnmap(
	vm_map_t addressMap,
	IOVirtualAddress logical,
	IOByteCount length );
};
846
847 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
848
/*! @class IOMemoryMap : public OSObject
 * @abstract A class defining common methods for describing a memory mapping.
 * @discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */

class IOMemoryMap : public OSObject
{
OSDeclareDefaultStructorsWithDispatch(IOMemoryMap);
#ifdef XNU_KERNEL_PRIVATE
public:
IOOptionBits fOptions;                  // kIOMap* options the mapping was created with
OSPtr<IOMemoryDescriptor> fMemory;      // descriptor this map was created from (retained)
OSPtr<IOMemoryMap> fSuperMap;           // NOTE(review): presumably the enclosing map when this is a sub-mapping - confirm
mach_vm_size_t fOffset;                 // byte offset into fMemory where the mapping starts
mach_vm_address_t fAddress;             // virtual address of the mapping
mach_vm_size_t fLength;                 // length of the mapping in bytes
task_t fAddressTask;                    // task the mapping exists in
vm_map_t fAddressMap;                   // vm_map the mapping lives in
upl_t fRedirUPL;                        // NOTE(review): UPL used while the map is redirected - confirm
uint8_t fUserClientUnmap;               // flag consumed by userClientUnmap()
#if IOTRACKING
IOTrackingUser fTracking;               // allocation tracking record (IOTRACKING builds only)
#endif
#endif /* XNU_KERNEL_PRIVATE */

protected:
virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
virtual void free(void) APPLE_KEXT_OVERRIDE;

public:
/*! @function getVirtualAddress
 * @abstract Accessor to the virtual address of the first byte in the mapping.
 * @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
 * @result A virtual address. */

virtual IOVirtualAddress getVirtualAddress(void);

/*! @function getPhysicalSegment
 * @abstract Break a mapping into its physically contiguous segments.
 * @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
 * @param offset A byte offset into the mapping whose physical address to return.
 * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
 * @result A physical address, or zero if the offset is beyond the length of the mapping. */

#ifdef __LP64__
virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
    IOByteCount * length,
    IOOptionBits options = 0);
#else /* !__LP64__ */
virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 * @abstract Return the physical address of the first byte in the mapping.
 * @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
 * @result A physical address. */

IOPhysicalAddress getPhysicalAddress(void);

/*! @function getLength
 * @abstract Accessor to the length of the mapping.
 * @discussion This method returns the length of the mapping.
 * @result A byte count. */

virtual IOByteCount getLength(void);

/*! @function getAddressTask
 * @abstract Accessor to the task of the mapping.
 * @discussion This method returns the mach task the mapping exists in.
 * @result A mach task_t. */

virtual task_t getAddressTask();

/*! @function getMemoryDescriptor
 * @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
 * @discussion This method returns the IOMemoryDescriptor the mapping was created from.
 * @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */

virtual IOMemoryDescriptor * getMemoryDescriptor();

/*! @function getMapOptions
 * @abstract Accessor to the options the mapping was created with.
 * @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
 * @result Options for the mapping, including cache settings. */

virtual IOOptionBits getMapOptions();

/*! @function unmap
 * @abstract Force the IOMemoryMap to unmap, without destroying the object.
 * @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
 * @result An IOReturn code. */

virtual IOReturn unmap();

// Notification that the task owning the mapping has died.
virtual void taskDied();

/*! @function redirect
 * @abstract Replace the memory mapped in a process with new backing memory.
 * @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
 * @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
 * @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
 * @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
 * @result An IOReturn code. */

#ifndef __LP64__
// For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
// for 64 bit, these fall together on the 64 bit one.
virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    IOByteCount offset = 0);
#endif
virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    mach_vm_size_t offset = 0);

#ifdef __LP64__
/*! @function getAddress
 * @abstract Accessor to the virtual address of the first byte in the mapping.
 * @discussion This method returns the virtual address of the first byte in the mapping.
 * @result A virtual address. */
inline mach_vm_address_t getAddress() __attribute__((always_inline));
/*! @function getSize
 * @abstract Accessor to the length of the mapping.
 * @discussion This method returns the length of the mapping.
 * @result A byte count. */
inline mach_vm_size_t getSize() __attribute__((always_inline));
#else /* !__LP64__ */
/*! @function getAddress
 * @abstract Accessor to the virtual address of the first byte in the mapping.
 * @discussion This method returns the virtual address of the first byte in the mapping.
 * @result A virtual address. */
virtual mach_vm_address_t getAddress();
/*! @function getSize
 * @abstract Accessor to the length of the mapping.
 * @discussion This method returns the length of the mapping.
 * @result A byte count. */
virtual mach_vm_size_t getSize();
#endif /* !__LP64__ */

#ifdef XNU_KERNEL_PRIVATE
// for IOMemoryDescriptor use
IOMemoryMap * copyCompatible( IOMemoryMap * newMapping );

bool init(
	task_t intoTask,
	mach_vm_address_t toAddress,
	IOOptionBits options,
	mach_vm_size_t offset,
	mach_vm_size_t length );

// Attach the descriptor (and starting offset) this map represents.
bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);

IOReturn redirect(
	task_t intoTask, bool redirect );

// Unmap on behalf of a user client (sets/consumes fUserClientUnmap).
IOReturn userClientUnmap();
#endif /* XNU_KERNEL_PRIVATE */

// Wire down (or unwire) a sub-range of the mapping.
IOReturn wireRange(
	uint32_t options,
	mach_vm_size_t offset,
	mach_vm_size_t length);

// Reserved vtable slots for future binary-compatible expansion.
OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
};
1021
1022 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1023 #ifdef XNU_KERNEL_PRIVATE
// Also these flags should not overlap with the options to
// IOMemoryDescriptor::initWithRanges(... IOOptionBits options);
enum {
	// Internal-use flag bit; must not collide with the public kIOMemory*
	// option bits (see the comment above).
	_kIOMemorySourceSegment = 0x00002000
};
1029 #endif /* XNU_KERNEL_PRIVATE */
1030
// The following classes are private implementation of IOMemoryDescriptor - they
// should not be referenced directly, just through the public APIs in the
// IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
// might be created by IOMemoryDescriptor::withAddressRange(), but there should be
// no need to reference it as anything but a generic IOMemoryDescriptor *.
1036
class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
{
OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);

public:
// Overlay of the possible range-list representations. NOTE(review): which
// member is active presumably depends on the memory-type options the
// descriptor was initialized with - confirm with the implementation.
union Ranges {
	IOVirtualRange *v;
	IOAddressRange *v64;
	IOPhysicalRange *p;
	void *uio;
};
protected:
Ranges _ranges;
unsigned _rangesCount;  /* number of address ranges in list */
#ifndef __LP64__
bool _rangesIsAllocated;/* is list allocated by us? */
#endif /* !__LP64__ */

task_t _task;           /* task where all ranges are mapped to */

union {
	IOVirtualRange v;
	IOPhysicalRange p;
} _singleRange;         /* storage space for a single range */

unsigned _wireCount;    /* number of outstanding wires */

#ifndef __LP64__
uintptr_t _cachedVirtualAddress;

IOPhysicalAddress _cachedPhysicalAddress;
#endif /* !__LP64__ */

bool _initialized;      /* has superclass been initialized? */

public:
virtual void free() APPLE_KEXT_OVERRIDE;

virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;

virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;

#ifdef XNU_KERNEL_PRIVATE
// Internal APIs may be made virtual at some time in the future.
IOReturn wireVirtual(IODirection forDirection);
IOReturn dmaMap(
	IOMapper * mapper,
	IOMemoryDescriptor * memory,
	IODMACommand * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t offset,
	uint64_t length,
	uint64_t * mapAddress,
	uint64_t * mapLength);
bool initMemoryEntries(size_t size, IOMapper * mapper);

// Allocate (or grow, when realloc is non-NULL) a memory reference with the
// given capacity.
IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
    IOMemoryReference * realloc);
void memoryReferenceFree(IOMemoryReference * ref);
void memoryReferenceRelease(IOMemoryReference * ref);

IOReturn memoryReferenceCreate(
	IOOptionBits options,
	IOMemoryReference ** reference);

IOReturn memoryReferenceMap(IOMemoryReference * ref,
    vm_map_t map,
    mach_vm_size_t inoffset,
    mach_vm_size_t size,
    IOOptionBits options,
    mach_vm_address_t * inaddr);

// Newer variant of memoryReferenceMap() with the same signature.
// NOTE(review): semantic difference from memoryReferenceMap() is not visible
// here - consult the implementation before choosing between them.
IOReturn memoryReferenceMapNew(IOMemoryReference * ref,
    vm_map_t map,
    mach_vm_size_t inoffset,
    mach_vm_size_t size,
    IOOptionBits options,
    mach_vm_address_t * inaddr);

static IOReturn memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits newState,
	IOOptionBits * oldState);
static IOReturn memoryReferenceSetOwnership(
	IOMemoryReference * ref,
	task_t newOwner,
	int newLedgerTag,
	IOOptionBits newLedgerOptions);
static IOReturn memoryReferenceGetPageCounts(
	IOMemoryReference * ref,
	IOByteCount * residentPageCount,
	IOByteCount * dirtyPageCount);

static uint64_t memoryReferenceGetDMAMapLength(
	IOMemoryReference * ref,
	uint64_t * offset);

#endif

private:

#ifndef __LP64__
virtual void setPosition(IOByteCount position);
virtual void mapIntoKernel(unsigned rangeIndex);
virtual void unmapFromKernel();
#endif /* !__LP64__ */

// Internal
OSPtr<_IOMemoryDescriptorMixedData> _memoryEntries;
unsigned int _pages;            // NOTE(review): presumably the total page count of the descriptor - confirm
ppnum_t _highestPage;           // NOTE(review): presumably the highest physical page number touched - confirm
uint32_t __iomd_reservedA;      // reserved for future use
uint32_t __iomd_reservedB;      // reserved for future use

IOLock * _prepareLock;          // NOTE(review): presumably serializes prepare()/complete() - confirm

public:
/*
 * IOMemoryDescriptor required methods
 */

// Master initialiser
virtual bool initWithOptions(void * buffers,
    UInt32 count,
    UInt32 offset,
    task_t task,
    IOOptionBits options,
    IOMapper * mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;

#ifndef __LP64__
// Secondary initialisers
virtual bool initWithAddress(void * address,
    IOByteCount withLength,
    IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

virtual bool initWithAddress(IOVirtualAddress address,
    IOByteCount withLength,
    IODirection withDirection,
    task_t withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

virtual bool initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount withLength,
	IODirection withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

virtual bool initWithRanges( IOVirtualRange * ranges,
    UInt32 withCount,
    IODirection withDirection,
    task_t withTask,
    bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
    UInt32 withCount,
    IODirection withDirection,
    bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

virtual addr64_t getPhysicalSegment64( IOByteCount offset,
    IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
    IOByteCount * length) APPLE_KEXT_OVERRIDE;

virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

virtual void * getVirtualSegment(IOByteCount offset,
    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */

virtual IOReturn setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;

IOReturn setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions );

virtual addr64_t getPhysicalSegment( IOByteCount offset,
    IOByteCount * length,
#ifdef __LP64__
    IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE;
#else /* !__LP64__ */
    IOOptionBits options)APPLE_KEXT_OVERRIDE;
#endif /* !__LP64__ */

virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

virtual IOReturn doMap(
	vm_map_t addressMap,
	IOVirtualAddress * atAddress,
	IOOptionBits options,
	IOByteCount sourceOffset = 0,
	IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE;

virtual IOReturn doUnmap(
	vm_map_t addressMap,
	IOVirtualAddress logical,
	IOByteCount length ) APPLE_KEXT_OVERRIDE;

virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;

// Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
static OSPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);
};
1243
1244 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1245
1246 #ifdef __LP64__
1247 mach_vm_address_t
getAddress()1248 IOMemoryMap::getAddress()
1249 {
1250 return getVirtualAddress();
1251 }
1252
1253 mach_vm_size_t
getSize()1254 IOMemoryMap::getSize()
1255 {
1256 return getLength();
1257 }
1258 #else /* !__LP64__ */
1259 #include <IOKit/IOSubMemoryDescriptor.h>
1260 #endif /* !__LP64__ */
1261
1262 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1263
1264 extern bool iokit_iomd_setownership_enabled;
1265
1266 #endif /* !_IOMEMORYDESCRIPTOR_H */
1267