1 /*
2 * Copyright (c) 1998-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifndef _IOMEMORYDESCRIPTOR_H
29 #define _IOMEMORYDESCRIPTOR_H
30
31 #include <sys/cdefs.h>
32
33 #include <IOKit/IOTypes.h>
34 #include <IOKit/IOLocks.h>
35 #include <libkern/c++/OSPtr.h>
36 #include <libkern/c++/OSContainers.h>
37 #include <DriverKit/IOMemoryDescriptor.h>
38 #include <DriverKit/IOMemoryMap.h>
39 #ifdef XNU_KERNEL_PRIVATE
40 #include <IOKit/IOKitDebug.h>
41 #endif
42
43 #include <mach/memory_object_types.h>
44
45 class IOMemoryDescriptor;
46 class IOMemoryMap;
47 class IOMapper;
48 class IOService;
49 class IODMACommand;
50 class _IOMemoryDescriptorMixedData;
51
/*
 * Direction of transfer, with respect to the described memory.
 *
 * On __LP64__ kernels the enum is anonymous and the IODirection type is a
 * typedef of IOOptionBits (see below); on 32-bit kernels IODirection is the
 * enum type itself.  The IODIRECTION* macros let clients test at preprocess
 * time whether a given flag exists in this SDK.
 */
#ifdef __LP64__
enum
#else /* !__LP64__ */
enum IODirection
#endif /* !__LP64__ */
{
	kIODirectionNone  = 0x0,// same as VM_PROT_NONE
	kIODirectionIn    = 0x1,// User land 'read', same as VM_PROT_READ
	kIODirectionOut   = 0x2,// User land 'write', same as VM_PROT_WRITE
	kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
	kIODirectionInOut = kIODirectionIn | kIODirectionOut,

	// these flags are valid for the prepare() method only
	kIODirectionPrepareToPhys32    = 0x00000004,
	kIODirectionPrepareNoFault     = 0x00000008,
	kIODirectionPrepareReserved1   = 0x00000010,
#define IODIRECTIONPREPARENONCOHERENTDEFINED 1
	kIODirectionPrepareNonCoherent = 0x00000020,
#if KERNEL_PRIVATE
#define IODIRECTIONPREPAREAVOIDTHROTTLING 1
	kIODirectionPrepareAvoidThrottling = 0x00000100,
#endif

	// these flags are valid for the complete() method only
#define IODIRECTIONCOMPLETEWITHERRORDEFINED 1
	kIODirectionCompleteWithError     = 0x00000040,
#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
	kIODirectionCompleteWithDataValid = 0x00000080,
};

#ifdef __LP64__
typedef IOOptionBits IODirection;
#endif /* __LP64__ */
88
/*
 * IOOptionBits used in the withOptions variant.
 *
 * Layout of the option word:
 *  - bits 0..2 (kIOMemoryDirectionMask) carry the kIODirection* value.
 *  - bits 4..7 (kIOMemoryTypeMask) select the descriptor type, which also
 *    controls how the buffers/count/offset/task arguments to withOptions()
 *    are interpreted (see IOMemoryDescriptor::withOptions below).
 *  - the remaining bits are independent behavior flags.
 */
enum {
	kIOMemoryDirectionMask  = 0x00000007,
#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryAutoPrepare    = 0x00000008,// Shared with Buffer MD
#endif

	kIOMemoryTypeVirtual    = 0x00000010,
	kIOMemoryTypePhysical   = 0x00000020,
	kIOMemoryTypeUPL        = 0x00000030,
	kIOMemoryTypePersistentMD = 0x00000040,// Persistent Memory Descriptor
	kIOMemoryTypeUIO        = 0x00000050,
#ifdef __LP64__
	// On LP64 kernels all ranges are 64-bit, so the 64-bit type codes
	// alias the base type codes.
	kIOMemoryTypeVirtual64  = kIOMemoryTypeVirtual,
	kIOMemoryTypePhysical64 = kIOMemoryTypePhysical,
#else /* !__LP64__ */
	kIOMemoryTypeVirtual64  = 0x00000060,
	kIOMemoryTypePhysical64 = 0x00000070,
#endif /* !__LP64__ */
	kIOMemoryTypeMask       = 0x000000f0,

	// Descriptor need not copy the caller's ranges array into local memory
	// (see withAddressRanges()).
	kIOMemoryAsReference    = 0x00000100,
	kIOMemoryBufferPageable = 0x00000400,
	kIOMemoryMapperNone     = 0x00000800,// Shared with Buffer MD
	kIOMemoryHostOnly       = 0x00001000,// Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryRedirected     = 0x00004000,
	kIOMemoryPreparedReadOnly = 0x00008000,
#endif
	kIOMemoryPersistent     = 0x00010000,
	kIOMemoryMapCopyOnWrite = 0x00020000,
	kIOMemoryRemote         = 0x00040000,
	kIOMemoryThreadSafe     = 0x00100000,// Shared with Buffer MD
	kIOMemoryClearEncrypt   = 0x00200000,// Shared with Buffer MD
	kIOMemoryUseReserve     = 0x00800000,// Shared with Buffer MD
#define IOMEMORYUSERESERVEDEFINED 1

#ifdef XNU_KERNEL_PRIVATE
	kIOMemoryBufferPurgeable  = 0x00400000,
	kIOMemoryBufferCacheMask  = 0x70000000,
	kIOMemoryBufferCacheShift = 28,
#endif
};
134
135 #define kIOMapperSystem ((IOMapper *) NULL)
136
137 enum{
138 kIOMemoryLedgerTagDefault = VM_LEDGER_TAG_DEFAULT,
139 kIOmemoryLedgerTagNetwork = VM_LEDGER_TAG_NETWORK,
140 kIOMemoryLedgerTagMedia = VM_LEDGER_TAG_MEDIA,
141 kIOMemoryLedgerTagGraphics = VM_LEDGER_TAG_GRAPHICS,
142 kIOMemoryLedgerTagNeural = VM_LEDGER_TAG_NEURAL,
143 };
144 enum{
145 kIOMemoryLedgerFlagNoFootprint = VM_LEDGER_FLAG_NO_FOOTPRINT,
146 };
147
/*
 * Purgeable states for IOMemoryDescriptor::setPurgeable(); see that method's
 * documentation below for the full state-machine description.
 */
enum{
	kIOMemoryPurgeableKeepCurrent = 1,      // make no change; query the current state

	kIOMemoryPurgeableNonVolatile = 2,      // contents preserved (saved to backing store if paged)
	kIOMemoryPurgeableVolatile    = 3,      // VM may reclaim the pages without saving contents
	kIOMemoryPurgeableEmpty       = 4,      // make volatile AND discard any pages now

	// modifiers for kIOMemoryPurgeableVolatile behavior
	kIOMemoryPurgeableVolatileGroup0           = VM_VOLATILE_GROUP_0,
	kIOMemoryPurgeableVolatileGroup1           = VM_VOLATILE_GROUP_1,
	kIOMemoryPurgeableVolatileGroup2           = VM_VOLATILE_GROUP_2,
	kIOMemoryPurgeableVolatileGroup3           = VM_VOLATILE_GROUP_3,
	kIOMemoryPurgeableVolatileGroup4           = VM_VOLATILE_GROUP_4,
	kIOMemoryPurgeableVolatileGroup5           = VM_VOLATILE_GROUP_5,
	kIOMemoryPurgeableVolatileGroup6           = VM_VOLATILE_GROUP_6,
	kIOMemoryPurgeableVolatileGroup7           = VM_VOLATILE_GROUP_7,
	kIOMemoryPurgeableVolatileBehaviorFifo     = VM_PURGABLE_BEHAVIOR_FIFO,
	kIOMemoryPurgeableVolatileBehaviorLifo     = VM_PURGABLE_BEHAVIOR_LIFO,
	kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
	kIOMemoryPurgeableVolatileOrderingNormal   = VM_PURGABLE_ORDERING_NORMAL,
	kIOMemoryPurgeableFaultOnAccess            = VM_PURGABLE_DEBUG_FAULT,
};
/*
 * Operation codes for IOMemoryDescriptor::performOperation(); see that
 * method's documentation below for the cache-synchronization semantics.
 */
enum{
	kIOMemoryIncoherentIOFlush = 1, // store to memory and flush processor cache for the range
	kIOMemoryIncoherentIOStore = 2, // store to memory any cached data for the range

	kIOMemoryClearEncrypted = 50,
	kIOMemorySetEncrypted   = 51,
};
177
178 #define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1
179
/*
 * Constraints a device/mapper places on a DMA mapping; consumed by dmaMap()
 * and by the kIOMDAddDMAMapSpec / kIOMDDMAMap command operations.
 */
struct IODMAMapSpecification {
	uint64_t    alignment;          // required byte alignment of the mapping
	IOService * device;             // device the mapping is created for
	uint32_t    options;            // presumably kIODMAMap* option bits — confirm against mapper code
	uint8_t     numAddressBits;     // address width the device supports (assumed; 0 likely means unrestricted)
	uint8_t     resvA[3];           // reserved / padding; keeps the layout stable
	uint32_t    resvB[4];           // reserved for future use
};
188
/*
 * Describes a run of physical pages to be DMA mapped, as a UPL page-info
 * array plus a starting offset into the first page.
 */
struct IODMAMapPageList {
	uint32_t                pageOffset;     // offset into the first page (bytes, assumed from name)
	uint32_t                pageListCount;  // number of entries in pageList
	const upl_page_info_t * pageList;       // array of physical page info entries
};
194
// mapOptions for iovmMapMemory — access rights and placement constraints
// for a DMA (IOVM) mapping.
enum{
	kIODMAMapReadAccess           = 0x00000001,
	kIODMAMapWriteAccess          = 0x00000002,
	kIODMAMapPhysicallyContiguous = 0x00000010,
	kIODMAMapDeviceMemory         = 0x00000020,
	kIODMAMapPagingPath           = 0x00000040,
	kIODMAMapIdentityMap          = 0x00000080,

	kIODMAMapPageListFullyOccupied = 0x00000100,
	kIODMAMapFixedAddress          = 0x00000200,
};
207
// Options used by IOMapper. Example IOMappers are DART and VT-d.
enum {
	kIOMapperUncached  = 0x0001,
#ifdef KERNEL_PRIVATE
	kIOMapperTransient = 0x0002,
#endif
};
215
216 #ifdef KERNEL_PRIVATE
217
// Used for dmaCommandOperation communications for IODMACommand and mappers.
//
// The major operation code lives in the top byte (see
// kIOMDDMACommandOperationMask); low-order bits select a variant of that
// operation (e.g. kIOMDFirstSegment = kIOMDWalkSegments | 1).

enum {
	kIOMDWalkSegments             = 0x01000000,
	kIOMDFirstSegment             = 1 | kIOMDWalkSegments,
	kIOMDGetCharacteristics       = 0x02000000,
	kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
	kIOMDDMAActive                = 0x03000000,
	kIOMDSetDMAActive             = 1 | kIOMDDMAActive,
	// NOTE: shares its value with kIOMDDMAActive (low variant bit clear).
	kIOMDSetDMAInactive           = kIOMDDMAActive,
	kIOMDAddDMAMapSpec            = 0x04000000,
	kIOMDDMAMap                   = 0x05000000,
	kIOMDDMAUnmap                 = 0x06000000,
	kIOMDDMACommandOperationMask  = 0xFF000000,
};
/*
 * Result block for the kIOMDGetCharacteristics operation: summary
 * information about the descriptor's memory.
 */
struct IOMDDMACharacteristics {
	UInt64      fLength;        // total byte length of all ranges
	UInt32      fSGCount;       // scatter/gather segment count (assumed from name)
	UInt32      fPages;         // number of pages spanned
	UInt32      fPageAlign;     // page alignment of the memory
	ppnum_t     fHighestPage;   // highest physical page number in the descriptor
	IODirection fDirection;     // transfer direction (kIODirection* value)
	UInt8       fIsPrepared;    // non-zero if the descriptor is currently prepared
};
242
/*
 * Argument block for the kIOMDDMAMap / kIOMDDMAUnmap operations; mirrors the
 * parameters of dmaMap()/dmaUnmap() below.
 */
struct IOMDDMAMapArgs {
	IOMapper *            fMapper;      // mapper used to create/remove the IOVM mapping
	IODMACommand *        fCommand;     // originating DMA command
	IODMAMapSpecification fMapSpec;     // constraints for the mapping
	uint64_t              fOffset;      // byte offset into the descriptor
	uint64_t              fLength;      // byte length to (un)map
	uint64_t              fAlloc;       // mapped (IOVM) address — presumably an output of map; confirm
	uint64_t              fAllocLength; // mapped length — presumably an output of map; confirm
};
252
/*
 * Argument block for the kIOMDWalkSegments operation: iterates the
 * descriptor's physically contiguous segments one at a time.
 */
struct IOMDDMAWalkSegmentArgs {
	UInt64 fOffset;                 // Input/Output offset
	/* Output variables.
	 * Note to reader: fIOVMAddr is (sometimes?) a DART-mapped device address.
	 */
	UInt64 fIOVMAddr, fLength;      // current segment address and byte length
	UInt8  fMapped;                 // Input Variable, Require mapped IOVMA
	UInt64 fMappedBase;             // Input base of mapping
};
// Opaque per-walk state carried between kIOMDWalkSegments calls (assumed;
// interpreted only by the implementation).
typedef UInt8 IOMDDMAWalkSegmentState[128];
263
264 #endif /* KERNEL_PRIVATE */
265
/*
 * Distinguished values returned by getPreparationID(); real preparation IDs
 * are allocated above these sentinels.
 */
enum{
	kIOPreparationIDUnprepared     = 0,     // descriptor has not been prepared
	kIOPreparationIDUnsupported    = 1,     // descriptor type does not support preparation IDs
	kIOPreparationIDAlwaysPrepared = 2,
};
271
272 #ifdef KERNEL_PRIVATE
273 #define kIODescriptorIDInvalid (0)
274 #endif
275
276 #ifdef XNU_KERNEL_PRIVATE
277 struct IOMemoryReference;
278 #endif
279
280
281 /*! @class IOMemoryDescriptor : public OSObject
282 * @abstract An abstract base class defining common methods for describing physical or virtual memory.
283 * @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */
284
285 class IOMemoryDescriptor : public OSObject
286 {
287 friend class IOMemoryMap;
288 friend class IOMultiMemoryDescriptor;
289
290 OSDeclareDefaultStructorsWithDispatch(IOMemoryDescriptor);
291
292 protected:
293
294 /*! @var reserved
295 * Reserved for future use. (Internal use only) */
296 struct IOMemoryDescriptorReserved * reserved;
297
298 protected:
299 OSPtr<OSSet> _mappings;
300 IOOptionBits _flags;
301
302
303 #ifdef XNU_KERNEL_PRIVATE
304 public:
305 struct IOMemoryReference * _memRef;
306 vm_tag_t _kernelTag;
307 vm_tag_t _userTag;
308 int16_t _dmaReferences;
309 uint16_t _internalFlags;
310 kern_allocation_name_t _mapName;
311 protected:
312 #else /* XNU_KERNEL_PRIVATE */
313 void * __iomd_reserved5;
314 uint16_t __iomd_reserved1[4];
315 uintptr_t __iomd_reserved2;
316 #endif /* XNU_KERNEL_PRIVATE */
317
318 uint16_t _iomapperOptions;
319 #ifdef __LP64__
320 uint16_t __iomd_reserved3[3];
321 #else
322 uint16_t __iomd_reserved3;
323 #endif
324 uintptr_t __iomd_reserved4;
325
326 #ifndef __LP64__
327 IODirection _direction; /* use _flags instead */
328 #endif /* !__LP64__ */
329 IOByteCount _length; /* length of all ranges */
330 IOOptionBits _tag;
331
332 public:
333 typedef IOOptionBits DMACommandOps;
334 #ifndef __LP64__
335 virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
336 IOByteCount * length ) APPLE_KEXT_DEPRECATED;
337 #endif /* !__LP64__ */
338
339 /*! @function initWithOptions
340 * @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions.
341 * @discussion Note this function can be used to re-init a previously created memory descriptor.
342 * @result true on success, false on failure. */
343 virtual bool initWithOptions(void * buffers,
344 UInt32 count,
345 UInt32 offset,
346 task_t task,
347 IOOptionBits options,
348 IOMapper * mapper = kIOMapperSystem);
349
350 #ifndef __LP64__
351 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
352 IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
353 #endif /* !__LP64__ */
354
355 /*! @function setPurgeable
356 * @abstract Control the purgeable status of a memory descriptors memory.
357 * @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
358 * @param newState - the desired new purgeable state of the memory:<br>
359 * kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
360 * kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
361 * kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
362 * kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it.
363 * @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
364 * kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
365 * kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.<br>
366 * kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.<br>
367 * @result An IOReturn code. */
368
369 virtual IOReturn setPurgeable( IOOptionBits newState,
370 IOOptionBits * oldState );
371
372 /*! @function setOwnership
373 * @abstract Control the ownership of a memory descriptors memory.
374 * @discussion IOBufferMemoryDescriptor are owned by a specific task. The ownership of such a buffer may be controlled with setOwnership().
375 * @param newOwner - the task to be the new owner of the memory.
376 * @param newLedgerTag - the ledger this memory should be accounted in.
377 * @param newLedgerOptions - accounting options
378 * @result An IOReturn code. */
379
380 IOReturn setOwnership( task_t newOwner,
381 int newLedgerTag,
382 IOOptionBits newLedgerOptions );
383
384 /*! @function getPageCounts
385 * @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
386 * @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
387 * @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
388 * @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
389 * @result An IOReturn code. */
390
391 IOReturn getPageCounts( IOByteCount * residentPageCount,
392 IOByteCount * dirtyPageCount);
393
394 /*! @function performOperation
395 * @abstract Perform an operation on the memory descriptor's memory.
396 * @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
397 * @param options The operation to perform on the memory:<br>
398 * kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
399 * kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
400 * @param offset A byte offset into the memory descriptor's memory.
401 * @param length The length of the data range.
402 * @result An IOReturn code. */
403
404 virtual IOReturn performOperation( IOOptionBits options,
405 IOByteCount offset, IOByteCount length );
406
407 // Used for dedicated communications for IODMACommand
408 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;
409
410 /*! @function getPhysicalSegment
411 * @abstract Break a memory descriptor into its physically contiguous segments.
412 * @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
413 * @param offset A byte offset into the memory whose physical address to return.
414 * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset.
415 * @result A physical address, or zero if the offset is beyond the length of the memory. */
416
417 #ifdef __LP64__
418 virtual addr64_t getPhysicalSegment( IOByteCount offset,
419 IOByteCount * length,
420 IOOptionBits options = 0 ) = 0;
421 #else /* !__LP64__ */
422 virtual addr64_t getPhysicalSegment( IOByteCount offset,
423 IOByteCount * length,
424 IOOptionBits options );
425 #endif /* !__LP64__ */
426
427 virtual uint64_t getPreparationID( void );
428 void setPreparationID( void );
429
430 void setVMTags(uint32_t kernelTag, uint32_t userTag);
431 uint32_t getVMTag(vm_map_t map);
432
433 #ifdef KERNEL_PRIVATE
434 uint64_t getDescriptorID( void );
435 void setDescriptorID( void );
436
437 IOReturn ktraceEmitPhysicalSegments( void );
438 #endif
439
440 #ifdef XNU_KERNEL_PRIVATE
441 IOMemoryDescriptorReserved * getKernelReserved( void );
442 void cleanKernelReserved(IOMemoryDescriptorReserved * reserved);
443 IOReturn dmaMap(
444 IOMapper * mapper,
445 IOMemoryDescriptor * memory,
446 IODMACommand * command,
447 const IODMAMapSpecification * mapSpec,
448 uint64_t offset,
449 uint64_t length,
450 uint64_t * mapAddress,
451 uint64_t * mapLength);
452 IOReturn dmaUnmap(
453 IOMapper * mapper,
454 IODMACommand * command,
455 uint64_t offset,
456 uint64_t mapAddress,
457 uint64_t mapLength);
458 void dmaMapRecord(
459 IOMapper * mapper,
460 IODMACommand * command,
461 uint64_t mapLength);
462 #endif
463
464 private:
465 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 0);
466 #ifdef __LP64__
467 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
468 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
469 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
470 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
471 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
472 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
473 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
474 #else /* !__LP64__ */
475 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 1);
476 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 2);
477 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 3);
478 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 4);
479 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 5);
480 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 6);
481 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 7);
482 #endif /* !__LP64__ */
483 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
484 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
485 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
486 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
487 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
488 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
489 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
490 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);
491
492 protected:
493 virtual void free(void) APPLE_KEXT_OVERRIDE;
494 public:
495 static void initialize( void );
496
497 public:
498 /*! @function withAddress
499 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
500 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
501 * @param address The virtual address of the first byte in the memory.
502 * @param withLength The length of memory.
503 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
504 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
505
506 static OSPtr<IOMemoryDescriptor> withAddress(void * address,
507 IOByteCount withLength,
508 IODirection withDirection);
509
510 #ifndef __LP64__
511 static OSPtr<IOMemoryDescriptor> withAddress(IOVirtualAddress address,
512 IOByteCount withLength,
513 IODirection withDirection,
514 task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */
515 #endif /* !__LP64__ */
516
517 /*! @function withPhysicalAddress
518 * @abstract Create an IOMemoryDescriptor to describe one physical range.
519 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
520 * @param address The physical address of the first byte in the memory.
521 * @param withLength The length of memory.
522 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
523 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
524
525 static OSPtr<IOMemoryDescriptor> withPhysicalAddress(
526 IOPhysicalAddress address,
527 IOByteCount withLength,
528 IODirection withDirection );
529
530 #ifndef __LP64__
531 static OSPtr<IOMemoryDescriptor> withRanges(IOVirtualRange * ranges,
532 UInt32 withCount,
533 IODirection withDirection,
534 task_t withTask,
535 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */
536 #endif /* !__LP64__ */
537
538 /*! @function withAddressRange
539 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
540 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
541 * @param address The virtual address of the first byte in the memory.
542 * @param length The length of memory.
543 * @param options
544 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
545 * @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
546 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
547
548 static OSPtr<IOMemoryDescriptor> withAddressRange(
549 mach_vm_address_t address,
550 mach_vm_size_t length,
551 IOOptionBits options,
552 task_t task);
553
554 /*! @function withAddressRanges
555 * @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
556 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
557 * @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
558 * @param rangeCount The member count of the ranges array.
559 * @param options
560 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
561 * kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
562 * @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
563 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
564
565 static OSPtr<IOMemoryDescriptor> withAddressRanges(
566 IOAddressRange * ranges,
567 UInt32 rangeCount,
568 IOOptionBits options,
569 task_t task);
570
571 /*! @function withOptions
572 * @abstract Master initialiser for all variants of memory descriptors.
573 * @discussion This method creates and initializes an IOMemoryDescriptor for memory it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
574 *
575 *
576 * @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.
577 *
578 * @param count options:type = Virtual or Physical count contains a count of the number of entires in the buffers array. For options:type = UPL this field contains a total length.
579 *
580 * @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.
581 *
582 * @param task Only used options:type = Virtual, The task each of the virtual ranges are mapped into.
583 *
584 * @param options
585 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
586 * kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates that what type of memory basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
587 * kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
588 * kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.
589 *
590 * @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present.
591 *
592 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
593
594 static OSPtr<IOMemoryDescriptor> withOptions(void * buffers,
595 UInt32 count,
596 UInt32 offset,
597 task_t task,
598 IOOptionBits options,
599 IOMapper * mapper = kIOMapperSystem);
600
601 #ifndef __LP64__
602 static OSPtr<IOMemoryDescriptor> withPhysicalRanges(
603 IOPhysicalRange * ranges,
604 UInt32 withCount,
605 IODirection withDirection,
606 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */
607 #endif /* !__LP64__ */
608
609 #ifndef __LP64__
610 static OSPtr<IOMemoryDescriptor> withSubRange(IOMemoryDescriptor *of,
611 IOByteCount offset,
612 IOByteCount length,
613 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
614 #endif /* !__LP64__ */
615
616 /*! @function withPersistentMemoryDescriptor
617 * @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
618 * @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and the reallocated memory at the same address then the original memory descriptor is returned with a additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
619 * @param originalMD The memory descriptor to be duplicated.
620 * @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. */
621 static OSPtr<IOMemoryDescriptor>
622 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);
623
624 #ifndef __LP64__
625 // obsolete initializers
626 // - initWithOptions is the designated initializer
627 virtual bool initWithAddress(void * address,
628 IOByteCount withLength,
629 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
630 virtual bool initWithAddress(IOVirtualAddress address,
631 IOByteCount withLength,
632 IODirection withDirection,
633 task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
634 virtual bool initWithPhysicalAddress(
635 IOPhysicalAddress address,
636 IOByteCount withLength,
637 IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
638 virtual bool initWithRanges(IOVirtualRange * ranges,
639 UInt32 withCount,
640 IODirection withDirection,
641 task_t withTask,
642 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
643 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
644 UInt32 withCount,
645 IODirection withDirection,
646 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
647 #endif /* __LP64__ */
648
/*! @function getDirection
 * @abstract Accessor to get the direction the memory descriptor was created with.
 * @discussion This method returns the direction the memory descriptor was created with.
 * @result The direction. */

	virtual IODirection getDirection() const;

/*! @function getLength
 * @abstract Accessor to get the length of the memory descriptor (over all its ranges).
 * @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
 * @result The byte count. */

	virtual IOByteCount getLength() const;

// Feature-test macro so clients can detect at compile time that this SDK
// declares getDMAMapLength().
#define IOMEMORYDESCRIPTOR_SUPPORTS_GETDMAMAPLENGTH

/*! @function getDMAMapLength
 * @abstract Return the length of the memory as seen by a DMA mapping.
 * @param offset If non-NULL, receives the starting offset of the memory within its DMA mapping. (NOTE(review): parameter semantics inferred from the name — confirm against the implementation.)
 * @result The DMA map length in bytes. */
	uint64_t getDMAMapLength(uint64_t * offset = NULL);

/*! @function setTag
 * @abstract Set the tag for the memory descriptor.
 * @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
 * @param tag The tag. */

	virtual void setTag( IOOptionBits tag );

/*! @function getTag
 * @abstract Accessor to retrieve the tag for the memory descriptor.
 * @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
 * @result The tag. */

	virtual IOOptionBits getTag( void );

/*! @function getFlags
 * @abstract Accessor to retrieve the options the memory descriptor was created with.
 * @discussion Accessor to retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum.
 * @result The flags bitfield. */

	uint64_t getFlags(void);

/*! @function readBytes
 * @abstract Copy data from the memory descriptor's buffer to the specified buffer.
 * @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
 * @param offset A byte offset into the memory descriptor's memory.
 * @param bytes The caller supplied buffer to copy the data to.
 * @param withLength The length of the data to copy.
 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

	virtual IOByteCount readBytes(IOByteCount offset,
	    void * bytes, IOByteCount withLength);

/*! @function writeBytes
 * @abstract Copy data to the memory descriptor's buffer from the specified buffer.
 * @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
 * @param offset A byte offset into the memory descriptor's memory.
 * @param bytes The caller supplied buffer to copy the data from.
 * @param withLength The length of the data to copy.
 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

	virtual IOByteCount writeBytes(IOByteCount offset,
	    const void * bytes, IOByteCount withLength);

#ifndef __LP64__
	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 * @abstract Return the physical address of the first byte in the memory.
 * @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
 * @result A physical address. */

	IOPhysicalAddress getPhysicalAddress();

#ifndef __LP64__
	virtual void * getVirtualSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_DEPRECATED;        /* use map() and getVirtualAddress() instead */
#endif /* !__LP64__ */
725
/*! @function prepare
 * @abstract Prepare the memory for an I/O transfer.
 * @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor.
 * @param forDirection The direction of the I/O about to be performed, or kIODirectionNone for the direction specified by the memory descriptor.
 * @result An IOReturn code. */

	virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;

/*! @function complete
 * @abstract Complete processing of the memory after an I/O transfer finishes.
 * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is no longer respected. The direction is totally determined at prepare() time.
 * @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
 * @result An IOReturn code. */

	virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;

/*
 * Mapping functions.
 */

/*! @function createMappingInTask
 * @abstract Maps a IOMemoryDescriptor into a task.
 * @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
 * @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
 * @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
 * @param options Mapping options are defined in IOTypes.h,<br>
 * kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
 * kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
 * kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
 * kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.<br>
 * kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
 * kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
 * kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
 * @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
 * @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
 * @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */

	OSPtr<IOMemoryMap> createMappingInTask(
		task_t            intoTask,
		mach_vm_address_t atAddress,
		IOOptionBits      options,
		mach_vm_size_t    offset = 0,
		mach_vm_size_t    length = 0 );

#ifndef __LP64__
	virtual OSPtr<IOMemoryMap> map(
		task_t           intoTask,
		IOVirtualAddress atAddress,
		IOOptionBits     options,
		IOByteCount      offset = 0,
		IOByteCount      length = 0 ) APPLE_KEXT_DEPRECATED; /* use createMappingInTask() instead */
#endif /* !__LP64__ */

/*! @function map
 * @abstract Maps a IOMemoryDescriptor into the kernel map.
 * @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
 * @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
 * @result See the full version of the createMappingInTask method. */

	virtual OSPtr<IOMemoryMap> map(
		IOOptionBits options = 0 );
787
/*! @function setMapping
 * @abstract Establishes an already existing mapping.
 * @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
 * @param task Address space in which the mapping exists.
 * @param mapAddress Virtual address of the mapping.
 * @param options Caching and read-only attributes of the mapping.
 * @result A IOMemoryMap object created to represent the mapping. */

	virtual OSPtr<IOMemoryMap> setMapping(
		task_t           task,
		IOVirtualAddress mapAddress,
		IOOptionBits     options = 0 );

/*! @function setMapperOptions
 * @abstract Set the IOMapper options.
 * @discussion This method sets the IOMapper options.
 * @param options IOMapper options to be set. */

	void setMapperOptions( uint16_t options );

/*! @function getMapperOptions
 * @abstract Return the IOMapper options.
 * @discussion This method returns the IOMapper options set earlier using setMapperOptions().
 * @result IOMapper options set. */

	uint16_t getMapperOptions( void );

	// The following methods are private implementation; they are not part of
	// the supported client API.

#ifdef __LP64__
	virtual
#endif /* __LP64__ */
	IOReturn redirect( task_t safeTask, bool redirect );

	// Fault-path helper invoked with a memory-object pager; private.
	IOReturn handleFault(
		void *         _pager,
		mach_vm_size_t sourceOffset,
		mach_vm_size_t length);

	// Enters pages for the given range into a device pager's mapping; private.
	IOReturn populateDevicePager(
		void *            pager,
		vm_map_t          addressMap,
		mach_vm_address_t address,
		mach_vm_size_t    sourceOffset,
		mach_vm_size_t    length,
		IOOptionBits      options );

	// Creates (or shares) the IOMemoryMap for a map request; the returned
	// object is NOT retained for the caller (see LIBKERN_RETURNS_NOT_RETAINED).
	virtual LIBKERN_RETURNS_NOT_RETAINED IOMemoryMap * makeMapping(
		IOMemoryDescriptor * owner,
		task_t               intoTask,
		IOVirtualAddress     atAddress,
		IOOptionBits         options,
		IOByteCount          offset,
		IOByteCount          length );

#if KERNEL_PRIVATE
/*! @function copyContext
 * @abstract Accessor to retrieve the context previously set for the memory descriptor.
 * @discussion This method returns the context for the memory descriptor. The context is not interpreted by IOMemoryDescriptor.
 * @result The context, returned with an additional retain to be released by the caller. */
	OSObject * copyContext(void) const;

/*! @function setContext
 * @abstract Set a context object for the memory descriptor. The context is not interpreted by IOMemoryDescriptor.
 * @discussion The context is retained, and will be released when the memory descriptor is freed or when a new context object is set.
 */
	void setContext(OSObject * context);
#endif

protected:
	virtual void addMapping(
		IOMemoryMap * mapping );

	virtual void removeMapping(
		IOMemoryMap * mapping );

	virtual IOReturn doMap(
		vm_map_t           addressMap,
		IOVirtualAddress * atAddress,
		IOOptionBits       options,
		IOByteCount        sourceOffset = 0,
		IOByteCount        length = 0 );

	virtual IOReturn doUnmap(
		vm_map_t         addressMap,
		IOVirtualAddress logical,
		IOByteCount      length );
};
876
877 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
878
/*! @class IOMemoryMap : public OSObject
 * @abstract A class defining common methods for describing a memory mapping.
 * @discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */
882
class IOMemoryMap : public OSObject
{
	OSDeclareDefaultStructorsWithDispatch(IOMemoryMap);
#ifdef XNU_KERNEL_PRIVATE
public:
	// Internal state; visible only inside the kernel proper.
	IOOptionBits              fOptions;         // kIOMap* options the mapping was made with
	OSPtr<IOMemoryDescriptor> fMemory;          // descriptor this map represents
	OSPtr<IOMemoryMap>        fSuperMap;        // enclosing mapping, if this is a sub-map
	mach_vm_size_t            fOffset;          // offset into fMemory where the map begins
	mach_vm_address_t         fAddress;         // virtual address of the mapping
	mach_vm_size_t            fLength;          // length of the mapping in bytes
	task_t                    fAddressTask;     // task the mapping lives in
	vm_map_t                  fAddressMap;      // vm_map corresponding to fAddressTask
	upl_t                     fRedirUPL;        // UPL held during redirect() — see redirect() below
	uint8_t                   fUserClientUnmap; // presumably set when unmap is driven by user-client teardown — confirm in .cpp
#if IOTRACKING
	IOTrackingUser            fTracking;        // allocation-tracking record (IOTRACKING builds only)
#endif
#endif /* XNU_KERNEL_PRIVATE */

protected:
	virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
	virtual void free(void) APPLE_KEXT_OVERRIDE;

public:
/*! @function getVirtualAddress
 * @abstract Accessor to the virtual address of the first byte in the mapping.
 * @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
 * @result A virtual address. */

	virtual IOVirtualAddress getVirtualAddress(void);

/*! @function getPhysicalSegment
 * @abstract Break a mapping into its physically contiguous segments.
 * @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
 * @param offset A byte offset into the mapping whose physical address to return.
 * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
 * @result A physical address, or zero if the offset is beyond the length of the mapping. */

#ifdef __LP64__
	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length,
	    IOOptionBits  options = 0);
#else /* !__LP64__ */
	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 * @abstract Return the physical address of the first byte in the mapping.
 * @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
 * @result A physical address. */

	IOPhysicalAddress getPhysicalAddress(void);

/*! @function getLength
 * @abstract Accessor to the length of the mapping.
 * @discussion This method returns the length of the mapping.
 * @result A byte count. */

	virtual IOByteCount getLength(void);

/*! @function getAddressTask
 * @abstract Accessor to the task of the mapping.
 * @discussion This method returns the mach task the mapping exists in.
 * @result A mach task_t. */

	virtual task_t getAddressTask();

/*! @function getMemoryDescriptor
 * @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
 * @discussion This method returns the IOMemoryDescriptor the mapping was created from.
 * @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */

	virtual IOMemoryDescriptor * getMemoryDescriptor();

/*! @function getMapOptions
 * @abstract Accessor to the options the mapping was created with.
 * @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
 * @result Options for the mapping, including cache settings. */

	virtual IOOptionBits getMapOptions();

/*! @function unmap
 * @abstract Force the IOMemoryMap to unmap, without destroying the object.
 * @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
 * @result An IOReturn code. */

	virtual IOReturn unmap();

	// Notification that the task owning the mapping has terminated; private.
	virtual void taskDied();

/*! @function redirect
 * @abstract Replace the memory mapped in a process with new backing memory.
 * @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
 * @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
 * @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
 * @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
 * @result An IOReturn code. */

#ifndef __LP64__
	// For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
	// for 64 bit, these fall together on the 64 bit one.
	virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
	    IOOptionBits options,
	    IOByteCount  offset = 0);
#endif
	virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
	    IOOptionBits   options,
	    mach_vm_size_t offset = 0);

#ifdef __LP64__
/*! @function getAddress
 * @abstract Accessor to the virtual address of the first byte in the mapping.
 * @discussion This method returns the virtual address of the first byte in the mapping.
 * @result A virtual address. */
	inline mach_vm_address_t getAddress() __attribute__((always_inline));
/*! @function getSize
 * @abstract Accessor to the length of the mapping.
 * @discussion This method returns the length of the mapping.
 * @result A byte count. */
	inline mach_vm_size_t getSize() __attribute__((always_inline));
#else /* !__LP64__ */
/*! @function getAddress
 * @abstract Accessor to the virtual address of the first byte in the mapping.
 * @discussion This method returns the virtual address of the first byte in the mapping.
 * @result A virtual address. */
	virtual mach_vm_address_t getAddress();
/*! @function getSize
 * @abstract Accessor to the length of the mapping.
 * @discussion This method returns the length of the mapping.
 * @result A byte count. */
	virtual mach_vm_size_t getSize();
#endif /* !__LP64__ */

#ifdef XNU_KERNEL_PRIVATE
	// for IOMemoryDescriptor use
	IOMemoryMap * copyCompatible( IOMemoryMap * newMapping );

	bool init(
		task_t            intoTask,
		mach_vm_address_t toAddress,
		IOOptionBits      options,
		mach_vm_size_t    offset,
		mach_vm_size_t    length );

	bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);

	IOReturn redirect(
		task_t intoTask, bool redirect );

	IOReturn userClientUnmap();
#endif /* XNU_KERNEL_PRIVATE */

	IOReturn wireRange(
		uint32_t       options,
		mach_vm_size_t offset,
		mach_vm_size_t length);

	// Reserved vtable slots for future binary-compatible expansion.
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
};
1051
1052 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef XNU_KERNEL_PRIVATE
// Internal option flag. Also these flags should not overlap with the options
// to IOMemoryDescriptor::initWithRanges(... IOOptionBits options);
enum {
	_kIOMemorySourceSegment = 0x00002000
};
#endif /* XNU_KERNEL_PRIVATE */
1060
1061 // The following classes are private implementation of IOMemoryDescriptor - they
1062 // should not be referenced directly, just through the public API's in the
1063 // IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
1064 // might be created by IOMemoryDescriptor::withAddressRange(), but there should be
1065 // no need to reference as anything but a generic IOMemoryDescriptor *.
1066
class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
{
	OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);

public:
	// Discriminated storage for the descriptor's range list; which arm is
	// valid depends on the options the descriptor was initialized with.
	union Ranges {
		IOVirtualRange  *v;
		IOAddressRange  *v64;
		IOPhysicalRange *p;
		void            *uio;
	};
protected:
	Ranges   _ranges;
	unsigned _rangesCount;       /* number of address ranges in list */
#ifndef __LP64__
	bool     _rangesIsAllocated; /* is list allocated by us? */
#endif /* !__LP64__ */

	task_t _task;                /* task where all ranges are mapped to */

	union {
		IOVirtualRange  v;
		IOPhysicalRange p;
	} _singleRange;              /* storage space for a single range */

	unsigned _wireCount;         /* number of outstanding wires */

#ifndef __LP64__
	uintptr_t _cachedVirtualAddress;

	IOPhysicalAddress _cachedPhysicalAddress;
#endif /* !__LP64__ */

	bool _initialized;           /* has superclass been initialized? */

public:
	virtual void free() APPLE_KEXT_OVERRIDE;

	virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;

	virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;

#ifdef XNU_KERNEL_PRIVATE
	// Internal APIs may be made virtual at some time in the future.
	IOReturn wireVirtual(IODirection forDirection);
	IOReturn dmaMap(
		IOMapper                    * mapper,
		IOMemoryDescriptor          * memory,
		IODMACommand                * command,
		const IODMAMapSpecification * mapSpec,
		uint64_t                      offset,
		uint64_t                      length,
		uint64_t                    * mapAddress,
		uint64_t                    * mapLength);
	bool initMemoryEntries(size_t size, IOMapper * mapper);

	// Lifecycle helpers for the shared IOMemoryReference backing object.
	IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
	    IOMemoryReference * realloc);
	void memoryReferenceFree(IOMemoryReference * ref);
	void memoryReferenceRelease(IOMemoryReference * ref);

	IOReturn memoryReferenceCreate(
		IOOptionBits         options,
		IOMemoryReference ** reference);

	IOReturn memoryReferenceMap(IOMemoryReference * ref,
	    vm_map_t            map,
	    mach_vm_size_t      inoffset,
	    mach_vm_size_t      size,
	    IOOptionBits        options,
	    mach_vm_address_t * inaddr);

	IOReturn memoryReferenceMapNew(IOMemoryReference * ref,
	    vm_map_t            map,
	    mach_vm_size_t      inoffset,
	    mach_vm_size_t      size,
	    IOOptionBits        options,
	    mach_vm_address_t * inaddr);

	static IOReturn memoryReferenceSetPurgeable(
		IOMemoryReference * ref,
		IOOptionBits        newState,
		IOOptionBits      * oldState);
	static IOReturn memoryReferenceSetOwnership(
		IOMemoryReference * ref,
		task_t              newOwner,
		int                 newLedgerTag,
		IOOptionBits        newLedgerOptions);
	static IOReturn memoryReferenceGetPageCounts(
		IOMemoryReference * ref,
		IOByteCount       * residentPageCount,
		IOByteCount       * dirtyPageCount);

	static uint64_t memoryReferenceGetDMAMapLength(
		IOMemoryReference * ref,
		uint64_t          * offset);

	IOByteCount readBytes(IOByteCount offset,
	    void * bytes, IOByteCount withLength) override;
	IOByteCount writeBytes(IOByteCount offset,
	    const void * bytes, IOByteCount withLength) override;

#endif

private:

#ifndef __LP64__
	virtual void setPosition(IOByteCount position);
	virtual void mapIntoKernel(unsigned rangeIndex);
	virtual void unmapFromKernel();
#endif /* !__LP64__ */

	// Internal
	OSPtr<_IOMemoryDescriptorMixedData> _memoryEntries;
	unsigned int _pages;           // total page count covered by the ranges
	ppnum_t      _highestPage;     // highest physical page number seen
	uint32_t     __iomd_reservedA; // reserved for future use; keeps struct layout stable
	uint32_t     __iomd_reservedB;

	IOLock * _prepareLock;         // serializes prepare()/complete()

public:
	/*
	 * IOMemoryDescriptor required methods
	 */

	// Master initializer
	virtual bool initWithOptions(void *   buffers,
	    UInt32       count,
	    UInt32       offset,
	    task_t       task,
	    IOOptionBits options,
	    IOMapper *   mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;

#ifndef __LP64__
	// Secondary (deprecated) initializers; 32-bit kernels only.
	virtual bool initWithAddress(void *  address,
	    IOByteCount withLength,
	    IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithAddress(IOVirtualAddress address,
	    IOByteCount withLength,
	    IODirection withDirection,
	    task_t      withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithPhysicalAddress(
		IOPhysicalAddress address,
		IOByteCount       withLength,
		IODirection       withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithRanges( IOVirtualRange * ranges,
	    UInt32      withCount,
	    IODirection withDirection,
	    task_t      withTask,
	    bool        asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
	    UInt32      withCount,
	    IODirection withDirection,
	    bool        asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual addr64_t getPhysicalSegment64( IOByteCount offset,
	    IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE;

	virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual void * getVirtualSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */

	virtual IOReturn setPurgeable( IOOptionBits newState,
	    IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;

	IOReturn setOwnership( task_t newOwner,
	    int newLedgerTag,
	    IOOptionBits newLedgerOptions );

	virtual addr64_t getPhysicalSegment( IOByteCount offset,
	    IOByteCount * length,
#ifdef __LP64__
	    IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE;
#else /* !__LP64__ */
	    IOOptionBits options ) APPLE_KEXT_OVERRIDE;
#endif /* !__LP64__ */

	virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

	virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

	virtual LIBKERN_RETURNS_NOT_RETAINED IOMemoryMap * makeMapping(
		IOMemoryDescriptor * owner,
		task_t               intoTask,
		IOVirtualAddress     atAddress,
		IOOptionBits         options,
		IOByteCount          offset,
		IOByteCount          length ) APPLE_KEXT_OVERRIDE;

	virtual IOReturn doMap(
		vm_map_t           addressMap,
		IOVirtualAddress * atAddress,
		IOOptionBits       options,
		IOByteCount        sourceOffset = 0,
		IOByteCount        length = 0 ) APPLE_KEXT_OVERRIDE;

	virtual IOReturn doUnmap(
		vm_map_t         addressMap,
		IOVirtualAddress logical,
		IOByteCount      length ) APPLE_KEXT_OVERRIDE;

	virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;

	// Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
	static OSPtr<IOMemoryDescriptor>
	withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);

	IOOptionBits memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * map);
};
1288
1289 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1290
1291 #ifdef __LP64__
1292 mach_vm_address_t
getAddress()1293 IOMemoryMap::getAddress()
1294 {
1295 return getVirtualAddress();
1296 }
1297
1298 mach_vm_size_t
getSize()1299 IOMemoryMap::getSize()
1300 {
1301 return getLength();
1302 }
1303 #else /* !__LP64__ */
1304 #include <IOKit/IOSubMemoryDescriptor.h>
1305 #endif /* !__LP64__ */
1306
1307 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1308
1309 extern bool iokit_iomd_setownership_enabled;
1310
1311 #endif /* !_IOMEMORYDESCRIPTOR_H */
1312