xref: /xnu-8792.81.2/iokit/Kernel/IOMemoryDescriptor.cpp (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
1 /*
2  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #define IOKIT_ENABLE_SHARED_PTR
29 
30 #include <sys/cdefs.h>
31 
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39 
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43 
44 #include <IOKit/IOKitDebug.h>
45 #include <IOKit/IOTimeStamp.h>
46 #include <libkern/OSDebug.h>
47 #include <libkern/OSKextLibPrivate.h>
48 
49 #include "IOKitKernelInternal.h"
50 
51 #include <libkern/c++/OSAllocation.h>
52 #include <libkern/c++/OSContainers.h>
53 #include <libkern/c++/OSDictionary.h>
54 #include <libkern/c++/OSArray.h>
55 #include <libkern/c++/OSSymbol.h>
56 #include <libkern/c++/OSNumber.h>
57 #include <os/overflow.h>
58 #include <os/cpp_util.h>
59 #include <os/base_private.h>
60 
61 #include <sys/uio.h>
62 
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68 
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <mach/memory_entry.h>
72 #include <vm/vm_fault.h>
73 #include <vm/vm_protos.h>
74 
75 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76 extern void ipc_port_release_send(ipc_port_t port);
77 
78 extern kern_return_t
79 mach_memory_entry_ownership(
80 	ipc_port_t      entry_port,
81 	task_t          owner,
82 	int             ledger_tag,
83 	int             ledger_flags);
84 
85 __END_DECLS
86 
// Sentinel value (not a real pointer) used by callers to request the
// system mapper once it becomes available; tested against before deref.
#define kIOMapperWaitSystem     ((IOMapper *) 1)

// Cached pointer to the system-wide IOMapper; NULL until resolved.
static IOMapper * gIOSystemMapper = NULL;

// Last physical page number known to IOKit (set elsewhere in this file).
ppnum_t           gIOLastPage;

enum {
	kIOMapGuardSizeLarge = 65536    // guard-region size for kIOMapGuardedLarge
};
96 
97 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
98 
99 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
100 
101 #define super IOMemoryDescriptor
102 
103 OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
104     IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
105 
106 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
107 
// Recursive lock serializing IOMemoryDescriptor global state
// (e.g. reserved->dp.memory accessed from the device pager callbacks).
static IORecursiveLock * gIOMemoryLock;

// Convenience wrappers around the global recursive lock; SLEEP/WAKEUP use
// 'this' as the event so waits are per-descriptor.
#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug logging; compiled out by default (flip the #if 0 to enable kprintf).
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
121 
122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123 
124 // Some data structures and accessor macros used by the initWithOptions
125 // Function
126 
// Flags for ioPLBlock::fFlags.
enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,    // pages live on a device (no page list)
	kIOPLExternUPL = 0x00000002,    // UPL was supplied externally, not wired here
};

// Initialization payload for the persistent-memory descriptor path:
// the source descriptor plus an already-built memory reference.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference               * fMemRef;
};

// One wired UPL (universal page list) covering part of a descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags (ioPLBlockFlags)
};
145 
enum { kMaxWireTags = 6 };

// Per-descriptor bookkeeping blob stored in the descriptor's _memoryEntries
// data object (see getDataP/getIOPLList/getPageList accessors below).
// Layout: fixed header, then fPageCnt page-info slots, then ioPLBlocks.
struct ioGMDData {
	IOMapper *  fMapper;              // DMA mapper in use (or NULL)
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;          // IOVA of the DMA mapping
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;
#endif /* IOTRACKING */
	unsigned int      fPageCnt;       // number of entries in fPageList
	uint8_t           fDMAMapNumAddressBits;
	unsigned char     fCompletionError:1;
	unsigned char     fMappedBaseValid:1;   // fMappedBase holds a live mapping
	unsigned char     _resv:4;
	unsigned char     fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
173 
174 #pragma GCC visibility push(hidden)
175 
// Growable byte buffer used by IOMemoryDescriptor to hold mixed binary
// data (e.g. the ioGMDData blob). Hidden visibility: file-internal only.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create a buffer with an initial capacity (bytes).
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	virtual bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append raw bytes, growing the backing store as needed.
	virtual bool appendBytes(const void * bytes, size_t length);
	// Truncate/extend the logical length (within capacity).
	virtual void setLength(size_t length);

	virtual const void * getBytes() const;
	virtual size_t getLength() const;

private:
	void freeMemory();

	void *  _data = nullptr;    // backing allocation
	size_t  _length = 0;        // bytes in use
	size_t  _capacity = 0;      // bytes allocated
};
198 
199 #pragma GCC visibility pop
200 
// Accessors into the ioGMDData blob stored in an _IOMemoryDescriptorMixedData:
// getDataP     - header pointer
// getIOPLList  - ioPLBlock array that follows the fPageCnt page-info slots
// getNumIOPL   - number of ioPLBlocks, derived from the container's length
// getPageList  - first upl_page_info_t slot
// computeDataSize - total bytes needed for p page slots and u ioPLBlocks
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
210 
211 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212 
extern "C" {
/*
 * Device pager data callback: the VM layer asks the backing
 * IOMemoryDescriptor to satisfy a fault on [offset, offset+size).
 * device_handle is the IOMemoryDescriptorReserved installed at pager setup.
 * Returns KERN_ABORTED if the descriptor has already been torn down.
 * NOTE(review): 'protection' is accepted but unused here.
 */
kern_return_t
device_data_action(
	uintptr_t               device_handle,
	ipc_port_t              device_pager,
	vm_prot_t               protection,
	vm_object_offset_t      offset,
	vm_size_t               size)
{
	kern_return_t        kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	OSSharedPtr<IOMemoryDescriptor> memDesc;

	// The global lock guards ref->dp.memory against concurrent teardown;
	// take an extra retain so the descriptor survives handleFault().
	LOCK;
	if (ref->dp.memory) {
		memDesc.reset(ref->dp.memory, OSRetain);
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc.reset();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}

/*
 * Device pager close callback: the pager is done with the handle, so the
 * reserved state allocated in getKernelReserved() can be freed.
 */
kern_return_t
device_close(
	uintptr_t     device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IOFreeType( ref, IOMemoryDescriptorReserved );

	return kIOReturnSuccess;
}
};      // end extern "C"
250 
251 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
252 
253 // Note this inline function uses C++ reference arguments to return values
254 // This means that pointers are not passed and NULLs don't have to be
255 // checked for as a NULL reference is illegal.
// Decode range 'ind' of the Ranges union 'r' according to the descriptor
// type and return its address/length through the reference arguments.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
    UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// user I/O vector: fetch the ind'th iovec from the uio
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		// 32-bit kernel: 64-bit range types use the wider IOAddressRange
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len  = cur.length;
	}
#endif /* !__LP64__ */
	else {
		// NOTE(review): on LP64 the 64-bit types fall through to here,
		// which presumes IOVirtualRange matches IOAddressRange's layout
		// on LP64 — confirm against IOTypes.h.
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len  = cur.length;
	}
}
281 
282 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
283 
284 static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)285 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
286 {
287 	IOReturn err = kIOReturnSuccess;
288 
289 	*control = VM_PURGABLE_SET_STATE;
290 
291 	enum { kIOMemoryPurgeableControlMask = 15 };
292 
293 	switch (kIOMemoryPurgeableControlMask & newState) {
294 	case kIOMemoryPurgeableKeepCurrent:
295 		*control = VM_PURGABLE_GET_STATE;
296 		break;
297 
298 	case kIOMemoryPurgeableNonVolatile:
299 		*state = VM_PURGABLE_NONVOLATILE;
300 		break;
301 	case kIOMemoryPurgeableVolatile:
302 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
303 		break;
304 	case kIOMemoryPurgeableEmpty:
305 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
306 		break;
307 	default:
308 		err = kIOReturnBadArgument;
309 		break;
310 	}
311 
312 	if (*control == VM_PURGABLE_SET_STATE) {
313 		// let VM know this call is from the kernel and is allowed to alter
314 		// the volatility of the memory entry even if it was created with
315 		// MAP_MEM_PURGABLE_KERNEL_ONLY
316 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
317 	}
318 
319 	return err;
320 }
321 
322 static IOReturn
purgeableStateBits(int * state)323 purgeableStateBits(int * state)
324 {
325 	IOReturn err = kIOReturnSuccess;
326 
327 	switch (VM_PURGABLE_STATE_MASK & *state) {
328 	case VM_PURGABLE_NONVOLATILE:
329 		*state = kIOMemoryPurgeableNonVolatile;
330 		break;
331 	case VM_PURGABLE_VOLATILE:
332 		*state = kIOMemoryPurgeableVolatile;
333 		break;
334 	case VM_PURGABLE_EMPTY:
335 		*state = kIOMemoryPurgeableEmpty;
336 		break;
337 	default:
338 		*state = kIOMemoryPurgeableNonVolatile;
339 		err = kIOReturnNotReady;
340 		break;
341 	}
342 	return err;
343 }
344 
// Pairing of a pager WIMG encoding with the matching MAP_MEM_* named-entry
// object type for one IOKit cache mode.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* value used in pager flags
	unsigned int object_type;   // MAP_MEM_* value used with SET_MAP_MEM
} iokit_memtype_entry;

// Indexed by the kIO*Cache constants; consulted by the three cache-mode
// translation helpers below.
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
362 
363 static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)364 vmProtForCacheMode(IOOptionBits cacheMode)
365 {
366 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
367 	vm_prot_t prot = 0;
368 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
369 	return prot;
370 }
371 
372 static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)373 pagerFlagsForCacheMode(IOOptionBits cacheMode)
374 {
375 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
376 	if (cacheMode == kIODefaultCache) {
377 		return -1U;
378 	}
379 	return iomd_mem_types[cacheMode].wimg;
380 }
381 
382 static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)383 cacheModeForPagerFlags(unsigned int pagerFlags)
384 {
385 	pagerFlags &= VM_WIMG_MASK;
386 	IOOptionBits cacheMode = kIODefaultCache;
387 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
388 		if (iomd_mem_types[i].wimg == pagerFlags) {
389 			cacheMode = i;
390 			break;
391 		}
392 	}
393 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
394 }
395 
396 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
397 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
398 
// One mach named memory entry plus its placement within the descriptor.
struct IOMemoryEntry {
	ipc_port_t entry;   // send right for the named memory entry
	int64_t    offset;  // byte offset of this entry within the descriptor
	uint64_t   size;    // bytes covered by this entry
	uint64_t   start;   // source address the entry was created at
};

// Reference-counted, variable-length collection of IOMemoryEntry records
// describing a descriptor's backing memory. Allocated and grown by
// memoryReferenceAlloc(); freed when refCount drops to zero.
struct IOMemoryReference {
	volatile SInt32             refCount;   // owners; see memoryReferenceRelease
	vm_prot_t                   prot;       // vm_prot/MAP_MEM_* bits used at creation
	uint32_t                    capacity;   // allocated entries[] slots
	uint32_t                    count;      // used entries[] slots
	struct IOMemoryReference  * mapRef;     // chained reference for the COW case
	IOMemoryEntry               entries[0]; // variable-length tail
};

// Option bits for memoryReferenceCreate().
enum{
	kIOMemoryReferenceReuse = 0x00000001,   // try to reuse an existing _memRef
	kIOMemoryReferenceWrite = 0x00000002,   // request write access
	kIOMemoryReferenceCOW   = 0x00000004,   // request copy-on-write entries
};

// Count of live IOMemoryReference objects.
SInt32 gIOMemoryReferenceCount;
422 
423 IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)424 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
425 {
426 	IOMemoryReference * ref;
427 	size_t              oldCapacity;
428 
429 	if (realloc) {
430 		oldCapacity = realloc->capacity;
431 	} else {
432 		oldCapacity = 0;
433 	}
434 
435 	// Use the kalloc API instead of manually handling the reallocation
436 	ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
437 	    oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
438 	if (ref) {
439 		if (oldCapacity == 0) {
440 			ref->refCount = 1;
441 			OSIncrementAtomic(&gIOMemoryReferenceCount);
442 		}
443 		ref->capacity = capacity;
444 	}
445 	return ref;
446 }
447 
448 void
memoryReferenceFree(IOMemoryReference * ref)449 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
450 {
451 	IOMemoryEntry * entries;
452 
453 	if (ref->mapRef) {
454 		memoryReferenceFree(ref->mapRef);
455 		ref->mapRef = NULL;
456 	}
457 
458 	entries = ref->entries + ref->count;
459 	while (entries > &ref->entries[0]) {
460 		entries--;
461 		ipc_port_release_send(entries->entry);
462 	}
463 	kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
464 
465 	OSDecrementAtomic(&gIOMemoryReferenceCount);
466 }
467 
468 void
memoryReferenceRelease(IOMemoryReference * ref)469 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
470 {
471 	if (1 == OSDecrementAtomic(&ref->refCount)) {
472 		memoryReferenceFree(ref);
473 	}
474 }
475 
476 
/*
 * memoryReferenceCreate
 *
 * Build an IOMemoryReference — an array of mach named memory entries —
 * covering this descriptor's memory, for later mapping by
 * memoryReferenceMap().
 *
 * options:   kIOMemoryReference{Reuse,Write,COW} (see enum above).
 * reference: out — the created (or reused) reference; NULL on error.
 *
 * Two major paths:
 *   - _task != 0: virtual ranges; coalesce contiguous ranges and create
 *     memory entries with mach_make_memory_entry_internal().
 *   - _task == 0: physical/UPL; set up a device pager and create a single
 *     entry with mach_memory_object_memory_entry_64().
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t        err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;
	IOMemoryEntry *      cloneEntries = NULL;
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
	int                  misaligned_start = 0, misaligned_end = 0;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	remain = _length;
	// Seed nextAddr/nextLen with the first range (virtual) or the first
	// physical segment (no task).
	if (_task) {
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain  = nextLen;
		}
	} else {
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	// 'prot' accumulates both vm_prot bits and MAP_MEM_* request bits that
	// mach_make_memory_entry_internal() understands.
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW   & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	// Reuse path: compare newly made entries against the existing _memRef's
	// entries; if they all match, the old reference is kept instead.
	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//    + "network" ledger tag
					//    + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//    + no ledger tag
					//    + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			// NAMED_CREATE allocates fresh pageable memory: no source map
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		// Outer loop: one iteration per maximal run of contiguous ranges.
		while (remain) {
			srcAddr  = nextAddr;
			srcLen   = nextLen;
			nextAddr = 0;
			nextLen  = 0;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr   = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr   = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// Inner loop: one memory entry per iteration; the VM may clip
			// an entry short of the requested size, so loop until endAddr.
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						// old reference has fewer entries: cannot reuse
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				// Detect sub-page overmapping at either end of the entry
				// (mixed page-size maps).
				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					if (misaligned_start || misaligned_end) {
						// interior misalignment cannot be represented
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				// grow the entries array when full
				if (count >= ref->capacity) {
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry  = entry;
				entries->size   = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				if (MAP_MEM_NAMED_REUSE & prot) {
					// reuse only holds if every entry matches the old one
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		// Device pager backs faults via device_data_action() above.
		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr  = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry  = entry;
				entries->size   = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot  = prot;

	// Copy-on-write descriptors also need a COW reference for mapping.
	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		if (MAP_MEM_NAMED_REUSE & prot) {
			// all entries matched the existing reference: keep the old
			// one (with an extra owner) and discard the new one
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
797 
798 static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)799 IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
800 {
801 	switch (kIOMapGuardedMask & options) {
802 	default:
803 	case kIOMapGuardedSmall:
804 		return vm_map_page_size(map);
805 	case kIOMapGuardedLarge:
806 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
807 		return kIOMapGuardSizeLarge;
808 	}
809 	;
810 }
811 
812 static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)813 IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
814     vm_map_offset_t addr, mach_vm_size_t size)
815 {
816 	kern_return_t   kr;
817 	vm_map_offset_t actualAddr;
818 	mach_vm_size_t  actualSize;
819 
820 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
821 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
822 
823 	if (kIOMapGuardedMask & options) {
824 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
825 		actualAddr -= guardSize;
826 		actualSize += 2 * guardSize;
827 	}
828 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
829 
830 	return kr;
831 }
832 
/*
 * Allocate VM space for a descriptor mapping (callback-style: _ref is an
 * IOMemoryDescriptorMapAllocRef carrying options/size/prot/tag in and
 * returning the mapped address and map out).
 *
 * For kIOMapGuarded* the allocation is padded with a guard region on each
 * side, both protected VM_PROT_NONE; ref->mapped is advanced past the
 * leading guard so the caller sees only the usable range. Guarded requests
 * require kIOMapAnywhere (a fixed address cannot be padded).
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn                        err;
	vm_map_offset_t                 addr;
	mach_vm_size_t                  size;
	mach_vm_size_t                  guardSize;
	vm_map_kernel_flags_t           vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		if (!(kIOMapAnywhere & ref->options)) {
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		size += 2 * guardSize;
	}

	/*
	 * Mapping memory into the kernel_map using IOMDs use the data range.
	 * Memory being mapped should not contain kernel pointers.
	 */
	if (map == kernel_map) {
		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
	}

	err = vm_map_enter_mem_object(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    (((ref->options & kIOMapAnywhere)
	    ? VM_FLAGS_ANYWHERE
	    : VM_FLAGS_FIXED)),
	    vmk_flags,
	    ref->tag,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,                       /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// protect first and last guardSize bytes of the allocation
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
			assert(KERN_SUCCESS == err);
			err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
			assert(KERN_SUCCESS == err);
			// report the usable region, past the leading guard
			ref->mapped += guardSize;
		}
	}

	return err;
}
897 
898 IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)899 IOGeneralMemoryDescriptor::memoryReferenceMap(
900 	IOMemoryReference * ref,
901 	vm_map_t            map,
902 	mach_vm_size_t      inoffset,
903 	mach_vm_size_t      size,
904 	IOOptionBits        options,
905 	mach_vm_address_t * inaddr)
906 {
907 	IOReturn        err;
908 	int64_t         offset = inoffset;
909 	uint32_t        rangeIdx, entryIdx;
910 	vm_map_offset_t addr, mapAddr;
911 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
912 
913 	mach_vm_address_t nextAddr;
914 	mach_vm_size_t    nextLen;
915 	IOByteCount       physLen;
916 	IOMemoryEntry   * entry;
917 	vm_prot_t         prot, memEntryCacheMode;
918 	IOOptionBits      type;
919 	IOOptionBits      cacheMode;
920 	vm_tag_t          tag;
921 	// for the kIOMapPrefault option.
922 	upl_page_info_t * pageList = NULL;
923 	UInt              currentPageIndex = 0;
924 	bool              didAlloc;
925 
926 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
927 
928 	if (ref->mapRef) {
929 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
930 		return err;
931 	}
932 
933 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
934 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
935 		return err;
936 	}
937 
938 	type = _flags & kIOMemoryTypeMask;
939 
940 	prot = VM_PROT_READ;
941 	if (!(kIOMapReadOnly & options)) {
942 		prot |= VM_PROT_WRITE;
943 	}
944 	prot &= ref->prot;
945 
946 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
947 	if (kIODefaultCache != cacheMode) {
948 		// VM system requires write access to update named entry cache mode
949 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
950 	}
951 
952 	tag = (typeof(tag))getVMTag(map);
953 
954 	if (_task) {
955 		// Find first range for offset
956 		if (!_rangesCount) {
957 			return kIOReturnBadArgument;
958 		}
959 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
960 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
961 			if (remain < nextLen) {
962 				break;
963 			}
964 			remain -= nextLen;
965 		}
966 	} else {
967 		rangeIdx = 0;
968 		remain   = 0;
969 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
970 		nextLen  = size;
971 	}
972 
973 	assert(remain < nextLen);
974 	if (remain >= nextLen) {
975 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
976 		return kIOReturnBadArgument;
977 	}
978 
979 	nextAddr  += remain;
980 	nextLen   -= remain;
981 #if __ARM_MIXED_PAGE_SIZE__
982 	pageOffset = (vm_map_page_mask(map) & nextAddr);
983 #else /* __ARM_MIXED_PAGE_SIZE__ */
984 	pageOffset = (page_mask & nextAddr);
985 #endif /* __ARM_MIXED_PAGE_SIZE__ */
986 	addr       = 0;
987 	didAlloc   = false;
988 
989 	if (!(options & kIOMapAnywhere)) {
990 		addr = *inaddr;
991 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
992 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
993 		}
994 		addr -= pageOffset;
995 	}
996 
997 	// find first entry for offset
998 	for (entryIdx = 0;
999 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1000 	    entryIdx++) {
1001 	}
1002 	entryIdx--;
1003 	entry = &ref->entries[entryIdx];
1004 
1005 	// allocate VM
1006 #if __ARM_MIXED_PAGE_SIZE__
1007 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1008 #else
1009 	size = round_page_64(size + pageOffset);
1010 #endif
1011 	if (kIOMapOverwrite & options) {
1012 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1013 			map = IOPageableMapForAddress(addr);
1014 		}
1015 		err = KERN_SUCCESS;
1016 	} else {
1017 		IOMemoryDescriptorMapAllocRef ref;
1018 		ref.map     = map;
1019 		ref.tag     = tag;
1020 		ref.options = options;
1021 		ref.size    = size;
1022 		ref.prot    = prot;
1023 		if (options & kIOMapAnywhere) {
1024 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1025 			ref.mapped = 0;
1026 		} else {
1027 			ref.mapped = addr;
1028 		}
1029 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1030 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1031 		} else {
1032 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1033 		}
1034 		if (KERN_SUCCESS == err) {
1035 			addr     = ref.mapped;
1036 			map      = ref.map;
1037 			didAlloc = true;
1038 		}
1039 	}
1040 
1041 	/*
1042 	 * If the memory is associated with a device pager but doesn't have a UPL,
1043 	 * it will be immediately faulted in through the pager via populateDevicePager().
1044 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1045 	 * operations.
1046 	 */
1047 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1048 		options &= ~kIOMapPrefault;
1049 	}
1050 
1051 	/*
1052 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1053 	 * memory type, and the underlying data.
1054 	 */
1055 	if (options & kIOMapPrefault) {
1056 		/*
1057 		 * The memory must have been wired by calling ::prepare(), otherwise
1058 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1059 		 */
1060 		assert(_wireCount != 0);
1061 		assert(_memoryEntries != NULL);
1062 		if ((_wireCount == 0) ||
1063 		    (_memoryEntries == NULL)) {
1064 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1065 			return kIOReturnBadArgument;
1066 		}
1067 
1068 		// Get the page list.
1069 		ioGMDData* dataP = getDataP(_memoryEntries);
1070 		ioPLBlock const* ioplList = getIOPLList(dataP);
1071 		pageList = getPageList(dataP);
1072 
1073 		// Get the number of IOPLs.
1074 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1075 
1076 		/*
1077 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1078 		 * the offset. The research will go past it, so we'll need to go back to the
1079 		 * right range at the end.
1080 		 */
1081 		UInt ioplIndex = 0;
1082 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1083 			ioplIndex++;
1084 		}
1085 		ioplIndex--;
1086 
1087 		// Retrieve the IOPL info block.
1088 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1089 
1090 		/*
1091 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1092 		 * array.
1093 		 */
1094 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1095 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1096 		} else {
1097 			pageList = &pageList[ioplInfo.fPageInfo];
1098 		}
1099 
1100 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1101 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1102 
1103 		// Retrieve the index of the first page corresponding to the offset.
1104 		currentPageIndex = atop_32(offsetInIOPL);
1105 	}
1106 
1107 	// enter mappings
1108 	remain  = size;
1109 	mapAddr = addr;
1110 	addr    += pageOffset;
1111 
1112 	while (remain && (KERN_SUCCESS == err)) {
1113 		entryOffset = offset - entry->offset;
1114 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1115 			err = kIOReturnNotAligned;
1116 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1117 			break;
1118 		}
1119 
1120 		if (kIODefaultCache != cacheMode) {
1121 			vm_size_t unused = 0;
1122 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1123 			    memEntryCacheMode, NULL, entry->entry);
1124 			assert(KERN_SUCCESS == err);
1125 		}
1126 
1127 		entryOffset -= pageOffset;
1128 		if (entryOffset >= entry->size) {
1129 			panic("entryOffset");
1130 		}
1131 		chunk = entry->size - entryOffset;
1132 		if (chunk) {
1133 			vm_map_kernel_flags_t vmk_flags;
1134 
1135 			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1136 			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1137 
1138 			if (chunk > remain) {
1139 				chunk = remain;
1140 			}
1141 			if (options & kIOMapPrefault) {
1142 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1143 
1144 				err = vm_map_enter_mem_object_prefault(map,
1145 				    &mapAddr,
1146 				    chunk, 0 /* mask */,
1147 				    (VM_FLAGS_FIXED
1148 				    | VM_FLAGS_OVERWRITE),
1149 				    vmk_flags,
1150 				    tag,
1151 				    entry->entry,
1152 				    entryOffset,
1153 				    prot,                        // cur
1154 				    prot,                        // max
1155 				    &pageList[currentPageIndex],
1156 				    nb_pages);
1157 
1158 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1159 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1160 				}
1161 				// Compute the next index in the page list.
1162 				currentPageIndex += nb_pages;
1163 				assert(currentPageIndex <= _pages);
1164 			} else {
1165 				err = vm_map_enter_mem_object(map,
1166 				    &mapAddr,
1167 				    chunk, 0 /* mask */,
1168 				    (VM_FLAGS_FIXED
1169 				    | VM_FLAGS_OVERWRITE),
1170 				    vmk_flags,
1171 				    tag,
1172 				    entry->entry,
1173 				    entryOffset,
1174 				    false,               // copy
1175 				    prot,               // cur
1176 				    prot,               // max
1177 				    VM_INHERIT_NONE);
1178 			}
1179 			if (KERN_SUCCESS != err) {
1180 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1181 				break;
1182 			}
1183 			remain -= chunk;
1184 			if (!remain) {
1185 				break;
1186 			}
1187 			mapAddr  += chunk;
1188 			offset   += chunk - pageOffset;
1189 		}
1190 		pageOffset = 0;
1191 		entry++;
1192 		entryIdx++;
1193 		if (entryIdx >= ref->count) {
1194 			err = kIOReturnOverrun;
1195 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1196 			break;
1197 		}
1198 	}
1199 
1200 	if ((KERN_SUCCESS != err) && didAlloc) {
1201 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1202 		addr = 0;
1203 	}
1204 	*inaddr = addr;
1205 
1206 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1207 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1208 	}
1209 	return err;
1210 }
1211 
1212 #define LOGUNALIGN 0
/*
 * memoryReferenceMapNew
 *
 * Map a memory reference (the array of named-entry IOMemoryEntry items in
 * 'ref') into 'map'. This is the path taken by memoryReferenceMap() when
 * the reference was created with MAP_MEM_USE_DATA_ADDR: mappings are
 * entered with VM_FLAGS_RETURN_DATA_ADDR so the VM returns the address of
 * the data itself rather than a page-rounded base.
 *
 * ref     - the memory reference to map
 * map     - target VM map
 * inoffset- byte offset into the reference where mapping starts
 * size    - number of bytes to map
 * options - kIOMap* flags (kIOMapAnywhere, kIOMapOverwrite, kIOMapReadOnly,
 *           kIOMapPrefault, cache-mode bits)
 * inaddr  - in: requested address when !kIOMapAnywhere;
 *           out: address of the mapped data on success
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMapNew(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn            err;
	int64_t             offset = inoffset;
	uint32_t            entryIdx, firstEntryIdx;
	vm_map_offset_t     addr, mapAddr, mapAddrOut;
	vm_map_offset_t     entryOffset, remain, chunk;

	IOMemoryEntry    * entry;
	vm_prot_t          prot, memEntryCacheMode;
	IOOptionBits       type;
	IOOptionBits       cacheMode;
	vm_tag_t           tag;
	// for the kIOMapPrefault option.
	upl_page_info_t  * pageList = NULL;
	UInt               currentPageIndex = 0;
	bool               didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	// A chained reference delegates to the regular mapping path.
	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

#if LOGUNALIGN
	printf("MAP offset %qx, %qx\n", inoffset, size);
#endif

	type = _flags & kIOMemoryTypeMask;

	// Requested protection, clipped to what the reference allows.
	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (vm_tag_t) getVMTag(map);

	addr       = 0;
	didAlloc   = false;

	if (!(options & kIOMapAnywhere)) {
		addr = *inaddr;
	}

	// find first entry for offset
	for (firstEntryIdx = 0;
	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
	    firstEntryIdx++) {
	}
	firstEntryIdx--;

	// calculate required VM space

	// First pass over the entries: sum each entry's map size (which may be
	// larger than the byte count for sub-page-aligned data) into mapSize.
	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	remain  = size;
	int64_t iteroffset = offset;
	uint64_t mapSize = 0;
	while (remain) {
		entryOffset = iteroffset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}

#if LOGUNALIGN
		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
#endif

		chunk = entry->size - entryOffset;
		if (chunk) {
			if (chunk > remain) {
				chunk = remain;
			}
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapSize += entrySize;

			remain -= chunk;
			if (!remain) {
				break;
			}
			iteroffset   += chunk; // - pageOffset;
		}
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			panic("overrun");
			err = kIOReturnOverrun;
			break;
		}
	}

	// Reserve (or reuse, for kIOMapOverwrite) the VM range of mapSize bytes.
	if (kIOMapOverwrite & options) {
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = mapSize;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}

		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
#if LOGUNALIGN
		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
#endif
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * operations.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData* dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	// Second pass: enter each entry's chunk at mapAddr with FIXED|OVERWRITE
	// into the range reserved above; RETURN_DATA_ADDR yields the data address.
	remain   = size;
	mapAddr  = addr;
	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	while (remain && (KERN_SUCCESS == err)) {
#if LOGUNALIGN
		printf("offset %qx, %qx\n", offset, entry->offset);
#endif
		if (kIODefaultCache != cacheMode) {
			// MAP_MEM_ONLY update of the named entry's cache mode.
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}
		entryOffset = offset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
#if LOGUNALIGN
		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
#endif
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags;

			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

			if (chunk > remain) {
				chunk = remain;
			}
			mapAddrOut = mapAddr;
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE
				    | VM_FLAGS_RETURN_DATA_ADDR),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    prot,                        // cur
				    prot,                        // max
				    &pageList[currentPageIndex],
				    nb_pages);

				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
#if LOGUNALIGN
				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
#endif
				err = vm_map_enter_mem_object(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE
				    | VM_FLAGS_RETURN_DATA_ADDR),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    false,               // copy
				    prot,               // cur
				    prot,               // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				panic("map enter err %x", err);
				break;
			}
#if LOGUNALIGN
			printf("mapAddr o %qx\n", mapAddrOut);
#endif
			// The first chunk's data address is what we report back to
			// the caller via *inaddr.
			if (entryIdx == firstEntryIdx) {
				addr = mapAddrOut;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			// Advance by the entry's map size (not the byte chunk), to
			// stay in step with the space reserved in the first pass.
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapAddr += entrySize;
			offset  += chunk;
		}

		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			break;
		}
	}

	if (KERN_SUCCESS != err) {
		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
	}

	// NOTE(review): the reservation above was made for 'mapSize' bytes, but
	// this failure path deallocates 'size' bytes — confirm intentional.
	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	return err;
}
1537 
1538 uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1539 IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1540 	IOMemoryReference * ref,
1541 	uint64_t          * offset)
1542 {
1543 	kern_return_t kr;
1544 	vm_object_offset_t data_offset = 0;
1545 	uint64_t total;
1546 	uint32_t idx;
1547 
1548 	assert(ref->count);
1549 	if (offset) {
1550 		*offset = (uint64_t) data_offset;
1551 	}
1552 	total = 0;
1553 	for (idx = 0; idx < ref->count; idx++) {
1554 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1555 		    &data_offset);
1556 		if (KERN_SUCCESS != kr) {
1557 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1558 		} else if (0 != data_offset) {
1559 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1560 		}
1561 		if (offset && !idx) {
1562 			*offset = (uint64_t) data_offset;
1563 		}
1564 		total += round_page(data_offset + ref->entries[idx].size);
1565 	}
1566 
1567 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1568 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1569 
1570 	return total;
1571 }
1572 
1573 
1574 IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1575 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1576 	IOMemoryReference * ref,
1577 	IOByteCount       * residentPageCount,
1578 	IOByteCount       * dirtyPageCount)
1579 {
1580 	IOReturn        err;
1581 	IOMemoryEntry * entries;
1582 	unsigned int resident, dirty;
1583 	unsigned int totalResident, totalDirty;
1584 
1585 	totalResident = totalDirty = 0;
1586 	err = kIOReturnSuccess;
1587 	entries = ref->entries + ref->count;
1588 	while (entries > &ref->entries[0]) {
1589 		entries--;
1590 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1591 		if (KERN_SUCCESS != err) {
1592 			break;
1593 		}
1594 		totalResident += resident;
1595 		totalDirty    += dirty;
1596 	}
1597 
1598 	if (residentPageCount) {
1599 		*residentPageCount = totalResident;
1600 	}
1601 	if (dirtyPageCount) {
1602 		*dirtyPageCount    = totalDirty;
1603 	}
1604 	return err;
1605 }
1606 
1607 IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1608 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1609 	IOMemoryReference * ref,
1610 	IOOptionBits        newState,
1611 	IOOptionBits      * oldState)
1612 {
1613 	IOReturn        err;
1614 	IOMemoryEntry * entries;
1615 	vm_purgable_t   control;
1616 	int             totalState, state;
1617 
1618 	totalState = kIOMemoryPurgeableNonVolatile;
1619 	err = kIOReturnSuccess;
1620 	entries = ref->entries + ref->count;
1621 	while (entries > &ref->entries[0]) {
1622 		entries--;
1623 
1624 		err = purgeableControlBits(newState, &control, &state);
1625 		if (KERN_SUCCESS != err) {
1626 			break;
1627 		}
1628 		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1629 		if (KERN_SUCCESS != err) {
1630 			break;
1631 		}
1632 		err = purgeableStateBits(&state);
1633 		if (KERN_SUCCESS != err) {
1634 			break;
1635 		}
1636 
1637 		if (kIOMemoryPurgeableEmpty == state) {
1638 			totalState = kIOMemoryPurgeableEmpty;
1639 		} else if (kIOMemoryPurgeableEmpty == totalState) {
1640 			continue;
1641 		} else if (kIOMemoryPurgeableVolatile == totalState) {
1642 			continue;
1643 		} else if (kIOMemoryPurgeableVolatile == state) {
1644 			totalState = kIOMemoryPurgeableVolatile;
1645 		} else {
1646 			totalState = kIOMemoryPurgeableNonVolatile;
1647 		}
1648 	}
1649 
1650 	if (oldState) {
1651 		*oldState = totalState;
1652 	}
1653 	return err;
1654 }
1655 
1656 IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1657 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1658 	IOMemoryReference * ref,
1659 	task_t              newOwner,
1660 	int                 newLedgerTag,
1661 	IOOptionBits        newLedgerOptions)
1662 {
1663 	IOReturn        err, totalErr;
1664 	IOMemoryEntry * entries;
1665 
1666 	totalErr = kIOReturnSuccess;
1667 	entries = ref->entries + ref->count;
1668 	while (entries > &ref->entries[0]) {
1669 		entries--;
1670 
1671 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1672 		if (KERN_SUCCESS != err) {
1673 			totalErr = err;
1674 		}
1675 	}
1676 
1677 	return totalErr;
1678 }
1679 
1680 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1681 
1682 OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1683 IOMemoryDescriptor::withAddress(void *      address,
1684     IOByteCount   length,
1685     IODirection direction)
1686 {
1687 	return IOMemoryDescriptor::
1688 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1689 }
1690 
#ifndef __LP64__
/*
 * Legacy (pre-LP64) factory: descriptor over a single virtual range in
 * the given task. Returns NULL on allocation or initialization failure.
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
    IOByteCount  length,
    IODirection  direction,
    task_t       task)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> md = OSMakeShared<IOGeneralMemoryDescriptor>();
	if (!md || !md->initWithAddress(address, length, direction, task)) {
		return nullptr;
	}
	return os::move(md);
}
#endif /* !__LP64__ */
1707 
1708 OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1709 IOMemoryDescriptor::withPhysicalAddress(
1710 	IOPhysicalAddress       address,
1711 	IOByteCount             length,
1712 	IODirection             direction )
1713 {
1714 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1715 }
1716 
#ifndef __LP64__
/*
 * Legacy (pre-LP64) factory: descriptor over multiple virtual ranges.
 * Returns NULL on allocation or initialization failure.
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    task_t           task,
    bool             asReference)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> md = OSMakeShared<IOGeneralMemoryDescriptor>();
	if (!md || !md->initWithRanges(ranges, withCount, direction, task, asReference)) {
		return nullptr;
	}
	return os::move(md);
}
#endif /* !__LP64__ */
1734 
1735 OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1736 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1737     mach_vm_size_t length,
1738     IOOptionBits   options,
1739     task_t         task)
1740 {
1741 	IOAddressRange range = { address, length };
1742 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1743 }
1744 
1745 OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1746 IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1747     UInt32           rangeCount,
1748     IOOptionBits     options,
1749     task_t           task)
1750 {
1751 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1752 	if (that) {
1753 		if (task) {
1754 			options |= kIOMemoryTypeVirtual64;
1755 		} else {
1756 			options |= kIOMemoryTypePhysical64;
1757 		}
1758 
1759 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1760 			return os::move(that);
1761 		}
1762 	}
1763 
1764 	return nullptr;
1765 }
1766 
1767 
1768 /*
1769  * withOptions:
1770  *
1771  * Create a new IOMemoryDescriptor. The buffer is made up of several
1772  * virtual address ranges, from a given task.
1773  *
1774  * Passing the ranges as a reference will avoid an extra allocation.
1775  */
1776 OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1777 IOMemoryDescriptor::withOptions(void *          buffers,
1778     UInt32          count,
1779     UInt32          offset,
1780     task_t          task,
1781     IOOptionBits    opts,
1782     IOMapper *      mapper)
1783 {
1784 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1785 
1786 	if (self
1787 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1788 		return nullptr;
1789 	}
1790 
1791 	return os::move(self);
1792 }
1793 
/*
 * Base-class stub: IOMemoryDescriptor itself cannot be initialized this
 * way. Concrete subclasses (e.g. IOGeneralMemoryDescriptor) override this
 * to perform the real initialization; here it always fails.
 */
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	return false;
}
1804 
1805 #ifndef __LP64__
1806 OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1807 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1808     UInt32          withCount,
1809     IODirection     direction,
1810     bool            asReference)
1811 {
1812 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1813 	if (that) {
1814 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1815 			return os::move(that);
1816 		}
1817 	}
1818 	return nullptr;
1819 }
1820 
1821 OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1822 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
1823     IOByteCount             offset,
1824     IOByteCount             length,
1825     IODirection             direction)
1826 {
1827 	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1828 }
1829 #endif /* !__LP64__ */
1830 
1831 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1832 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1833 {
1834 	IOGeneralMemoryDescriptor *origGenMD =
1835 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1836 
1837 	if (origGenMD) {
1838 		return IOGeneralMemoryDescriptor::
1839 		       withPersistentMemoryDescriptor(origGenMD);
1840 	} else {
1841 		return nullptr;
1842 	}
1843 }
1844 
/*
 * Create a persistent descriptor sharing the original's memory reference.
 * If the (reusable) reference created here is the one the original already
 * holds, the original itself is returned retained; otherwise a new
 * descriptor is initialized from the reference via the persistent-MD path.
 */
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	if (memRef == originalMD->_memRef) {
		// Same underlying reference: hand back the original, retained,
		// and drop the extra reference memoryReferenceCreate took.
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	// initData hands ownership of memRef to the new descriptor.
	IOMDPersistentInitData initData = { originalMD, memRef };

	// NOTE(review): if initWithOptions fails here, memRef does not appear
	// to be released on this path — confirm whether init failure cleans up.
	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1870 
1871 #ifndef __LP64__
1872 bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1873 IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1874     IOByteCount   withLength,
1875     IODirection withDirection)
1876 {
1877 	_singleRange.v.address = (vm_offset_t) address;
1878 	_singleRange.v.length  = withLength;
1879 
1880 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1881 }
1882 
1883 bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1884 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1885     IOByteCount    withLength,
1886     IODirection  withDirection,
1887     task_t       withTask)
1888 {
1889 	_singleRange.v.address = address;
1890 	_singleRange.v.length  = withLength;
1891 
1892 	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1893 }
1894 
1895 bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1896 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1897 	IOPhysicalAddress      address,
1898 	IOByteCount            withLength,
1899 	IODirection            withDirection )
1900 {
1901 	_singleRange.p.address = address;
1902 	_singleRange.p.length  = withLength;
1903 
1904 	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1905 }
1906 
1907 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1908 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1909 	IOPhysicalRange * ranges,
1910 	UInt32            count,
1911 	IODirection       direction,
1912 	bool              reference)
1913 {
1914 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1915 
1916 	if (reference) {
1917 		mdOpts |= kIOMemoryAsReference;
1918 	}
1919 
1920 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1921 }
1922 
1923 bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1924 IOGeneralMemoryDescriptor::initWithRanges(
1925 	IOVirtualRange * ranges,
1926 	UInt32           count,
1927 	IODirection      direction,
1928 	task_t           task,
1929 	bool             reference)
1930 {
1931 	IOOptionBits mdOpts = direction;
1932 
1933 	if (reference) {
1934 		mdOpts |= kIOMemoryAsReference;
1935 	}
1936 
1937 	if (task) {
1938 		mdOpts |= kIOMemoryTypeVirtual;
1939 
1940 		// Auto-prepare if this is a kernel memory descriptor as very few
1941 		// clients bother to prepare() kernel memory.
1942 		// But it was not enforced so what are you going to do?
1943 		if (task == kernel_task) {
1944 			mdOpts |= kIOMemoryAutoPrepare;
1945 		}
1946 	} else {
1947 		mdOpts |= kIOMemoryTypePhysical;
1948 	}
1949 
1950 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1951 }
1952 #endif /* !__LP64__ */
1953 
1954 /*
1955  * initWithOptions:
1956  *
1957  *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1958  * from a given task, several physical ranges, an UPL from the ubc
1959  * system or a uio (may be 64bit) from the BSD subsystem.
1960  *
1961  * Passing the ranges as a reference will avoid an extra allocation.
1962  *
1963  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1964  * existing instance -- note this behavior is not commonly supported in other
1965  * I/O Kit classes, although it is supported here.
1966  */
1967 
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// Reject 32-bit virtual ranges for a 64-bit task: addresses would be
	// truncated.  Callers must use ::withAddressRange() instead.
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Sanity-check the type/task combination before touching any state.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized  instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		// Non-physical descriptors may still be wired; unwind prepare()s.
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			while (_wireCount) {
				complete();
			}
		}
		// Free the old range storage unless it was caller-owned.
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// Preserve redirection across the re-init; only when NOT redirected
		// do we drop the memory reference and existing mappings.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options               &= ~(kIOMemoryPreparedReadOnly);
	_flags                 = options;
	_task                  = task;

#ifndef __LP64__
	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-instance bookkeeping for the (re-)initialization.
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Allocate or release the prepare lock to match the thread-safety flag.
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// External UPL: wrap the caller's upl_t in a single ioPLBlock.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length    = count;
		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice conveniently equal to 1
		iopl.fFlags  = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// A single 64-bit range that fits below 4GB can be
				// demoted to the 32-bit type and stored inline.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (kIOMemoryTypeVirtual64 == type) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				// A single range lives in the embedded storage; multiple
				// ranges are copied into a fresh allocation.
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
#if CONFIG_PROB_GZALLOC
		// Strip probabilistic-gzalloc tag bits from kernel addresses.
		if (task == kernel_task) {
			for (UInt32 i = 0; i < count; i++) {
				_ranges.v[i].address = pgz_decode(_ranges.v[i].address, _ranges.v[i].length);
			}
		}
#endif /* CONFIG_PROB_GZALLOC */
		_rangesCount = count;

		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		// Sum lengths and page counts across all ranges, bailing out of the
		// loop on any arithmetic overflow (detected by ind < count below).
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind);
			if (_task) {
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			// Track the highest physical page for physical descriptors.
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false;                                   /* overflow */
		}
		_length      = totalLength;
		_pages       = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed


		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// A descriptor larger than all of physical memory is bogus.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Pick up a VM tag for accounting when one wasn't set.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			// Persistent descriptors need a named-entry reference up front.
			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2347 
2348 /*
2349  * free
2350  *
2351  * Free resources.
2352  */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach any device-pager view of this descriptor under the lock.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		ioGMDData * dataP;
		// Physical descriptors are never complete()d; tear down any DMA
		// mapping directly.
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance any outstanding prepare() calls (implied completion).
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free the range storage unless it is caller-owned (held by reference).
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2416 
2417 #ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	// Pre-LP64 API; intentionally unsupported.
	panic("IOGMD::unmapFromKernel deprecated");
}
2423 
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	// Pre-LP64 API; intentionally unsupported.
	panic("IOGMD::mapIntoKernel deprecated");
}
2429 #endif /* !__LP64__ */
2430 
2431 /*
2432  * getDirection:
2433  *
2434  * Get the direction of the transfer.
2435  */
2436 IODirection
getDirection() const2437 IOMemoryDescriptor::getDirection() const
2438 {
2439 #ifndef __LP64__
2440 	if (_direction) {
2441 		return _direction;
2442 	}
2443 #endif /* !__LP64__ */
2444 	return (IODirection) (_flags & kIOMemoryDirectionMask);
2445 }
2446 
2447 /*
2448  * getLength:
2449  *
2450  * Get the length of the transfer (over all ranges).
2451  */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total transfer length in bytes, summed over all ranges at init time.
	return _length;
}
2457 
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	// Client-defined tag; stored verbatim, not interpreted here.
	_tag = tag;
}
2463 
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	// Returns the client-defined tag set via setTag().
	return _tag;
}
2469 
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	// Raw option/type flag word established by initWithOptions().
	return _flags;
}
2475 
2476 OSObject *
copyContext(void) const2477 IOMemoryDescriptor::copyContext(void) const
2478 {
2479 	if (reserved) {
2480 		OSObject * context = reserved->contextObject;
2481 		if (context) {
2482 			context->retain();
2483 		}
2484 		return context;
2485 	} else {
2486 		return NULL;
2487 	}
2488 }
2489 
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	if (this->reserved == NULL && obj == NULL) {
		// No existing object, and no object to set
		return;
	}

	// Lazily create the expansion area that holds the context pointer.
	IOMemoryDescriptorReserved * reserved = getKernelReserved();
	if (reserved) {
		OSObject * oldObject = reserved->contextObject;
		// Atomically detach the old object before releasing it, so the
		// release only happens once even under concurrent clears.
		// NOTE(review): the store of obj below is a plain write; two
		// concurrent setContext() callers could race on it -- confirm
		// callers serialize externally.
		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
			oldObject->release();
		}
		if (obj != NULL) {
			obj->retain();
			reserved->contextObject = obj;
		}
	}
}
2510 
2511 #ifndef __LP64__
2512 #pragma clang diagnostic push
2513 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2514 
2515 // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
{
	addr64_t physAddr = 0;

	// Briefly wire the memory so the physical segment lookup is valid,
	// then unwire before returning.
	if (prepare() == kIOReturnSuccess) {
		physAddr = getPhysicalSegment64( offset, length );
		complete();
	}

	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}
2528 
2529 #pragma clang diagnostic pop
2530 
2531 #endif /* !__LP64__ */
2532 
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	// Copies up to `length` bytes starting at `offset` within the
	// descriptor into the kernel buffer `bytes`.  Returns the number of
	// bytes actually copied (0 on any validation failure).
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;


	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-host) memory cannot be copied from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Walk the physical segments, copying each into the destination.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t        srcAddr64;
		IOByteCount     srcLen;

		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv takes an unsigned int count; clamp oversized segments.
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// Physical source -> kernel-virtual sink copy.
		copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

	return length - remaining;
}
2596 
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	// Copies up to `length` bytes from the kernel buffer `bytes` into the
	// descriptor starting at `inoffset`.  A NULL `bytes` zero-fills the
	// target range instead.  Returns the number of bytes written.
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	// Refuse to write through a read-only preparation.
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-host) memory cannot be written from here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Walk the physical segments, filling each from the source.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv/bzero_phys take an unsigned int count; clamp large runs.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (!srcAddr) {
			// NULL source means zero-fill the destination segment.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			srcAddr   += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	// The zero-fill path bypassed copypv's cache handling; flush here.
	if (!srcAddr) {
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2676 
2677 #ifndef __LP64__
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	// Pre-LP64 API; intentionally unsupported.
	panic("IOGMD::setPosition deprecated");
}
2683 #endif /* !__LP64__ */
2684 
// Monotonic ID generators (advanced with OSIncrementAtomic64).  Preparation
// IDs start above 2^32 so they never collide with low "special" values;
// descriptor IDs start just past the invalid sentinel.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2687 
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	ioGMDData *dataP;

	// Preparation IDs are only meaningful while the memory is wired.
	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	// Physical descriptors keep their ID in the base class's expansion
	// area rather than in ioGMDData.
	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	// Lazily assign a fresh ID; the CAS makes concurrent first calls agree
	// on a single winner.
	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
	}
	return dataP->fPreparationID;
}
2713 
void
IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
{
	// Drop the references held in the expansion area; the struct itself is
	// freed (or handed to the device pager) by the caller.
	if (reserved->creator) {
		task_deallocate(reserved->creator);
		reserved->creator = NULL;
	}

	if (reserved->contextObject) {
		reserved->contextObject->release();
		reserved->contextObject = NULL;
	}
}
2727 
IOMemoryDescriptorReserved *
IOMemoryDescriptor::getKernelReserved( void )
{
	// Lazily allocate the expansion area on first use.
	if (!reserved) {
		reserved = IOMallocType(IOMemoryDescriptorReserved);
	}
	return reserved;
}
2736 
void
IOMemoryDescriptor::setPreparationID( void )
{
	// Assign a fresh ID only once; the CAS ensures a single winner if
	// multiple threads race on the first assignment.
	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
	}
}
2745 
2746 uint64_t
getPreparationID(void)2747 IOMemoryDescriptor::getPreparationID( void )
2748 {
2749 	if (reserved) {
2750 		return reserved->preparationID;
2751 	} else {
2752 		return kIOPreparationIDUnsupported;
2753 	}
2754 }
2755 
void
IOMemoryDescriptor::setDescriptorID( void )
{
	// Assign a fresh descriptor ID only once; the CAS ensures a single
	// winner if multiple threads race on the first assignment.
	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
	}
}
2764 
2765 uint64_t
getDescriptorID(void)2766 IOMemoryDescriptor::getDescriptorID( void )
2767 {
2768 	setDescriptorID();
2769 
2770 	if (reserved) {
2771 		return reserved->descriptorID;
2772 	} else {
2773 		return kIODescriptorIDInvalid;
2774 	}
2775 }
2776 
2777 IOReturn
ktraceEmitPhysicalSegments(void)2778 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2779 {
2780 	if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2781 		return kIOReturnSuccess;
2782 	}
2783 
2784 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2785 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2786 		return kIOReturnBadArgument;
2787 	}
2788 
2789 	uint64_t descriptorID = getDescriptorID();
2790 	assert(descriptorID != kIODescriptorIDInvalid);
2791 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2792 		return kIOReturnBadArgument;
2793 	}
2794 
2795 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2796 
2797 #if __LP64__
2798 	static const uint8_t num_segments_page = 8;
2799 #else
2800 	static const uint8_t num_segments_page = 4;
2801 #endif
2802 	static const uint8_t num_segments_long = 2;
2803 
2804 	IOPhysicalAddress segments_page[num_segments_page];
2805 	IOPhysicalRange   segments_long[num_segments_long];
2806 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2807 	memset(segments_long, 0, sizeof(segments_long));
2808 
2809 	uint8_t segment_page_idx = 0;
2810 	uint8_t segment_long_idx = 0;
2811 
2812 	IOPhysicalRange physical_segment;
2813 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2814 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2815 
2816 		if (physical_segment.length == 0) {
2817 			break;
2818 		}
2819 
2820 		/**
2821 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2822 		 * buffer memory, pack segment events according to the following.
2823 		 *
2824 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2825 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2826 		 *
2827 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2828 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2829 		 * - unmapped pages will have a ppn of MAX_INT_32
2830 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2831 		 * - address_0, length_0, address_0, length_1
2832 		 * - unmapped pages will have an address of 0
2833 		 *
2834 		 * During each iteration do the following depending on the length of the mapping:
2835 		 * 1. add the current segment to the appropriate queue of pending segments
2836 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2837 		 * 1a. if FALSE emit and reset all events in the previous queue
2838 		 * 2. check if we have filled up the current queue of pending events
2839 		 * 2a. if TRUE emit and reset all events in the pending queue
2840 		 * 3. after completing all iterations emit events in the current queue
2841 		 */
2842 
2843 		bool emit_page = false;
2844 		bool emit_long = false;
2845 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2846 			segments_page[segment_page_idx] = physical_segment.address;
2847 			segment_page_idx++;
2848 
2849 			emit_long = segment_long_idx != 0;
2850 			emit_page = segment_page_idx == num_segments_page;
2851 
2852 			if (os_unlikely(emit_long)) {
2853 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2854 				    segments_long[0].address, segments_long[0].length,
2855 				    segments_long[1].address, segments_long[1].length);
2856 			}
2857 
2858 			if (os_unlikely(emit_page)) {
2859 #if __LP64__
2860 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2861 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2862 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2863 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2864 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2865 #else
2866 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2867 				    (ppnum_t) atop_32(segments_page[1]),
2868 				    (ppnum_t) atop_32(segments_page[2]),
2869 				    (ppnum_t) atop_32(segments_page[3]),
2870 				    (ppnum_t) atop_32(segments_page[4]));
2871 #endif
2872 			}
2873 		} else {
2874 			segments_long[segment_long_idx] = physical_segment;
2875 			segment_long_idx++;
2876 
2877 			emit_page = segment_page_idx != 0;
2878 			emit_long = segment_long_idx == num_segments_long;
2879 
2880 			if (os_unlikely(emit_page)) {
2881 #if __LP64__
2882 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2883 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2884 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2885 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2886 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2887 #else
2888 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2889 				    (ppnum_t) atop_32(segments_page[1]),
2890 				    (ppnum_t) atop_32(segments_page[2]),
2891 				    (ppnum_t) atop_32(segments_page[3]),
2892 				    (ppnum_t) atop_32(segments_page[4]));
2893 #endif
2894 			}
2895 
2896 			if (emit_long) {
2897 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2898 				    segments_long[0].address, segments_long[0].length,
2899 				    segments_long[1].address, segments_long[1].length);
2900 			}
2901 		}
2902 
2903 		if (os_unlikely(emit_page)) {
2904 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2905 			segment_page_idx = 0;
2906 		}
2907 
2908 		if (os_unlikely(emit_long)) {
2909 			memset(segments_long, 0, sizeof(segments_long));
2910 			segment_long_idx = 0;
2911 		}
2912 	}
2913 
2914 	if (segment_page_idx != 0) {
2915 		assert(segment_long_idx == 0);
2916 #if __LP64__
2917 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2918 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2919 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2920 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2921 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2922 #else
2923 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2924 		    (ppnum_t) atop_32(segments_page[1]),
2925 		    (ppnum_t) atop_32(segments_page[2]),
2926 		    (ppnum_t) atop_32(segments_page[3]),
2927 		    (ppnum_t) atop_32(segments_page[4]));
2928 #endif
2929 	} else if (segment_long_idx != 0) {
2930 		assert(segment_page_idx == 0);
2931 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2932 		    segments_long[0].address, segments_long[0].length,
2933 		    segments_long[1].address, segments_long[1].length);
2934 	}
2935 
2936 	return kIOReturnSuccess;
2937 }
2938 
2939 void
setVMTags(uint32_t kernelTag,uint32_t userTag)2940 IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2941 {
2942 	_kernelTag = (vm_tag_t) kernelTag;
2943 	_userTag   = (vm_tag_t) userTag;
2944 }
2945 
2946 uint32_t
getVMTag(vm_map_t map)2947 IOMemoryDescriptor::getVMTag(vm_map_t map)
2948 {
2949 	if (vm_kernel_map_is_kernel(map)) {
2950 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2951 			return (uint32_t) _kernelTag;
2952 		}
2953 	} else {
2954 		if (VM_KERN_MEMORY_NONE != _userTag) {
2955 			return (uint32_t) _userTag;
2956 		}
2957 	}
2958 	return IOMemoryTag(map);
2959 }
2960 
/*
 * Back end for IODMACommand and the physical-segment accessors. Dispatches
 * the DMA-related operations (kIOMDDMAMap, kIOMDDMAUnmap, kIOMDAddDMAMapSpec,
 * kIOMDGetCharacteristics, kIOMDDMAActive) and, for kIOMDWalkSegments /
 * kIOMDFirstSegment, computes the next contiguous segment starting at
 * isP->fIO.fOffset, coalescing adjacent ranges/pages where possible.
 * Returns kIOReturnUnderrun when dataSize is too small for the op,
 * kIOReturnBadArgument for unknown ops.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOReturn err = kIOReturnSuccess;
	DMACommandOps params;
	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
	ioGMDData *dataP;

	// Split op into its parameter bits and the operation selector proper.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// Lazily create the memory-entries data block if it doesn't exist yet.
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries && data->fMapper) {
			bool remap, keepMap;
			dataP = getDataP(_memoryEntries);

			// Tighten the stored mapping constraints to the strictest seen so far.
			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
			}
			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
			}

			// Only a whole-descriptor mapping through the system mapper is
			// cached on the descriptor and shared between commands.
			keepMap = (data->fMapper == gIOSystemMapper);
			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockLock(_prepareLock);
			}

			// Remap when the cached mapping cannot satisfy the (possibly
			// tightened) address-width or alignment constraints.
			remap = (!keepMap);
			remap |= (dataP->fDMAMapNumAddressBits < 64)
			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
			remap |= (dataP->fDMAMapAlignment > page_size);

			if (remap || !dataP->fMappedBaseValid) {
				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
					dataP->fMappedBase      = data->fAlloc;
					dataP->fMappedBaseValid = true;
					dataP->fMappedLength    = data->fAllocLength;
					data->fAllocLength      = 0;    // IOMD owns the alloc now
				}
			} else {
				// Reuse the cached mapping; record the use against the command.
				data->fAlloc = dataP->fMappedBase;
				data->fAllocLength = 0;         // give out IOMD map
				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
			}

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockUnlock(_prepareLock);
			}
		}
		return err;
	}
	if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// NOTE(review): the dmaUnmap result is captured but not propagated;
		// unmap failures are deliberately reported as success here.
		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		return kIOReturnSuccess;
	}

	if (kIOMDAddDMAMapSpec == op) {
		if (dataSize < sizeof(IODMAMapSpecification)) {
			return kIOReturnUnderrun;
		}

		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		// Merge the caller's spec into the stored constraints (strictest wins).
		if (_memoryEntries) {
			dataP = getDataP(_memoryEntries);
			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->numAddressBits;
			}
			if (data->alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->alignment;
			}
		}
		return kIOReturnSuccess;
	}

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = _length;
		data->fSGCount = _rangesCount;
		data->fPages = _pages;
		data->fDirection = getDirection();
		if (!_wireCount) {
			data->fIsPrepared = false;
		} else {
			data->fIsPrepared = true;
			data->fHighestPage = _highestPage;
			if (_memoryEntries) {
				dataP = getDataP(_memoryEntries);
				ioPLBlock *ioplList = getIOPLList(dataP);
				UInt count = getNumIOPL(_memoryEntries, dataP);
				// A single IOPL lets us report its page alignment.
				if (count == 1) {
					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
				}
			}
		}

		return kIOReturnSuccess;
	} else if (kIOMDDMAActive == op) {
		if (params) {
			// Activation: bump the DMA reference count; the first activation
			// drops the cached map name.
			int16_t prior;
			prior = OSAddAtomic16(1, &md->_dmaReferences);
			if (!prior) {
				md->_mapName = NULL;
			}
		} else {
			// Deactivation: drop a reference; underflow is a fatal bug.
			if (md->_dmaReferences) {
				OSAddAtomic16(-1, &md->_dmaReferences);
			} else {
				panic("_dmaReferences underflow");
			}
		}
	} else if (kIOMDWalkSegments != op) {
		return kIOReturnBadArgument;
	}

	// Get the next segment
	struct InternalState {
		IOMDDMAWalkSegmentArgs fIO;
		mach_vm_size_t fOffset2Index;
		mach_vm_size_t fNextOffset;
		UInt fIndex;
	} *isP;

	// Find the next segment
	if (dataSize < sizeof(*isP)) {
		return kIOReturnUnderrun;
	}

	isP = (InternalState *) vData;
	uint64_t offset = isP->fIO.fOffset;
	uint8_t mapped = isP->fIO.fMapped;
	uint64_t mappedBase;

	// Remote memory has no local mapping to walk.
	if (mapped && (kIOMemoryRemote & _flags)) {
		return kIOReturnNotAttached;
	}

	// Establish a system-mapper mapping on demand when a mapped walk is
	// requested and no valid cached mapping exists yet.
	if (IOMapper::gSystem && mapped
	    && (!(kIOMemoryHostOnly & _flags))
	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		dataP = getDataP(_memoryEntries);
		if (dataP->fMapper) {
			IODMAMapSpecification mapSpec;
			bzero(&mapSpec, sizeof(mapSpec));
			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
			mapSpec.alignment = dataP->fDMAMapAlignment;
			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
			if (kIOReturnSuccess != err) {
				return err;
			}
			dataP->fMappedBaseValid = true;
		}
	}

	// Resolve the mapped base, or fall back to an unmapped walk.
	if (mapped) {
		if (IOMapper::gSystem
		    && (!(kIOMemoryHostOnly & _flags))
		    && _memoryEntries
		    && (dataP = getDataP(_memoryEntries))
		    && dataP->fMappedBaseValid) {
			mappedBase = dataP->fMappedBase;
		} else {
			mapped = 0;
		}
	}

	if (offset >= _length) {
		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
	}

	// Validate the previous offset
	UInt ind;
	mach_vm_size_t off2Ind = isP->fOffset2Index;
	// Resume from the cached index when the caller continues a prior walk;
	// otherwise restart the scan from the first range.
	if (!params
	    && offset
	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
		ind = isP->fIndex;
	} else {
		ind = off2Ind = 0; // Start from beginning
	}
	mach_vm_size_t length;
	UInt64 address;

	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
		// Physical address based memory descriptor
		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}

		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#ifndef __LP64__
	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
		// Physical address based memory descriptor
		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}
		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#endif /* !__LP64__ */
	else {
		// Wired (IOPL-backed) memory: walk the page lists.
		do {
			if (!_wireCount) {
				panic("IOGMD: not wired for the IODMACommand");
			}

			assert(_memoryEntries);

			dataP = getDataP(_memoryEntries);
			const ioPLBlock *ioplList = getIOPLList(dataP);
			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
			upl_page_info_t *pageList = getPageList(dataP);

			assert(numIOPLs > 0);

			// Scan through iopl info blocks looking for block containing offset
			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
				ind++;
			}

			// Go back to actual range as search goes past it
			ioPLBlock ioplInfo = ioplList[ind - 1];
			off2Ind = ioplInfo.fIOMDOffset;

			if (ind < numIOPLs) {
				length = ioplList[ind].fIOMDOffset;
			} else {
				length = _length;
			}
			length -= offset;       // Remainder within iopl

			// Subtract offset till this iopl in total list
			offset -= off2Ind;

			// If a mapped address is requested and this is a pre-mapped IOPL
			// then just need to compute an offset relative to the mapped base.
			if (mapped) {
				offset += (ioplInfo.fPageOffset & PAGE_MASK);
				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
				continue; // Done leave do/while(false) now
			}

			// The offset is rebased into the current iopl.
			// Now add the iopl 1st page offset.
			offset += ioplInfo.fPageOffset;

			// For external UPLs the fPageInfo field points directly to
			// the upl's upl_page_info_t array.
			if (ioplInfo.fFlags & kIOPLExternUPL) {
				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
			} else {
				pageList = &pageList[ioplInfo.fPageInfo];
			}

			// Check for direct device non-paged memory
			if (ioplInfo.fFlags & kIOPLOnDevice) {
				address = ptoa_64(pageList->phys_addr) + offset;
				continue; // Done leave do/while(false) now
			}

			// Now we need compute the index into the pageList
			UInt pageInd = atop_32(offset);
			offset &= PAGE_MASK;

			// Compute the starting address of this segment
			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
			if (!pageAddr) {
				panic("!pageList phys_addr");
			}

			address = ptoa_64(pageAddr) + offset;

			// length is currently set to the length of the remainider of the iopl.
			// We need to check that the remainder of the iopl is contiguous.
			// This is indicated by pageList[ind].phys_addr being sequential.
			IOByteCount contigLength = PAGE_SIZE - offset;
			while (contigLength < length
			    && ++pageAddr == pageList[++pageInd].phys_addr) {
				contigLength += PAGE_SIZE;
			}

			if (contigLength < length) {
				length = contigLength;
			}


			assert(address);
			assert(length);
		} while (false);
	}

	// Update return values and state
	isP->fIO.fIOVMAddr = address;
	isP->fIO.fLength   = length;
	isP->fIndex        = ind;
	isP->fOffset2Index = off2Ind;
	isP->fNextOffset   = isP->fIO.fOffset + length;

	return kIOReturnSuccess;
}
3343 
/*
 * Return the address and maximal contiguous length of the segment at
 * 'offset', per 'options': _kIOMemorySourceSegment walks the original source
 * ranges; otherwise the segment walker (dmaCommandOperation) is used, with
 * kIOMemoryMapperNone selecting CPU-physical rather than mapper addresses.
 * Returns 0 (and *lengthOfSegment = 0) past the end of the descriptor.
 */
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Use the segment walker to find the first segment at 'offset'.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		// kIOReturnOverrun just means end-of-descriptor; only log real errors.
		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				// Translate to CPU-physical and extend the segment while the
				// translated pages remain physically contiguous.
				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// A zero address means no segment; report a zero length with it.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3450 
3451 #ifndef __LP64__
3452 #pragma clang diagnostic push
3453 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3454 
3455 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3456 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3457 {
3458 	addr64_t address = 0;
3459 
3460 	if (options & _kIOMemorySourceSegment) {
3461 		address = getSourceSegment(offset, lengthOfSegment);
3462 	} else if (options & kIOMemoryMapperNone) {
3463 		address = getPhysicalSegment64(offset, lengthOfSegment);
3464 	} else {
3465 		address = getPhysicalSegment(offset, lengthOfSegment);
3466 	}
3467 
3468 	return address;
3469 }
3470 #pragma clang diagnostic pop
3471 
3472 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3473 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3474 {
3475 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3476 }
3477 
3478 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3479 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3480 {
3481 	addr64_t    address = 0;
3482 	IOByteCount length  = 0;
3483 
3484 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3485 
3486 	if (lengthOfSegment) {
3487 		length = *lengthOfSegment;
3488 	}
3489 
3490 	if ((address + length) > 0x100000000ULL) {
3491 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3492 		    address, (long) length, (getMetaClass())->getClassName());
3493 	}
3494 
3495 	return (IOPhysicalAddress) address;
3496 }
3497 
3498 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3499 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3500 {
3501 	IOPhysicalAddress phys32;
3502 	IOByteCount       length;
3503 	addr64_t          phys64;
3504 	IOMapper *        mapper = NULL;
3505 
3506 	phys32 = getPhysicalSegment(offset, lengthOfSegment);
3507 	if (!phys32) {
3508 		return 0;
3509 	}
3510 
3511 	if (gIOSystemMapper) {
3512 		mapper = gIOSystemMapper;
3513 	}
3514 
3515 	if (mapper) {
3516 		IOByteCount origLen;
3517 
3518 		phys64 = mapper->mapToPhysicalAddress(phys32);
3519 		origLen = *lengthOfSegment;
3520 		length = page_size - (phys64 & (page_size - 1));
3521 		while ((length < origLen)
3522 		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3523 			length += page_size;
3524 		}
3525 		if (length > origLen) {
3526 			length = origLen;
3527 		}
3528 
3529 		*lengthOfSegment = length;
3530 	} else {
3531 		phys64 = (addr64_t) phys32;
3532 	}
3533 
3534 	return phys64;
3535 }
3536 
3537 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3538 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3539 {
3540 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3541 }
3542 
3543 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3544 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3545 {
3546 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3547 }
3548 
3549 #pragma clang diagnostic push
3550 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3551 
3552 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3553 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3554     IOByteCount * lengthOfSegment)
3555 {
3556 	if (_task == kernel_task) {
3557 		return (void *) getSourceSegment(offset, lengthOfSegment);
3558 	} else {
3559 		panic("IOGMD::getVirtualSegment deprecated");
3560 	}
3561 
3562 	return NULL;
3563 }
3564 #pragma clang diagnostic pop
3565 #endif /* !__LP64__ */
3566 
/*
 * Generic (non-IOGeneralMemoryDescriptor) DMA operation dispatch. Subclasses
 * without their own override get characteristics from getLength()/
 * getDirection(), and segment walks built on repeated getPhysicalSegment()
 * calls with adjacent-segment coalescing. Map/unmap forward to dmaMap()/
 * dmaUnmap(). Returns kIOReturnUnderrun for short buffers and
 * kIOReturnBadArgument for unknown ops.
 */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Split op into its parameter bits and the operation selector proper.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// Mapped walks are not supported by this generic implementation.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Get the first segment, then extend it while following segments are
		// physically adjacent.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		// NOTE(review): the dmaUnmap result is captured but not propagated;
		// unmap failures are deliberately reported as success here.
		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3638 
/*
 * Change the purgeable state of the descriptor's backing memory. With a
 * memory reference the superclass handles it; otherwise the state is applied
 * directly to the owning task's vm_map via vm_map_purgable_control() — this
 * path supports only a single range. When 'oldState' is non-NULL and the
 * call succeeds it receives the previous purgeable state. Remote memory is
 * rejected with kIOReturnNotAttached.
 */
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable kernel buffers live in a submap we can't reach here.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0);

			// Translate the IOKit state into VM control/state values.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Translate the returned VM state back to IOKit bits.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3702 
3703 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3704 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3705     IOOptionBits * oldState )
3706 {
3707 	IOReturn err = kIOReturnNotReady;
3708 
3709 	if (kIOMemoryThreadSafe & _flags) {
3710 		LOCK;
3711 	}
3712 	if (_memRef) {
3713 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3714 	}
3715 	if (kIOMemoryThreadSafe & _flags) {
3716 		UNLOCK;
3717 	}
3718 
3719 	return err;
3720 }
3721 
3722 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3723 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3724     int newLedgerTag,
3725     IOOptionBits newLedgerOptions )
3726 {
3727 	IOReturn      err = kIOReturnSuccess;
3728 
3729 	assert(!(kIOMemoryRemote & _flags));
3730 	if (kIOMemoryRemote & _flags) {
3731 		return kIOReturnNotAttached;
3732 	}
3733 
3734 	if (iokit_iomd_setownership_enabled == FALSE) {
3735 		return kIOReturnUnsupported;
3736 	}
3737 
3738 	if (_memRef) {
3739 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3740 	} else {
3741 		err = kIOReturnUnsupported;
3742 	}
3743 
3744 	return err;
3745 }
3746 
3747 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3748 IOMemoryDescriptor::setOwnership( task_t newOwner,
3749     int newLedgerTag,
3750     IOOptionBits newLedgerOptions )
3751 {
3752 	IOReturn err = kIOReturnNotReady;
3753 
3754 	assert(!(kIOMemoryRemote & _flags));
3755 	if (kIOMemoryRemote & _flags) {
3756 		return kIOReturnNotAttached;
3757 	}
3758 
3759 	if (iokit_iomd_setownership_enabled == FALSE) {
3760 		return kIOReturnUnsupported;
3761 	}
3762 
3763 	if (kIOMemoryThreadSafe & _flags) {
3764 		LOCK;
3765 	}
3766 	if (_memRef) {
3767 		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3768 	} else {
3769 		IOMultiMemoryDescriptor * mmd;
3770 		IOSubMemoryDescriptor   * smd;
3771 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3772 			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3773 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3774 			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3775 		}
3776 	}
3777 	if (kIOMemoryThreadSafe & _flags) {
3778 		UNLOCK;
3779 	}
3780 
3781 	return err;
3782 }
3783 
3784 
3785 uint64_t
getDMAMapLength(uint64_t * offset)3786 IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3787 {
3788 	uint64_t length;
3789 
3790 	if (_memRef) {
3791 		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3792 	} else {
3793 		IOByteCount       iterate, segLen;
3794 		IOPhysicalAddress sourceAddr, sourceAlign;
3795 
3796 		if (kIOMemoryThreadSafe & _flags) {
3797 			LOCK;
3798 		}
3799 		length = 0;
3800 		iterate = 0;
3801 		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3802 			sourceAlign = (sourceAddr & page_mask);
3803 			if (offset && !iterate) {
3804 				*offset = sourceAlign;
3805 			}
3806 			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3807 			iterate += segLen;
3808 		}
3809 		if (!iterate) {
3810 			length = getLength();
3811 			if (offset) {
3812 				*offset = 0;
3813 			}
3814 		}
3815 		if (kIOMemoryThreadSafe & _flags) {
3816 			UNLOCK;
3817 		}
3818 	}
3819 
3820 	return length;
3821 }
3822 
3823 
3824 IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3825 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3826     IOByteCount * dirtyPageCount )
3827 {
3828 	IOReturn err = kIOReturnNotReady;
3829 
3830 	assert(!(kIOMemoryRemote & _flags));
3831 	if (kIOMemoryRemote & _flags) {
3832 		return kIOReturnNotAttached;
3833 	}
3834 
3835 	if (kIOMemoryThreadSafe & _flags) {
3836 		LOCK;
3837 	}
3838 	if (_memRef) {
3839 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3840 	} else {
3841 		IOMultiMemoryDescriptor * mmd;
3842 		IOSubMemoryDescriptor   * smd;
3843 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3844 			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3845 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3846 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3847 		}
3848 	}
3849 	if (kIOMemoryThreadSafe & _flags) {
3850 		UNLOCK;
3851 	}
3852 
3853 	return err;
3854 }
3855 
3856 
3857 #if defined(__arm64__)
3858 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3859 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3860 #else /* defined(__arm64__) */
3861 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3862 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3863 #endif /* defined(__arm64__) */
3864 
3865 static void
SetEncryptOp(addr64_t pa,unsigned int count)3866 SetEncryptOp(addr64_t pa, unsigned int count)
3867 {
3868 	ppnum_t page, end;
3869 
3870 	page = (ppnum_t) atop_64(round_page_64(pa));
3871 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3872 	for (; page < end; page++) {
3873 		pmap_clear_noencrypt(page);
3874 	}
3875 }
3876 
3877 static void
ClearEncryptOp(addr64_t pa,unsigned int count)3878 ClearEncryptOp(addr64_t pa, unsigned int count)
3879 {
3880 	ppnum_t page, end;
3881 
3882 	page = (ppnum_t) atop_64(round_page_64(pa));
3883 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3884 	for (; page < end; page++) {
3885 		pmap_set_noencrypt(page);
3886 	}
3887 }
3888 
/*
 * Apply a per-physical-segment operation over [offset, offset + length)
 * of this descriptor: an incoherent-IO cache flush/store, or setting/
 * clearing page encryption.  Walks the physical segments, clipping each
 * to the remaining byte count, and dispatches to the selected helper.
 * Returns kIOReturnUnderrun if the walk ends before all bytes were
 * covered, kIOReturnUnsupported for an unknown option, and
 * kIOReturnNotAttached for remote (DriverKit-backed) memory.
 */
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	// Legacy 2-argument helper (cache ops on non-arm64, encrypt ops everywhere).
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	// Extended 4-argument helper used for arm64 cache maintenance; may
	// report completion of the whole range via *result.
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Select the helper for the requested operation.
	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Coherent IO platforms: a single call satisfies the whole request.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// Coherent IO platforms: a single call satisfies the whole request.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clamp the request to the descriptor's extent.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// The helpers take an unsigned int count; cap the per-call
		// length so the cast below cannot truncate.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// NOTE(review): this clamps the loop counter itself, not just the
		// value passed to func_ext — for descriptors larger than UINT_MAX
		// bytes the tail beyond 4GB appears to be skipped; presumably such
		// requests do not occur in practice — confirm.
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// A non-zero result means the helper covered everything left.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4003 
4004 /*
4005  *
4006  */
4007 
4008 #if defined(__i386__) || defined(__x86_64__)
4009 
4010 extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4011 
4012 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4013  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4014  * kernel non-text data -- should we just add another range instead?
4015  */
4016 #define io_kernel_static_start  vm_kernel_stext
4017 #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4018 
4019 #elif defined(__arm64__)
4020 
4021 extern vm_offset_t              static_memory_end;
4022 
4023 #if defined(__arm64__)
4024 #define io_kernel_static_start vm_kext_base
4025 #else /* defined(__arm64__) */
4026 #define io_kernel_static_start vm_kernel_stext
4027 #endif /* defined(__arm64__) */
4028 
4029 #define io_kernel_static_end    static_memory_end
4030 
4031 #else
4032 #error io_kernel_static_end is undefined for this architecture
4033 #endif
4034 
4035 static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4036 io_get_kernel_static_upl(
4037 	vm_map_t                /* map */,
4038 	uintptr_t               offset,
4039 	upl_size_t              *upl_size,
4040 	unsigned int            *page_offset,
4041 	upl_t                   *upl,
4042 	upl_page_info_array_t   page_list,
4043 	unsigned int            *count,
4044 	ppnum_t                 *highest_page)
4045 {
4046 	unsigned int pageCount, page;
4047 	ppnum_t phys;
4048 	ppnum_t highestPage = 0;
4049 
4050 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4051 	if (pageCount > *count) {
4052 		pageCount = *count;
4053 	}
4054 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4055 
4056 	*upl = NULL;
4057 	*page_offset = ((unsigned int) page_mask & offset);
4058 
4059 	for (page = 0; page < pageCount; page++) {
4060 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4061 		if (!phys) {
4062 			break;
4063 		}
4064 		page_list[page].phys_addr = phys;
4065 		page_list[page].free_when_done = 0;
4066 		page_list[page].absent    = 0;
4067 		page_list[page].dirty     = 0;
4068 		page_list[page].precious  = 0;
4069 		page_list[page].device    = 0;
4070 		if (phys > highestPage) {
4071 			highestPage = phys;
4072 		}
4073 	}
4074 
4075 	*highest_page = highestPage;
4076 
4077 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4078 }
4079 
/*
 * Wire down this (virtual/UIO-typed) descriptor's pages for I/O.
 *
 * On first wire (_wireCount == 0): walks every source range (or memory
 * reference entry), creating UPLs via memory_object_iopl_request /
 * vm_map_create_upl — or a pseudo-UPL for static kernel text — and
 * records one ioPLBlock per UPL plus the per-page info in
 * _memoryEntries.  On re-wire it only validates that a descriptor
 * prepared read-only is not being wired for write.
 *
 * forDirection selects the UPL access mode and may carry
 * kIODirectionPrepare* modifier flags.  On any failure the partially
 * built iopl list is aborted and torn down (abortExit).
 */
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOReturn error = kIOReturnSuccess;
	ioGMDData *dataP;
	upl_page_info_array_t pageInfo;
	ppnum_t mapBase;
	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	mach_vm_size_t numBytesWired = 0;

	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

	// No explicit direction requested: inherit the descriptor's own.
	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
		forDirection = (IODirection) (forDirection | getDirection());
	}

	dataP = getDataP(_memoryEntries);
	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
	switch (kIODirectionOutIn & forDirection) {
	case kIODirectionOut:
		// Pages do not need to be marked as dirty on commit
		uplFlags = UPL_COPYOUT_FROM;
		dataP->fDMAAccess = kIODMAMapReadAccess;
		break;

	case kIODirectionIn:
		dataP->fDMAAccess = kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;

	default:
		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;
	}

	if (_wireCount) {
		// Already wired: only a read-only/write-access conflict is an error.
		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
			    (size_t)VM_KERNEL_ADDRPERM(this));
			error = kIOReturnNotWritable;
		}
	} else {
		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
		IOMapper *mapper;

		mapper = dataP->fMapper;
		dataP->fMappedBaseValid = dataP->fMappedBase = 0;

		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
		// Tag the wired pages for VM accounting.
		tag = _kernelTag;
		if (VM_KERN_MEMORY_NONE == tag) {
			tag = IOMemoryTag(kernel_map);
		}

		if (kIODirectionPrepareToPhys32 & forDirection) {
			// Without a mapper the pages themselves must be below 4GB.
			if (!mapper) {
				uplFlags |= UPL_NEED_32BIT_ADDR;
			}
			if (dataP->fDMAMapNumAddressBits > 32) {
				dataP->fDMAMapNumAddressBits = 32;
			}
		}
		if (kIODirectionPrepareNoFault    & forDirection) {
			uplFlags |= UPL_REQUEST_NO_FAULT;
		}
		if (kIODirectionPrepareNoZeroFill & forDirection) {
			uplFlags |= UPL_NOZEROFILLIO;
		}
		if (kIODirectionPrepareNonCoherent & forDirection) {
			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
		}

		mapBase = 0;

		// Note that appendBytes(NULL) zeros the data up to the desired length
		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
		// Guard the unsigned-int truncation of the page-info array size.
		if (uplPageSize > ((unsigned int)uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		dataP = NULL;

		// Find the appropriate vm_map for the given task
		vm_map_t curMap;
		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
			curMap = NULL;
		} else {
			curMap = get_task_map(_task);
		}

		// Iterate over the vector of virtual ranges
		Ranges vec = _ranges;
		unsigned int pageIndex  = 0;
		IOByteCount mdOffset    = 0;
		ppnum_t highestPage     = 0;
		bool         byteAlignUPL;

		IOMemoryEntry * memRefEntry = NULL;
		if (_memRef) {
			memRefEntry = &_memRef->entries[0];
			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
		} else {
			byteAlignUPL = true;
		}

		for (UInt range = 0; mdOffset < _length; range++) {
			ioPLBlock iopl;
			mach_vm_address_t startPage, startPageOffset;
			mach_vm_size_t    numBytes;
			ppnum_t highPage = 0;

			if (_memRef) {
				if (range >= _memRef->count) {
					panic("memRefEntry");
				}
				memRefEntry = &_memRef->entries[range];
				numBytes    = memRefEntry->size;
				startPage   = -1ULL;
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = (memRefEntry->start & PAGE_MASK);
				}
			} else {
				// Get the startPage address and length of vec[range]
				getAddrLenForInd(startPage, numBytes, type, vec, range);
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = startPage & PAGE_MASK;
					startPage = trunc_page_64(startPage);
				}
			}
			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
			numBytes += startPageOffset;

			if (mapper) {
				iopl.fMappedPage = mapBase + pageIndex;
			} else {
				iopl.fMappedPage = 0;
			}

			// Iterate over the current range, creating UPLs
			while (numBytes) {
				vm_address_t kernelStart = (vm_address_t) startPage;
				vm_map_t theMap;
				if (curMap) {
					theMap = curMap;
				} else if (_memRef) {
					theMap = NULL;
				} else {
					assert(_task == kernel_task);
					theMap = IOPageableMapForAddress(kernelStart);
				}

				// ioplFlags is an in/out parameter
				upl_control_flags_t ioplFlags = uplFlags;
				dataP = getDataP(_memoryEntries);
				pageInfo = getPageList(dataP);
				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

				mach_vm_size_t ioplPhysSize;
				upl_size_t     ioplSize;
				unsigned int   numPageInfo;

				// Determine how much physical coverage this iteration can make.
				if (_memRef) {
					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
				} else {
					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
				}
				if (error != KERN_SUCCESS) {
					if (_memRef) {
						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
					} else {
						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
					}
					printf("entry size error %d\n", error);
					goto abortExit;
				}
				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
				numPageInfo = atop_32(ioplPhysSize);
				if (byteAlignUPL) {
					if (numBytes > ioplPhysSize) {
						ioplSize = ((typeof(ioplSize))ioplPhysSize);
					} else {
						ioplSize = ((typeof(ioplSize))numBytes);
					}
				} else {
					ioplSize = ((typeof(ioplSize))ioplPhysSize);
				}

				// Three UPL sources: a named memory entry, static kernel
				// memory (pseudo-UPL), or a task vm_map.
				if (_memRef) {
					memory_object_offset_t entryOffset;

					entryOffset = mdOffset;
					if (byteAlignUPL) {
						entryOffset = (entryOffset - memRefEntry->offset);
					} else {
						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
					}
					if (ioplSize > (memRefEntry->size - entryOffset)) {
						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
					}
					error = memory_object_iopl_request(memRefEntry->entry,
					    entryOffset,
					    &ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				} else if ((theMap == kernel_map)
				    && (kernelStart >= io_kernel_static_start)
				    && (kernelStart < io_kernel_static_end)) {
					error = io_get_kernel_static_upl(theMap,
					    kernelStart,
					    &ioplSize,
					    &iopl.fPageOffset,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &highPage);
				} else {
					assert(theMap);
					error = vm_map_create_upl(theMap,
					    startPage,
					    (upl_size_t*)&ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				}

				if (error != KERN_SUCCESS) {
					traceInterval.setEndArg2(error);
					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
					goto abortExit;
				}

				assert(ioplSize);

				// Track the highest physical page for 32-bit DMA decisions.
				if (iopl.fIOPL) {
					highPage = upl_get_highest_page(iopl.fIOPL);
				}
				if (highPage > highestPage) {
					highestPage = highPage;
				}

				if (baseInfo->device) {
					numPageInfo = 1;
					iopl.fFlags = kIOPLOnDevice;
				} else {
					iopl.fFlags = 0;
				}

				if (byteAlignUPL) {
					if (iopl.fIOPL) {
						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
					}
					if (startPage != (mach_vm_address_t)-1) {
						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
						startPage -= iopl.fPageOffset;
					}
					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
					numBytes += iopl.fPageOffset;
				}

				iopl.fIOMDOffset = mdOffset;
				iopl.fPageInfo = pageIndex;

				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
					// Clean up partial created and unsaved iopl
					if (iopl.fIOPL) {
						upl_abort(iopl.fIOPL, 0);
						upl_deallocate(iopl.fIOPL);
					}
					error = kIOReturnNoMemory;
					traceInterval.setEndArg2(error);
					goto abortExit;
				}
				dataP = NULL;

				// Check for a multiple iopl's in one virtual range
				pageIndex += numPageInfo;
				mdOffset -= iopl.fPageOffset;
				numBytesWired += ioplSize;
				if (ioplSize < numBytes) {
					numBytes -= ioplSize;
					if (startPage != (mach_vm_address_t)-1) {
						startPage += ioplSize;
					}
					mdOffset += ioplSize;
					iopl.fPageOffset = 0;
					if (mapper) {
						iopl.fMappedPage = mapBase + pageIndex;
					}
				} else {
					mdOffset += numBytes;
					break;
				}
			}
		}

		_highestPage = highestPage;
		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);

		// Remember a write-wire conflict check is needed on re-wire.
		if (UPL_COPYOUT_FROM & uplFlags) {
			_flags |= kIOMemoryPreparedReadOnly;
		}
		traceInterval.setEndCodes(numBytesWired, error);
	}

#if IOTRACKING
	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
		dataP = getDataP(_memoryEntries);
		if (!dataP->fWireTracking.link.next) {
			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
		}
	}
#endif /* IOTRACKING */

	return error;

abortExit:
	// Abort every iopl created so far and reset the entries to empty.
	{
		dataP = getDataP(_memoryEntries);
		UInt done = getNumIOPL(_memoryEntries, dataP);
		ioPLBlock *ioplList = getIOPLList(dataP);

		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
			if (ioplList[ioplIdx].fIOPL) {
				upl_abort(ioplList[ioplIdx].fIOPL, 0);
				upl_deallocate(ioplList[ioplIdx].fIOPL);
			}
		}
		_memoryEntries->setLength(computeDataSize(0, 0));
	}

	// Map Mach VM errors onto IOKit return codes.
	if (error == KERN_FAILURE) {
		error = kIOReturnCannotWire;
	} else if (error == KERN_MEMORY_ERROR) {
		error = kIOReturnNoResources;
	}

	return error;
}
4438 
4439 bool
initMemoryEntries(size_t size,IOMapper * mapper)4440 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4441 {
4442 	ioGMDData * dataP;
4443 
4444 	if (size > UINT_MAX) {
4445 		return false;
4446 	}
4447 	if (!_memoryEntries) {
4448 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4449 		if (!_memoryEntries) {
4450 			return false;
4451 		}
4452 	} else if (!_memoryEntries->initWithCapacity(size)) {
4453 		return false;
4454 	}
4455 
4456 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4457 	dataP = getDataP(_memoryEntries);
4458 
4459 	if (mapper == kIOMapperWaitSystem) {
4460 		IOMapper::checkForSystemMapper();
4461 		mapper = IOMapper::gSystem;
4462 	}
4463 	dataP->fMapper               = mapper;
4464 	dataP->fPageCnt              = 0;
4465 	dataP->fMappedBase           = 0;
4466 	dataP->fDMAMapNumAddressBits = 64;
4467 	dataP->fDMAMapAlignment      = 0;
4468 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4469 	dataP->fCompletionError      = false;
4470 	dataP->fMappedBaseValid      = false;
4471 
4472 	return true;
4473 }
4474 
4475 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4476 IOMemoryDescriptor::dmaMap(
4477 	IOMapper                    * mapper,
4478 	IOMemoryDescriptor          * memory,
4479 	IODMACommand                * command,
4480 	const IODMAMapSpecification * mapSpec,
4481 	uint64_t                      offset,
4482 	uint64_t                      length,
4483 	uint64_t                    * mapAddress,
4484 	uint64_t                    * mapLength)
4485 {
4486 	IOReturn err;
4487 	uint32_t mapOptions;
4488 
4489 	mapOptions = 0;
4490 	mapOptions |= kIODMAMapReadAccess;
4491 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4492 		mapOptions |= kIODMAMapWriteAccess;
4493 	}
4494 
4495 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4496 	    mapSpec, command, NULL, mapAddress, mapLength);
4497 
4498 	if (kIOReturnSuccess == err) {
4499 		dmaMapRecord(mapper, command, *mapLength);
4500 	}
4501 
4502 	return err;
4503 }
4504 
4505 void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4506 IOMemoryDescriptor::dmaMapRecord(
4507 	IOMapper                    * mapper,
4508 	IODMACommand                * command,
4509 	uint64_t                      mapLength)
4510 {
4511 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4512 	kern_allocation_name_t alloc;
4513 	int16_t                prior;
4514 
4515 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4516 		kern_allocation_update_size(mapper->fAllocName, mapLength);
4517 	}
4518 
4519 	if (!command) {
4520 		return;
4521 	}
4522 	prior = OSAddAtomic16(1, &_dmaReferences);
4523 	if (!prior) {
4524 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4525 			_mapName  = alloc;
4526 			mapLength = _length;
4527 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4528 		} else {
4529 			_mapName = NULL;
4530 		}
4531 	}
4532 }
4533 
4534 IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4535 IOMemoryDescriptor::dmaUnmap(
4536 	IOMapper                    * mapper,
4537 	IODMACommand                * command,
4538 	uint64_t                      offset,
4539 	uint64_t                      mapAddress,
4540 	uint64_t                      mapLength)
4541 {
4542 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4543 	IOReturn ret;
4544 	kern_allocation_name_t alloc;
4545 	kern_allocation_name_t mapName;
4546 	int16_t prior;
4547 
4548 	mapName = NULL;
4549 	prior = 0;
4550 	if (command) {
4551 		mapName = _mapName;
4552 		if (_dmaReferences) {
4553 			prior = OSAddAtomic16(-1, &_dmaReferences);
4554 		} else {
4555 			panic("_dmaReferences underflow");
4556 		}
4557 	}
4558 
4559 	if (!mapLength) {
4560 		traceInterval.setEndArg1(kIOReturnSuccess);
4561 		return kIOReturnSuccess;
4562 	}
4563 
4564 	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4565 
4566 	if ((alloc = mapper->fAllocName)) {
4567 		kern_allocation_update_size(alloc, -mapLength);
4568 		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4569 			mapLength = _length;
4570 			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4571 		}
4572 	}
4573 
4574 	traceInterval.setEndArg1(ret);
4575 	return ret;
4576 }
4577 
/*
 * DMA-map this general memory descriptor.  Host-only memory succeeds
 * without mapping; remote memory is refused.  Physical descriptors,
 * sub-range requests, or non-zero offsets go through the superclass
 * path; otherwise the prepared page list in _memoryEntries is handed
 * to the mapper directly for a whole-descriptor mapping.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn          err = kIOReturnSuccess;
	ioGMDData *       dataP;
	IOOptionBits      type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	if (kIOMemoryHostOnly & _flags) {
		// Host-only memory is never mapped into a DMA space.
		return kIOReturnSuccess;
	}
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		// Physical descriptors and partial maps use the generic path.
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t          mapOptions = 0;

		// NOTE(review): this local deliberately shadows the mapSpec
		// parameter — the whole-descriptor path rebuilds the spec from
		// the prepared data rather than using the caller's; confirm
		// intent before relying on the incoming argument here.
		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList      = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4651 
4652 /*
4653  * prepare
4654  *
4655  * Prepare the memory for an I/O transfer.  This involves paging in
4656  * the memory, if necessary, and wiring it down for the duration of
4657  * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't be
 * called for non-pageable memory.
4660  */
4661 
4662 IOReturn
prepare(IODirection forDirection)4663 IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4664 {
4665 	IOReturn     error    = kIOReturnSuccess;
4666 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4667 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4668 
4669 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4670 		traceInterval.setEndArg1(kIOReturnSuccess);
4671 		return kIOReturnSuccess;
4672 	}
4673 
4674 	assert(!(kIOMemoryRemote & _flags));
4675 	if (kIOMemoryRemote & _flags) {
4676 		traceInterval.setEndArg1(kIOReturnNotAttached);
4677 		return kIOReturnNotAttached;
4678 	}
4679 
4680 	if (_prepareLock) {
4681 		IOLockLock(_prepareLock);
4682 	}
4683 
4684 	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4685 		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4686 			error = kIOReturnNotReady;
4687 			goto finish;
4688 		}
4689 		error = wireVirtual(forDirection);
4690 	}
4691 
4692 	if (kIOReturnSuccess == error) {
4693 		if (1 == ++_wireCount) {
4694 			if (kIOMemoryClearEncrypt & _flags) {
4695 				performOperation(kIOMemoryClearEncrypted, 0, _length);
4696 			}
4697 
4698 			ktraceEmitPhysicalSegments();
4699 		}
4700 	}
4701 
4702 finish:
4703 
4704 	if (_prepareLock) {
4705 		IOLockUnlock(_prepareLock);
4706 	}
4707 	traceInterval.setEndArg1(error);
4708 
4709 	return error;
4710 }
4711 
4712 /*
4713  * complete
4714  *
4715  * Complete processing of the memory after an I/O transfer finishes.
4716  * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
4719  */
4720 
/*
 * Complete processing of the memory after an I/O transfer.  Decrements
 * the wire count; when it reaches zero (or when data-valid completion
 * is requested while still wired), the prepared iopls are committed or
 * aborted, the DMA map is torn down, and the memory entries are reset.
 * Re-encrypts pages when the last wire of a clear-encrypt descriptor
 * is dropped.  Always returns kIOReturnSuccess for local memory.
 */
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData  * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical descriptors have nothing to complete.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// Unbalanced complete() is a caller bug; tolerate it in release.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		if (kIODirectionCompleteWithError & forDirection) {
			// Remember the error so the final teardown aborts the upls.
			dataP->fCompletionError = true;
		}

		// Last wire of a clear-encrypt descriptor: re-encrypt the pages.
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Final unwire: tear everything down.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Reset the entries back to just the header.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4825 
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// Establish (or redirect) a VM mapping of this descriptor.  Under the
	// kIOMap64Bit convention, *__address carries an IOMemoryMap * that
	// supplies the destination map, offset and length; __addressMap itself
	// is only used for error logging.
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t  err;

	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t    range0Len = 0;

	// The requested window must lie entirely within the descriptor.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
	}

	// mapping source == dest? (could be much better)
	// An anywhere-mapping of a single-range descriptor into the task that
	// already owns the memory can just alias the existing address as a
	// static mapping, rather than creating new VM.
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the backing memory-entry reference on first map.
	if (!_memRef) {
		IOOptionBits createOptions = 0;
		if (!(kIOMapReadOnly & options)) {
			createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
			// Warn when a writable mapping is made of an out-bound-only
			// descriptor (skipped for the descriptor's creating task).
			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
			}
#endif
		}
		err = memoryReferenceCreate(createOptions, &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}

	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	// kIOMapReference|kIOMapUnique: retarget an existing unique mapping at
	// this descriptor by transposing the pages of the two UPLs.
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		do{
			upl_t               redirUPL2;
			upl_size_t          size;
			upl_control_flags_t flags;
			unsigned int        lock_count;

			// Only legal while we hold the sole reference to the entry.
			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// upl_transpose() may block: fully unwind the recursive memory
			// lock around the call and reacquire to the same depth after.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: enter the memory entry into the target map.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
#if IOTRACKING
		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
			// only dram maps in the default on developement case
			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
		}
#endif /* IOTRACKING */
		if ((err == KERN_SUCCESS) && pager) {
			// Device memory: prefill the pager; tear the mapping back down
			// if that fails.
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
4995 
#if IOTRACKING
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
// Manual offsetof: recover the IOMemoryMap that embeds 'tracking' as its
// fTracking member.
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	// Only report mappings still attached to their task's live VM map.
	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task    = map->fAddressTask;
	*address = map->fAddress;
	*size    = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
5016 
5017 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5018 IOGeneralMemoryDescriptor::doUnmap(
5019 	vm_map_t                addressMap,
5020 	IOVirtualAddress        __address,
5021 	IOByteCount             __length )
5022 {
5023 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5024 	IOReturn ret;
5025 	ret = super::doUnmap(addressMap, __address, __length);
5026 	traceInterval.setEndArg1(ret);
5027 	return ret;
5028 }
5029 
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

// IOMemoryMap registration with the OSMetaClass runtime; the reserved slots
// pad the vtable for future binary-compatible additions.
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5045 
/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
{
	// Physical address at offset 0; the segment length is not reported.
	return getPhysicalSegment( 0, NULL );
}
5052 
5053 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5054 
5055 bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5056 IOMemoryMap::init(
5057 	task_t                  intoTask,
5058 	mach_vm_address_t       toAddress,
5059 	IOOptionBits            _options,
5060 	mach_vm_size_t          _offset,
5061 	mach_vm_size_t          _length )
5062 {
5063 	if (!intoTask) {
5064 		return false;
5065 	}
5066 
5067 	if (!super::init()) {
5068 		return false;
5069 	}
5070 
5071 	fAddressMap  = get_task_map(intoTask);
5072 	if (!fAddressMap) {
5073 		return false;
5074 	}
5075 	vm_map_reference(fAddressMap);
5076 
5077 	fAddressTask = intoTask;
5078 	fOptions     = _options;
5079 	fLength      = _length;
5080 	fOffset      = _offset;
5081 	fAddress     = toAddress;
5082 
5083 	return true;
5084 }
5085 
5086 bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5087 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5088 {
5089 	if (!_memory) {
5090 		return false;
5091 	}
5092 
5093 	if (!fSuperMap) {
5094 		if ((_offset + fLength) > _memory->getLength()) {
5095 			return false;
5096 		}
5097 		fOffset = _offset;
5098 	}
5099 
5100 
5101 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5102 	if (fMemory) {
5103 		if (fMemory != _memory) {
5104 			fMemory->removeMapping(this);
5105 		}
5106 	}
5107 	fMemory = os::move(tempval);
5108 
5109 	return true;
5110 }
5111 
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// The base class cannot map; concrete subclasses (e.g.
	// IOGeneralMemoryDescriptor above) override this.
	return kIOReturnUnsupported;
}
5122 
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	// While the descriptor is redirected (see redirect()), park the
	// faulting thread; redirect(..., false) issues the matching WAKEUP.
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5139 
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	// Enter this descriptor's physical pages into the device pager so the
	// range [sourceOffset, sourceOffset + length) is backed before use.
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A physically contiguous pager is always populated from offset 0.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset  = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Round the first segment down to a page boundary; pageOffset tracks
	// the sub-page start within it.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// A device pager should not front managed (VM-owned) pages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Contiguous pagers can take the whole segment in one call;
		// otherwise populate one page at a time.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Bytes left over means the descriptor ran out of physical segments.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5242 
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	// 64-bit convention: __address carries the IOMemoryMap * and __length
	// must be zero; the map supplies the real address map/address/length.
	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	// Overwrite mappings share a range owned elsewhere; nothing to free.
	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		// Pageable buffers live in one of the pageable submaps, not in
		// kernel_map proper.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5287 
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	// Toggle redirection for this descriptor and propagate to every
	// existing mapping.  While redirected, faulting threads block in
	// handleFault(); clearing redirection wakes them (WAKEUP below).
	IOReturn            err = kIOReturnSuccess;
	IOMemoryMap *       mapping = NULL;
	OSSharedPtr<OSIterator>        iter;

	LOCK;

	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t   pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// On a global un-redirect of device memory, repopulate
				// kernel mappings eagerly (they can't fault in later).
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5342 
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	// Submaps are handled via their parent; nothing to do here.
	if (fSuperMap) {
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Unless this mapping belongs to the "safe" task (or is a
			// static alias), pull the page mappings so later accesses
			// fault and block in handleFault().
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// The safe task's write-combined mapping is flipped to
				// cache-inhibited while redirected instead of unmapped.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// Physical descriptors also track redirection state themselves.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5386 
IOReturn
IOMemoryMap::unmap( void )
{
	IOReturn    err;

	LOCK;

	// Only a live, top-level, non-static mapping owns VM to tear down;
	// submaps and static aliases have nothing of their own to release.
	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
	    && (0 == (kIOMapStatic & fOptions))) {
		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
	} else {
		err = kIOReturnSuccess;
	}

	// Drop the vm_map reference taken in init().
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}

	fAddress = 0;

	UNLOCK;

	return err;
}
5412 
void
IOMemoryMap::taskDied( void )
{
	// The owning task is exiting: fully unmap when userClientUnmap() was
	// requested, otherwise just detach from the dying task's resources.
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	// Drop the vm_map reference taken in init() (unmap() may already have
	// cleared fAddressMap).
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress     = 0;
	UNLOCK;
}
5434 
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	// Ask for a full unmap when the owning task dies (see taskDied()).
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5441 
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptors _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these mappings
// are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// Free when the count drops to the 2-reference baseline (the holder
	// plus the descriptor's _mappings set), under the memory lock.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5453 
void
IOMemoryMap::free()
{
	// Tear down the VM mapping first, then detach from the descriptor, the
	// parent map, and any outstanding redirection UPL.
	unmap();

	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// Release pages captured by redirect() that were never handed back.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5477 
IOByteCount
IOMemoryMap::getLength()
{
	// Length of this mapping in bytes.
	return fLength;
}
5483 
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	// On 32-bit kernels, warn when a 64-bit address would be truncated.
	// A submap delegates the check to its parent; the return value is
	// intentionally ignored — only the warning side effect matters.
	if (fSuperMap) {
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5499 
#ifndef __LP64__
// These two accessors are compiled only on 32-bit kernels.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	// Start address of the mapping in the target task.
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	// Length of the mapping in bytes.
	return fLength;
}
#endif /* !__LP64__ */
5513 
5514 
5515 task_t
getAddressTask()5516 IOMemoryMap::getAddressTask()
5517 {
5518 	if (fSuperMap) {
5519 		return fSuperMap->getAddressTask();
5520 	} else {
5521 		return fAddressTask;
5522 	}
5523 }
5524 
IOOptionBits
IOMemoryMap::getMapOptions()
{
	// kIOMap* option bits this mapping was created with.
	return fOptions;
}
5530 
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	// Raw (unretained) pointer to the backing descriptor.
	return fMemory.get();
}
5536 
5537 IOMemoryMap *
copyCompatible(IOMemoryMap * newMapping)5538 IOMemoryMap::copyCompatible(
5539 	IOMemoryMap * newMapping )
5540 {
5541 	task_t              task      = newMapping->getAddressTask();
5542 	mach_vm_address_t   toAddress = newMapping->fAddress;
5543 	IOOptionBits        _options  = newMapping->fOptions;
5544 	mach_vm_size_t      _offset   = newMapping->fOffset;
5545 	mach_vm_size_t      _length   = newMapping->fLength;
5546 
5547 	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5548 		return NULL;
5549 	}
5550 	if ((fOptions ^ _options) & kIOMapReadOnly) {
5551 		return NULL;
5552 	}
5553 	if ((fOptions ^ _options) & kIOMapGuardedMask) {
5554 		return NULL;
5555 	}
5556 	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5557 	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
5558 		return NULL;
5559 	}
5560 
5561 	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5562 		return NULL;
5563 	}
5564 
5565 	if (_offset < fOffset) {
5566 		return NULL;
5567 	}
5568 
5569 	_offset -= fOffset;
5570 
5571 	if ((_offset + _length) > fLength) {
5572 		return NULL;
5573 	}
5574 
5575 	if ((fLength == _length) && (!_offset)) {
5576 		retain();
5577 		newMapping = this;
5578 	} else {
5579 		newMapping->fSuperMap.reset(this, OSRetain);
5580 		newMapping->fOffset   = fOffset + _offset;
5581 		newMapping->fAddress  = fAddress + _offset;
5582 	}
5583 
5584 	return newMapping;
5585 }
5586 
IOReturn
IOMemoryMap::wireRange(
	uint32_t                options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	// Wire (or unwire) the page-aligned span of this mapping covering
	// [offset, offset + length).
	IOReturn kr;
	mach_vm_address_t start = trunc_page_64(fAddress + offset);
	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
	vm_prot_t prot;

	// Direction bits double as the wire protection; zero means unwire.
	// NOTE(review): relies on the kIODirectionOutIn bit values lining up
	// with VM_PROT_* — confirm against IOTypes.h.
	prot = (kIODirectionOutIn & options);
	if (prot) {
		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
	} else {
		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
	}

	return kr;
}
5607 
5608 
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	// Translate a mapping-relative offset into a physical address by asking
	// the backing descriptor, biased by this mapping's own offset.
	IOPhysicalAddress   address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5628 
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Rebind 'super' for the IOMemoryDescriptor methods that follow.
#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5635 
5636 void
initialize(void)5637 IOMemoryDescriptor::initialize( void )
5638 {
5639 	if (NULL == gIOMemoryLock) {
5640 		gIOMemoryLock = IORecursiveLockAlloc();
5641 	}
5642 
5643 	gIOLastPage = IOGetLastPageNumber();
5644 }
5645 
void
IOMemoryDescriptor::free( void )
{
	// Drop the mapping set, then the expansion data, before the base free.
	if (_mappings) {
		_mappings.reset();
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		IOFreeType(reserved, IOMemoryDescriptorReserved);
		reserved = NULL;
	}
	super::free();
}
5660 
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t                  intoTask,
	IOVirtualAddress        mapAddress,
	IOOptionBits            options )
{
	// Record an existing fixed mapping of the whole descriptor
	// (kIOMapStatic: no new VM is created).
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}
5671 
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits            options )
{
	// Map the whole descriptor anywhere in the kernel task.
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}
5680 
#ifndef __LP64__
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	// Legacy 32-bit entry point.  A fixed-address request into a 64-bit
	// task can't be expressed with a 32-bit IOVirtualAddress, so refuse it.
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5699 
5700 OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5701 IOMemoryDescriptor::createMappingInTask(
5702 	task_t                  intoTask,
5703 	mach_vm_address_t       atAddress,
5704 	IOOptionBits            options,
5705 	mach_vm_size_t          offset,
5706 	mach_vm_size_t          length)
5707 {
5708 	IOMemoryMap * result;
5709 	IOMemoryMap * mapping;
5710 
5711 	if (0 == length) {
5712 		length = getLength();
5713 	}
5714 
5715 	mapping = new IOMemoryMap;
5716 
5717 	if (mapping
5718 	    && !mapping->init( intoTask, atAddress,
5719 	    options, offset, length )) {
5720 		mapping->release();
5721 		mapping = NULL;
5722 	}
5723 
5724 	if (mapping) {
5725 		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5726 	} else {
5727 		result = nullptr;
5728 	}
5729 
5730 #if DEBUG
5731 	if (!result) {
5732 		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5733 		    this, atAddress, (uint32_t) options, offset, length);
5734 	}
5735 #endif
5736 
5737 	// already retained through makeMapping
5738 	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5739 
5740 	return retval;
5741 }
5742 
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	// Thin shim widening the 32-bit offset for the 64-bit overload.
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5752 
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	// Re-point this mapping at a new backing descriptor.  The current
	// backing's pages are captured in fRedirUPL (with access blocked)
	// until the new mapping is in place, then committed and released.
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// First call: capture and block the current pages while we
			// still hold the only reference to the memory entry.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// Rebuild this same map object over the new memory via
					// the unique/reference path of makeMapping()/doMap().
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Release the captured pages, unblocking access.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5813 
/*
 * Create (or reuse) an IOMemoryMap for this descriptor.
 *
 * With kIOMap64Bit set (mandatory on !LP64 builds, see panic below),
 * __address actually carries an IOMemoryMap object describing the
 * requested mapping; __offset is added to that mapping's existing offset.
 * Returns the resulting mapping, or NULL on failure — in the failure
 * paths the passed-in mapping object is released here.
 */
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result  = NULL;

	// In the 64-bit calling convention, __address smuggles the map object.
	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Adopt a pre-built (static) mapping without creating VM state.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount       physLen;

//	    if (owner != this)		continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Physical descriptor: map through a fresh descriptor that
				// covers just the contiguous physical span requested.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
					        addMapping(result);
					        result->setMemoryDescriptor(this, offset);
					        return true;
					}
					return false;
				});
			}
			if (result || (options & kIOMapReference)) {
				// Either an existing mapping is being reused, or the caller
				// demanded reuse-only (kIOMapReference) and none was found;
				// in both cases the caller's mapping object is consumed.
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
5911 
5912 void
addMapping(IOMemoryMap * mapping)5913 IOMemoryDescriptor::addMapping(
5914 	IOMemoryMap * mapping )
5915 {
5916 	if (mapping) {
5917 		if (NULL == _mappings) {
5918 			_mappings = OSSet::withCapacity(1);
5919 		}
5920 		if (_mappings) {
5921 			_mappings->setObject( mapping );
5922 		}
5923 	}
5924 }
5925 
5926 void
removeMapping(IOMemoryMap * mapping)5927 IOMemoryDescriptor::removeMapping(
5928 	IOMemoryMap * mapping )
5929 {
5930 	if (_mappings) {
5931 		_mappings->removeObject( mapping);
5932 	}
5933 }
5934 
// Store the IOMapper option flags to apply when this descriptor is mapped.
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}
5940 
// Return the IOMapper option flags previously set via setMapperOptions().
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}
5946 
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
// These legacy 32-bit entry points are kept only for binary compatibility;
// each unconditionally fails (or returns NULL), forcing callers onto the
// initWithOptions() path.
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    length,
    IODirection  direction,
    task_t       task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            length,
	IODirection            direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    bool             asReference)
{
	return false;
}

void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
6003 
6004 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6005 
6006 bool
serialize(OSSerialize * s) const6007 IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
6008 {
6009 	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
6010 	OSSharedPtr<OSObject>           values[2] = {NULL};
6011 	OSSharedPtr<OSArray>            array;
6012 
6013 	struct SerData {
6014 		user_addr_t address;
6015 		user_size_t length;
6016 	};
6017 
6018 	unsigned int index;
6019 
6020 	IOOptionBits type = _flags & kIOMemoryTypeMask;
6021 
6022 	if (s == NULL) {
6023 		return false;
6024 	}
6025 
6026 	array = OSArray::withCapacity(4);
6027 	if (!array) {
6028 		return false;
6029 	}
6030 
6031 	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6032 	if (!vcopy) {
6033 		return false;
6034 	}
6035 
6036 	keys[0] = OSSymbol::withCString("address");
6037 	keys[1] = OSSymbol::withCString("length");
6038 
6039 	// Copy the volatile data so we don't have to allocate memory
6040 	// while the lock is held.
6041 	LOCK;
6042 	if (vcopy.size() == _rangesCount) {
6043 		Ranges vec = _ranges;
6044 		for (index = 0; index < vcopy.size(); index++) {
6045 			mach_vm_address_t addr; mach_vm_size_t len;
6046 			getAddrLenForInd(addr, len, type, vec, index);
6047 			vcopy[index].address = addr;
6048 			vcopy[index].length  = len;
6049 		}
6050 	} else {
6051 		// The descriptor changed out from under us.  Give up.
6052 		UNLOCK;
6053 		return false;
6054 	}
6055 	UNLOCK;
6056 
6057 	for (index = 0; index < vcopy.size(); index++) {
6058 		user_addr_t addr = vcopy[index].address;
6059 		IOByteCount len = (IOByteCount) vcopy[index].length;
6060 		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6061 		if (values[0] == NULL) {
6062 			return false;
6063 		}
6064 		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6065 		if (values[1] == NULL) {
6066 			return false;
6067 		}
6068 		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6069 		if (dict == NULL) {
6070 			return false;
6071 		}
6072 		array->setObject(dict.get());
6073 		dict.reset();
6074 		values[0].reset();
6075 		values[1].reset();
6076 	}
6077 
6078 	return array->serialize(s);
6079 }
6080 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6081 
// Reserved vtable padding slots for binary compatibility.  Slot 0 (and,
// on 32-bit builds, slots 1-7) were consumed by later API additions; the
// remaining slots stay unused.
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6108 
6109 /* ex-inline function implementation */
// Physical address of the segment at offset 0 (length not reported).
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
6115 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData,OSObject)6116 OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6117 
6118 OSPtr<_IOMemoryDescriptorMixedData>
6119 _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6120 {
6121 	OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6122 	if (me && !me->initWithCapacity(capacity)) {
6123 		return nullptr;
6124 	}
6125 	return me;
6126 }
6127 
6128 /*
6129  * Ignore -Wxnu-typed-allocators within IOMemoryDescriptorMixedData
6130  * because it implements an allocator.
6131  */
6132 __typed_allocators_ignore_push
6133 
/*
 * Initialize with room for at least 'capacity' bytes.
 *
 * May be invoked on an already-initialized object: any existing allocation
 * is released first when the requested capacity is zero or exceeds the
 * current one; otherwise the existing (large enough) buffer is kept.  The
 * logical length is always reset to zero.  Returns false if OSObject::init
 * or the buffer allocation fails.
 */
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	if (!_data && capacity) {
		_data = IOMalloc(capacity);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6157 
// OSObject destructor hook: release the data buffer, then finish teardown.
void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}
6164 
// Release the backing allocation and reset all bookkeeping to empty.
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	IOFree(_data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6172 
6173 bool
appendBytes(const void * bytes,size_t length)6174 _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6175 {
6176 	const auto oldLength = getLength();
6177 	size_t newLength;
6178 	if (os_add_overflow(oldLength, length, &newLength)) {
6179 		return false;
6180 	}
6181 
6182 	if (newLength > _capacity) {
6183 		void * const newData = IOMalloc(newLength);
6184 		if (!newData) {
6185 			return false;
6186 		}
6187 		if (_data) {
6188 			bcopy(_data, newData, oldLength);
6189 			IOFree(_data, _capacity);
6190 		}
6191 		_data = newData;
6192 		_capacity = newLength;
6193 	}
6194 
6195 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6196 	if (bytes) {
6197 		bcopy(bytes, dest, length);
6198 	} else {
6199 		bzero(dest, length);
6200 	}
6201 
6202 	_length = newLength;
6203 
6204 	return true;
6205 }
6206 
/*
 * Resize the buffer to exactly 'length' bytes, reallocating (zero-filled)
 * when no buffer exists or the current one is too small.  Existing contents
 * up to the old _length are preserved across the reallocation.
 *
 * NOTE(review): the IOMallocZero result is not checked; on allocation
 * failure bcopy would target NULL — confirm callers only use sizes for
 * which the allocation cannot fail, or add a check.
 */
void
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void * const newData = IOMallocZero(length);
		if (_data) {
			bcopy(_data, newData, _length);
			IOFree(_data, _capacity);
		}
		_data = newData;
		_capacity = length;
	}
	_length = length;
}
6221 
6222 __typed_allocators_ignore_pop
6223 
6224 const void *
getBytes() const6225 _IOMemoryDescriptorMixedData::getBytes() const
6226 {
6227 	return _length ? _data : nullptr;
6228 }
6229 
6230 size_t
getLength() const6231 _IOMemoryDescriptorMixedData::getLength() const
6232 {
6233 	return _data ? _length : 0;
6234 }
6235