xref: /xnu-10002.61.3/iokit/Kernel/IOMemoryDescriptor.cpp (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #define IOKIT_ENABLE_SHARED_PTR
29 
30 #include <sys/cdefs.h>
31 
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39 
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43 
44 #include <IOKit/IOKitDebug.h>
45 #include <IOKit/IOTimeStamp.h>
46 #include <libkern/OSDebug.h>
47 #include <libkern/OSKextLibPrivate.h>
48 
49 #include "IOKitKernelInternal.h"
50 
51 #include <libkern/c++/OSAllocation.h>
52 #include <libkern/c++/OSContainers.h>
53 #include <libkern/c++/OSDictionary.h>
54 #include <libkern/c++/OSArray.h>
55 #include <libkern/c++/OSSymbol.h>
56 #include <libkern/c++/OSNumber.h>
57 #include <os/overflow.h>
58 #include <os/cpp_util.h>
59 #include <os/base_private.h>
60 
61 #include <sys/uio.h>
62 
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68 
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <mach/memory_entry.h>
72 #include <vm/vm_fault.h>
73 #include <vm/vm_protos.h>
74 
75 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
76 extern void ipc_port_release_send(ipc_port_t port);
77 
78 extern kern_return_t
79 mach_memory_entry_ownership(
80 	ipc_port_t      entry_port,
81 	task_t          owner,
82 	int             ledger_tag,
83 	int             ledger_flags);
84 
85 __END_DECLS
86 
87 #define kIOMapperWaitSystem     ((IOMapper *) 1)
88 
89 static IOMapper * gIOSystemMapper = NULL;
90 
91 ppnum_t           gIOLastPage;
92 
93 enum {
94 	kIOMapGuardSizeLarge = 65536
95 };
96 
97 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
98 
99 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
100 
101 #define super IOMemoryDescriptor
102 
103 OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
104     IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
105 
106 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
107 
108 static IORecursiveLock * gIOMemoryLock;
109 
110 #define LOCK    IORecursiveLockLock( gIOMemoryLock)
111 #define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
112 #define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
113 #define WAKEUP  \
114     IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
115 
116 #if 0
117 #define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
118 #else
119 #define DEBG(fmt, args...)      {}
120 #endif
121 
122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123 
124 // Some data structures and accessor macros used by the initWithOptions
125 // Function
126 
127 enum ioPLBlockFlags {
128 	kIOPLOnDevice  = 0x00000001,
129 	kIOPLExternUPL = 0x00000002,
130 };
131 
132 struct IOMDPersistentInitData {
133 	const IOGeneralMemoryDescriptor * fMD;
134 	IOMemoryReference               * fMemRef;
135 };
136 
137 struct ioPLBlock {
138 	upl_t fIOPL;
139 	vm_address_t fPageInfo; // Pointer to page list or index into it
140 	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
141 	ppnum_t fMappedPage;        // Page number of first page in this iopl
142 	unsigned int fPageOffset;   // Offset within first page of iopl
143 	unsigned int fFlags;        // Flags
144 };
145 
146 enum { kMaxWireTags = 6 };
147 
148 struct ioGMDData {
149 	IOMapper *  fMapper;
150 	uint64_t    fDMAMapAlignment;
151 	uint64_t    fMappedBase;
152 	uint64_t    fMappedLength;
153 	uint64_t    fPreparationID;
154 #if IOTRACKING
155 	IOTracking  fWireTracking;
156 #endif /* IOTRACKING */
157 	unsigned int      fPageCnt;
158 	uint8_t           fDMAMapNumAddressBits;
159 	unsigned char     fCompletionError:1;
160 	unsigned char     fMappedBaseValid:1;
161 	unsigned char     _resv:4;
162 	unsigned char     fDMAAccess:2;
163 
164 	/* variable length arrays */
165 	upl_page_info_t fPageList[1]
166 #if __LP64__
167 	// align fPageList as for ioPLBlock
168 	__attribute__((aligned(sizeof(upl_t))))
169 #endif
170 	;
171 	//ioPLBlock fBlocks[1];
172 };
173 
174 #pragma GCC visibility push(hidden)
175 
176 class _IOMemoryDescriptorMixedData : public OSObject
177 {
178 	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);
179 
180 public:
181 	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
182 	bool initWithCapacity(size_t capacity);
183 	virtual void free() APPLE_KEXT_OVERRIDE;
184 
185 	bool appendBytes(const void * bytes, size_t length);
186 	bool setLength(size_t length);
187 
188 	const void * getBytes() const;
189 	size_t getLength() const;
190 
191 private:
192 	void freeMemory();
193 
194 	void *  _data = nullptr;
195 	size_t  _length = 0;
196 	size_t  _capacity = 0;
197 };
198 
199 #pragma GCC visibility pop
200 
201 #define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
202 #define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
203 #define getNumIOPL(osd, d)      \
204     ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
205 #define getPageList(d)  (&(d->fPageList[0]))
206 #define computeDataSize(p, u) \
207     (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
208 
209 enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
210 
211 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212 
213 extern "C" {
214 kern_return_t
device_data_action(uintptr_t device_handle,ipc_port_t device_pager,vm_prot_t protection,vm_object_offset_t offset,vm_size_t size)215 device_data_action(
216 	uintptr_t               device_handle,
217 	ipc_port_t              device_pager,
218 	vm_prot_t               protection,
219 	vm_object_offset_t      offset,
220 	vm_size_t               size)
221 {
222 	kern_return_t        kr;
223 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
224 	OSSharedPtr<IOMemoryDescriptor> memDesc;
225 
226 	LOCK;
227 	if (ref->dp.memory) {
228 		memDesc.reset(ref->dp.memory, OSRetain);
229 		kr = memDesc->handleFault(device_pager, offset, size);
230 		memDesc.reset();
231 	} else {
232 		kr = KERN_ABORTED;
233 	}
234 	UNLOCK;
235 
236 	return kr;
237 }
238 
239 kern_return_t
device_close(uintptr_t device_handle)240 device_close(
241 	uintptr_t     device_handle)
242 {
243 	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
244 
245 	IOFreeType( ref, IOMemoryDescriptorReserved );
246 
247 	return kIOReturnSuccess;
248 }
249 };      // end extern "C"
250 
251 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
252 
253 // Note this inline function uses C++ reference arguments to return values
254 // This means that pointers are not passed and NULLs don't have to be
255 // checked for as a NULL reference is illegal.
256 static inline void
getAddrLenForInd(mach_vm_address_t & addr,mach_vm_size_t & len,UInt32 type,IOGeneralMemoryDescriptor::Ranges r,UInt32 ind,task_t task __unused)257 getAddrLenForInd(
258 	mach_vm_address_t                &addr,
259 	mach_vm_size_t                   &len, // Output variables
260 	UInt32                            type,
261 	IOGeneralMemoryDescriptor::Ranges r,
262 	UInt32                            ind,
263 	task_t                            task __unused)
264 {
265 	assert(kIOMemoryTypeUIO == type
266 	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
267 	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
268 	if (kIOMemoryTypeUIO == type) {
269 		user_size_t us;
270 		user_addr_t ad;
271 		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
272 	}
273 #ifndef __LP64__
274 	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
275 		IOAddressRange cur = r.v64[ind];
276 		addr = cur.address;
277 		len  = cur.length;
278 	}
279 #endif /* !__LP64__ */
280 	else {
281 		IOVirtualRange cur = r.v[ind];
282 		addr = cur.address;
283 		len  = cur.length;
284 	}
285 #if CONFIG_PROB_GZALLOC
286 	if (task == kernel_task) {
287 		addr = pgz_decode(addr, len);
288 	}
289 #endif /* CONFIG_PROB_GZALLOC */
290 }
291 
292 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
293 
294 static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)295 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
296 {
297 	IOReturn err = kIOReturnSuccess;
298 
299 	*control = VM_PURGABLE_SET_STATE;
300 
301 	enum { kIOMemoryPurgeableControlMask = 15 };
302 
303 	switch (kIOMemoryPurgeableControlMask & newState) {
304 	case kIOMemoryPurgeableKeepCurrent:
305 		*control = VM_PURGABLE_GET_STATE;
306 		break;
307 
308 	case kIOMemoryPurgeableNonVolatile:
309 		*state = VM_PURGABLE_NONVOLATILE;
310 		break;
311 	case kIOMemoryPurgeableVolatile:
312 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
313 		break;
314 	case kIOMemoryPurgeableEmpty:
315 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
316 		break;
317 	default:
318 		err = kIOReturnBadArgument;
319 		break;
320 	}
321 
322 	if (*control == VM_PURGABLE_SET_STATE) {
323 		// let VM know this call is from the kernel and is allowed to alter
324 		// the volatility of the memory entry even if it was created with
325 		// MAP_MEM_PURGABLE_KERNEL_ONLY
326 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
327 	}
328 
329 	return err;
330 }
331 
332 static IOReturn
purgeableStateBits(int * state)333 purgeableStateBits(int * state)
334 {
335 	IOReturn err = kIOReturnSuccess;
336 
337 	switch (VM_PURGABLE_STATE_MASK & *state) {
338 	case VM_PURGABLE_NONVOLATILE:
339 		*state = kIOMemoryPurgeableNonVolatile;
340 		break;
341 	case VM_PURGABLE_VOLATILE:
342 		*state = kIOMemoryPurgeableVolatile;
343 		break;
344 	case VM_PURGABLE_EMPTY:
345 		*state = kIOMemoryPurgeableEmpty;
346 		break;
347 	default:
348 		*state = kIOMemoryPurgeableNonVolatile;
349 		err = kIOReturnNotReady;
350 		break;
351 	}
352 	return err;
353 }
354 
355 typedef struct {
356 	unsigned int wimg;
357 	unsigned int object_type;
358 } iokit_memtype_entry;
359 
360 static const iokit_memtype_entry iomd_mem_types[] = {
361 	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
362 	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
363 	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
364 	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
365 	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
366 	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
367 	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
368 	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
369 	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
370 	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
371 };
372 
373 static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)374 vmProtForCacheMode(IOOptionBits cacheMode)
375 {
376 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
377 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
378 		cacheMode = kIODefaultCache;
379 	}
380 	vm_prot_t prot = 0;
381 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
382 	return prot;
383 }
384 
385 static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)386 pagerFlagsForCacheMode(IOOptionBits cacheMode)
387 {
388 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
389 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
390 		cacheMode = kIODefaultCache;
391 	}
392 	if (cacheMode == kIODefaultCache) {
393 		return -1U;
394 	}
395 	return iomd_mem_types[cacheMode].wimg;
396 }
397 
398 static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)399 cacheModeForPagerFlags(unsigned int pagerFlags)
400 {
401 	pagerFlags &= VM_WIMG_MASK;
402 	IOOptionBits cacheMode = kIODefaultCache;
403 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
404 		if (iomd_mem_types[i].wimg == pagerFlags) {
405 			cacheMode = i;
406 			break;
407 		}
408 	}
409 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
410 }
411 
412 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
414 
415 struct IOMemoryEntry {
416 	ipc_port_t entry;
417 	int64_t    offset;
418 	uint64_t   size;
419 	uint64_t   start;
420 };
421 
422 struct IOMemoryReference {
423 	volatile SInt32             refCount;
424 	vm_prot_t                   prot;
425 	uint32_t                    capacity;
426 	uint32_t                    count;
427 	struct IOMemoryReference  * mapRef;
428 	IOMemoryEntry               entries[0];
429 };
430 
431 enum{
432 	kIOMemoryReferenceReuse = 0x00000001,
433 	kIOMemoryReferenceWrite = 0x00000002,
434 	kIOMemoryReferenceCOW   = 0x00000004,
435 };
436 
437 SInt32 gIOMemoryReferenceCount;
438 
439 IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)440 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
441 {
442 	IOMemoryReference * ref;
443 	size_t              oldCapacity;
444 
445 	if (realloc) {
446 		oldCapacity = realloc->capacity;
447 	} else {
448 		oldCapacity = 0;
449 	}
450 
451 	// Use the kalloc API instead of manually handling the reallocation
452 	ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
453 	    oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
454 	if (ref) {
455 		if (oldCapacity == 0) {
456 			ref->refCount = 1;
457 			OSIncrementAtomic(&gIOMemoryReferenceCount);
458 		}
459 		ref->capacity = capacity;
460 	}
461 	return ref;
462 }
463 
464 void
memoryReferenceFree(IOMemoryReference * ref)465 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
466 {
467 	IOMemoryEntry * entries;
468 
469 	if (ref->mapRef) {
470 		memoryReferenceFree(ref->mapRef);
471 		ref->mapRef = NULL;
472 	}
473 
474 	entries = ref->entries + ref->count;
475 	while (entries > &ref->entries[0]) {
476 		entries--;
477 		ipc_port_release_send(entries->entry);
478 	}
479 	kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
480 
481 	OSDecrementAtomic(&gIOMemoryReferenceCount);
482 }
483 
484 void
memoryReferenceRelease(IOMemoryReference * ref)485 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
486 {
487 	if (1 == OSDecrementAtomic(&ref->refCount)) {
488 		memoryReferenceFree(ref);
489 	}
490 }
491 
492 
493 IOReturn
memoryReferenceCreate(IOOptionBits options,IOMemoryReference ** reference)494 IOGeneralMemoryDescriptor::memoryReferenceCreate(
495 	IOOptionBits         options,
496 	IOMemoryReference ** reference)
497 {
498 	enum { kCapacity = 4, kCapacityInc = 4 };
499 
500 	kern_return_t        err;
501 	IOMemoryReference *  ref;
502 	IOMemoryEntry *      entries;
503 	IOMemoryEntry *      cloneEntries = NULL;
504 	vm_map_t             map;
505 	ipc_port_t           entry, cloneEntry;
506 	vm_prot_t            prot;
507 	memory_object_size_t actualSize;
508 	uint32_t             rangeIdx;
509 	uint32_t             count;
510 	mach_vm_address_t    entryAddr, endAddr, entrySize;
511 	mach_vm_size_t       srcAddr, srcLen;
512 	mach_vm_size_t       nextAddr, nextLen;
513 	mach_vm_size_t       offset, remain;
514 	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
515 	int                  misaligned_start = 0, misaligned_end = 0;
516 	IOByteCount          physLen;
517 	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
518 	IOOptionBits         cacheMode;
519 	unsigned int         pagerFlags;
520 	vm_tag_t             tag;
521 	vm_named_entry_kernel_flags_t vmne_kflags;
522 
523 	ref = memoryReferenceAlloc(kCapacity, NULL);
524 	if (!ref) {
525 		return kIOReturnNoMemory;
526 	}
527 
528 	tag = (vm_tag_t) getVMTag(kernel_map);
529 	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
530 	entries = &ref->entries[0];
531 	count = 0;
532 	err = KERN_SUCCESS;
533 
534 	offset = 0;
535 	rangeIdx = 0;
536 	remain = _length;
537 	if (_task) {
538 		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
539 
540 		// account for IOBMD setLength(), use its capacity as length
541 		IOBufferMemoryDescriptor * bmd;
542 		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
543 			nextLen = bmd->getCapacity();
544 			remain  = nextLen;
545 		}
546 	} else {
547 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
548 		nextLen = physLen;
549 
550 		// default cache mode for physical
551 		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
552 			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
553 			_flags |= (mode << kIOMemoryBufferCacheShift);
554 		}
555 	}
556 
557 	// cache mode & vm_prot
558 	prot = VM_PROT_READ;
559 	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
560 	prot |= vmProtForCacheMode(cacheMode);
561 	// VM system requires write access to change cache mode
562 	if (kIODefaultCache != cacheMode) {
563 		prot |= VM_PROT_WRITE;
564 	}
565 	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
566 		prot |= VM_PROT_WRITE;
567 	}
568 	if (kIOMemoryReferenceWrite & options) {
569 		prot |= VM_PROT_WRITE;
570 	}
571 	if (kIOMemoryReferenceCOW   & options) {
572 		prot |= MAP_MEM_VM_COPY;
573 	}
574 
575 	if (kIOMemoryUseReserve & _flags) {
576 		prot |= MAP_MEM_GRAB_SECLUDED;
577 	}
578 
579 	if ((kIOMemoryReferenceReuse & options) && _memRef) {
580 		cloneEntries = &_memRef->entries[0];
581 		prot |= MAP_MEM_NAMED_REUSE;
582 	}
583 
584 	if (_task) {
585 		// virtual ranges
586 
587 		if (kIOMemoryBufferPageable & _flags) {
588 			int ledger_tag, ledger_no_footprint;
589 
590 			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
591 			prot |= MAP_MEM_NAMED_CREATE;
592 
593 			// default accounting settings:
594 			//   + "none" ledger tag
595 			//   + include in footprint
596 			// can be changed later with ::setOwnership()
597 			ledger_tag = VM_LEDGER_TAG_NONE;
598 			ledger_no_footprint = 0;
599 
600 			if (kIOMemoryBufferPurgeable & _flags) {
601 				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
602 				if (VM_KERN_MEMORY_SKYWALK == tag) {
603 					// Skywalk purgeable memory accounting:
604 					//    + "network" ledger tag
605 					//    + not included in footprint
606 					ledger_tag = VM_LEDGER_TAG_NETWORK;
607 					ledger_no_footprint = 1;
608 				} else {
609 					// regular purgeable memory accounting:
610 					//    + no ledger tag
611 					//    + included in footprint
612 					ledger_tag = VM_LEDGER_TAG_NONE;
613 					ledger_no_footprint = 0;
614 				}
615 			}
616 			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
617 			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
618 			if (kIOMemoryUseReserve & _flags) {
619 				prot |= MAP_MEM_GRAB_SECLUDED;
620 			}
621 
622 			prot |= VM_PROT_WRITE;
623 			map = NULL;
624 		} else {
625 			prot |= MAP_MEM_USE_DATA_ADDR;
626 			map = get_task_map(_task);
627 		}
628 		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
629 
630 		while (remain) {
631 			srcAddr  = nextAddr;
632 			srcLen   = nextLen;
633 			nextAddr = 0;
634 			nextLen  = 0;
635 			// coalesce addr range
636 			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
637 				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
638 				if ((srcAddr + srcLen) != nextAddr) {
639 					break;
640 				}
641 				srcLen += nextLen;
642 			}
643 
644 			if (MAP_MEM_USE_DATA_ADDR & prot) {
645 				entryAddr = srcAddr;
646 				endAddr   = srcAddr + srcLen;
647 			} else {
648 				entryAddr = trunc_page_64(srcAddr);
649 				endAddr   = round_page_64(srcAddr + srcLen);
650 			}
651 			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
652 				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
653 			}
654 
655 			do{
656 				entrySize = (endAddr - entryAddr);
657 				if (!entrySize) {
658 					break;
659 				}
660 				actualSize = entrySize;
661 
662 				cloneEntry = MACH_PORT_NULL;
663 				if (MAP_MEM_NAMED_REUSE & prot) {
664 					if (cloneEntries < &_memRef->entries[_memRef->count]) {
665 						cloneEntry = cloneEntries->entry;
666 					} else {
667 						prot &= ~MAP_MEM_NAMED_REUSE;
668 					}
669 				}
670 
671 				err = mach_make_memory_entry_internal(map,
672 				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
673 
674 				if (KERN_SUCCESS != err) {
675 					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
676 					break;
677 				}
678 				if (MAP_MEM_USE_DATA_ADDR & prot) {
679 					if (actualSize > entrySize) {
680 						actualSize = entrySize;
681 					}
682 				} else if (actualSize > entrySize) {
683 					panic("mach_make_memory_entry_64 actualSize");
684 				}
685 
686 				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
687 
688 				if (count && overmap_start) {
689 					/*
690 					 * Track misaligned start for all
691 					 * except the first entry.
692 					 */
693 					misaligned_start++;
694 				}
695 
696 				if (overmap_end) {
697 					/*
698 					 * Ignore misaligned end for the
699 					 * last entry.
700 					 */
701 					if ((entryAddr + actualSize) != endAddr) {
702 						misaligned_end++;
703 					}
704 				}
705 
706 				if (count) {
707 					/* Middle entries */
708 					if (misaligned_start || misaligned_end) {
709 						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
710 						ipc_port_release_send(entry);
711 						err = KERN_NOT_SUPPORTED;
712 						break;
713 					}
714 				}
715 
716 				if (count >= ref->capacity) {
717 					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
718 					entries = &ref->entries[count];
719 				}
720 				entries->entry  = entry;
721 				entries->size   = actualSize;
722 				entries->offset = offset + (entryAddr - srcAddr);
723 				entries->start = entryAddr;
724 				entryAddr += actualSize;
725 				if (MAP_MEM_NAMED_REUSE & prot) {
726 					if ((cloneEntries->entry == entries->entry)
727 					    && (cloneEntries->size == entries->size)
728 					    && (cloneEntries->offset == entries->offset)) {
729 						cloneEntries++;
730 					} else {
731 						prot &= ~MAP_MEM_NAMED_REUSE;
732 					}
733 				}
734 				entries++;
735 				count++;
736 			}while (true);
737 			offset += srcLen;
738 			remain -= srcLen;
739 		}
740 	} else {
741 		// _task == 0, physical or kIOMemoryTypeUPL
742 		memory_object_t pager;
743 		vm_size_t       size = ptoa_64(_pages);
744 
745 		if (!getKernelReserved()) {
746 			panic("getKernelReserved");
747 		}
748 
749 		reserved->dp.pagerContig = (1 == _rangesCount);
750 		reserved->dp.memory      = this;
751 
752 		pagerFlags = pagerFlagsForCacheMode(cacheMode);
753 		if (-1U == pagerFlags) {
754 			panic("phys is kIODefaultCache");
755 		}
756 		if (reserved->dp.pagerContig) {
757 			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
758 		}
759 
760 		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
761 		    size, pagerFlags);
762 		assert(pager);
763 		if (!pager) {
764 			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
765 			err = kIOReturnVMError;
766 		} else {
767 			srcAddr  = nextAddr;
768 			entryAddr = trunc_page_64(srcAddr);
769 			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
770 			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
771 			assert(KERN_SUCCESS == err);
772 			if (KERN_SUCCESS != err) {
773 				device_pager_deallocate(pager);
774 			} else {
775 				reserved->dp.devicePager = pager;
776 				entries->entry  = entry;
777 				entries->size   = size;
778 				entries->offset = offset + (entryAddr - srcAddr);
779 				entries++;
780 				count++;
781 			}
782 		}
783 	}
784 
785 	ref->count = count;
786 	ref->prot  = prot;
787 
788 	if (_task && (KERN_SUCCESS == err)
789 	    && (kIOMemoryMapCopyOnWrite & _flags)
790 	    && !(kIOMemoryReferenceCOW & options)) {
791 		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
792 		if (KERN_SUCCESS != err) {
793 			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
794 		}
795 	}
796 
797 	if (KERN_SUCCESS == err) {
798 		if (MAP_MEM_NAMED_REUSE & prot) {
799 			memoryReferenceFree(ref);
800 			OSIncrementAtomic(&_memRef->refCount);
801 			ref = _memRef;
802 		}
803 	} else {
804 		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
805 		memoryReferenceFree(ref);
806 		ref = NULL;
807 	}
808 
809 	*reference = ref;
810 
811 	return err;
812 }
813 
814 static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)815 IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
816 {
817 	switch (kIOMapGuardedMask & options) {
818 	default:
819 	case kIOMapGuardedSmall:
820 		return vm_map_page_size(map);
821 	case kIOMapGuardedLarge:
822 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
823 		return kIOMapGuardSizeLarge;
824 	}
825 	;
826 }
827 
828 static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)829 IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
830     vm_map_offset_t addr, mach_vm_size_t size)
831 {
832 	kern_return_t   kr;
833 	vm_map_offset_t actualAddr;
834 	mach_vm_size_t  actualSize;
835 
836 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
837 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
838 
839 	if (kIOMapGuardedMask & options) {
840 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
841 		actualAddr -= guardSize;
842 		actualSize += 2 * guardSize;
843 	}
844 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
845 
846 	return kr;
847 }
848 
849 kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map,void * _ref)850 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
851 {
852 	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
853 	IOReturn                        err;
854 	vm_map_offset_t                 addr;
855 	mach_vm_size_t                  size;
856 	mach_vm_size_t                  guardSize;
857 	vm_map_kernel_flags_t           vmk_flags;
858 
859 	addr = ref->mapped;
860 	size = ref->size;
861 	guardSize = 0;
862 
863 	if (kIOMapGuardedMask & ref->options) {
864 		if (!(kIOMapAnywhere & ref->options)) {
865 			return kIOReturnBadArgument;
866 		}
867 		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
868 		size += 2 * guardSize;
869 	}
870 	if (kIOMapAnywhere & ref->options) {
871 		vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
872 	} else {
873 		vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
874 	}
875 	vmk_flags.vm_tag = ref->tag;
876 
877 	/*
878 	 * Mapping memory into the kernel_map using IOMDs use the data range.
879 	 * Memory being mapped should not contain kernel pointers.
880 	 */
881 	if (map == kernel_map) {
882 		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
883 	}
884 
885 	err = vm_map_enter_mem_object(map, &addr, size,
886 #if __ARM_MIXED_PAGE_SIZE__
887 	    // TODO4K this should not be necessary...
888 	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
889 #else /* __ARM_MIXED_PAGE_SIZE__ */
890 	    (vm_map_offset_t) 0,
891 #endif /* __ARM_MIXED_PAGE_SIZE__ */
892 	    vmk_flags,
893 	    IPC_PORT_NULL,
894 	    (memory_object_offset_t) 0,
895 	    false,                       /* copy */
896 	    ref->prot,
897 	    ref->prot,
898 	    VM_INHERIT_NONE);
899 	if (KERN_SUCCESS == err) {
900 		ref->mapped = (mach_vm_address_t) addr;
901 		ref->map = map;
902 		if (kIOMapGuardedMask & ref->options) {
903 			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
904 
905 			err = vm_map_protect(map, addr, addr + guardSize, VM_PROT_NONE, false /*set_max*/);
906 			assert(KERN_SUCCESS == err);
907 			err = vm_map_protect(map, lastpage, lastpage + guardSize, VM_PROT_NONE, false /*set_max*/);
908 			assert(KERN_SUCCESS == err);
909 			ref->mapped += guardSize;
910 		}
911 	}
912 
913 	return err;
914 }
915 
916 IOReturn
memoryReferenceMap(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)917 IOGeneralMemoryDescriptor::memoryReferenceMap(
918 	IOMemoryReference * ref,
919 	vm_map_t            map,
920 	mach_vm_size_t      inoffset,
921 	mach_vm_size_t      size,
922 	IOOptionBits        options,
923 	mach_vm_address_t * inaddr)
924 {
925 	IOReturn        err;
926 	int64_t         offset = inoffset;
927 	uint32_t        rangeIdx, entryIdx;
928 	vm_map_offset_t addr, mapAddr;
929 	vm_map_offset_t pageOffset, entryOffset, remain, chunk;
930 
931 	mach_vm_address_t nextAddr;
932 	mach_vm_size_t    nextLen;
933 	IOByteCount       physLen;
934 	IOMemoryEntry   * entry;
935 	vm_prot_t         prot, memEntryCacheMode;
936 	IOOptionBits      type;
937 	IOOptionBits      cacheMode;
938 	vm_tag_t          tag;
939 	// for the kIOMapPrefault option.
940 	upl_page_info_t * pageList = NULL;
941 	UInt              currentPageIndex = 0;
942 	bool              didAlloc;
943 
944 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
945 
946 	if (ref->mapRef) {
947 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
948 		return err;
949 	}
950 
951 	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
952 		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
953 		return err;
954 	}
955 
956 	type = _flags & kIOMemoryTypeMask;
957 
958 	prot = VM_PROT_READ;
959 	if (!(kIOMapReadOnly & options)) {
960 		prot |= VM_PROT_WRITE;
961 	}
962 	prot &= ref->prot;
963 
964 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
965 	if (kIODefaultCache != cacheMode) {
966 		// VM system requires write access to update named entry cache mode
967 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
968 	}
969 
970 	tag = (typeof(tag))getVMTag(map);
971 
972 	if (_task) {
973 		// Find first range for offset
974 		if (!_rangesCount) {
975 			return kIOReturnBadArgument;
976 		}
977 		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
978 			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
979 			if (remain < nextLen) {
980 				break;
981 			}
982 			remain -= nextLen;
983 		}
984 	} else {
985 		rangeIdx = 0;
986 		remain   = 0;
987 		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
988 		nextLen  = size;
989 	}
990 
991 	assert(remain < nextLen);
992 	if (remain >= nextLen) {
993 		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
994 		return kIOReturnBadArgument;
995 	}
996 
997 	nextAddr  += remain;
998 	nextLen   -= remain;
999 #if __ARM_MIXED_PAGE_SIZE__
1000 	pageOffset = (vm_map_page_mask(map) & nextAddr);
1001 #else /* __ARM_MIXED_PAGE_SIZE__ */
1002 	pageOffset = (page_mask & nextAddr);
1003 #endif /* __ARM_MIXED_PAGE_SIZE__ */
1004 	addr       = 0;
1005 	didAlloc   = false;
1006 
1007 	if (!(options & kIOMapAnywhere)) {
1008 		addr = *inaddr;
1009 		if (pageOffset != (vm_map_page_mask(map) & addr)) {
1010 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1011 		}
1012 		addr -= pageOffset;
1013 	}
1014 
1015 	// find first entry for offset
1016 	for (entryIdx = 0;
1017 	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1018 	    entryIdx++) {
1019 	}
1020 	entryIdx--;
1021 	entry = &ref->entries[entryIdx];
1022 
1023 	// allocate VM
1024 #if __ARM_MIXED_PAGE_SIZE__
1025 	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1026 #else
1027 	size = round_page_64(size + pageOffset);
1028 #endif
1029 	if (kIOMapOverwrite & options) {
1030 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1031 			map = IOPageableMapForAddress(addr);
1032 		}
1033 		err = KERN_SUCCESS;
1034 	} else {
1035 		IOMemoryDescriptorMapAllocRef ref;
1036 		ref.map     = map;
1037 		ref.tag     = tag;
1038 		ref.options = options;
1039 		ref.size    = size;
1040 		ref.prot    = prot;
1041 		if (options & kIOMapAnywhere) {
1042 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1043 			ref.mapped = 0;
1044 		} else {
1045 			ref.mapped = addr;
1046 		}
1047 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1048 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1049 		} else {
1050 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1051 		}
1052 		if (KERN_SUCCESS == err) {
1053 			addr     = ref.mapped;
1054 			map      = ref.map;
1055 			didAlloc = true;
1056 		}
1057 	}
1058 
1059 	/*
1060 	 * If the memory is associated with a device pager but doesn't have a UPL,
1061 	 * it will be immediately faulted in through the pager via populateDevicePager().
1062 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1063 	 * operations.
1064 	 */
1065 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1066 		options &= ~kIOMapPrefault;
1067 	}
1068 
1069 	/*
1070 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1071 	 * memory type, and the underlying data.
1072 	 */
1073 	if (options & kIOMapPrefault) {
1074 		/*
1075 		 * The memory must have been wired by calling ::prepare(), otherwise
1076 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1077 		 */
1078 		assert(_wireCount != 0);
1079 		assert(_memoryEntries != NULL);
1080 		if ((_wireCount == 0) ||
1081 		    (_memoryEntries == NULL)) {
1082 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1083 			return kIOReturnBadArgument;
1084 		}
1085 
1086 		// Get the page list.
1087 		ioGMDData* dataP = getDataP(_memoryEntries);
1088 		ioPLBlock const* ioplList = getIOPLList(dataP);
1089 		pageList = getPageList(dataP);
1090 
1091 		// Get the number of IOPLs.
1092 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1093 
1094 		/*
1095 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1096 		 * the offset. The research will go past it, so we'll need to go back to the
1097 		 * right range at the end.
1098 		 */
1099 		UInt ioplIndex = 0;
1100 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1101 			ioplIndex++;
1102 		}
1103 		ioplIndex--;
1104 
1105 		// Retrieve the IOPL info block.
1106 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1107 
1108 		/*
1109 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1110 		 * array.
1111 		 */
1112 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1113 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1114 		} else {
1115 			pageList = &pageList[ioplInfo.fPageInfo];
1116 		}
1117 
1118 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1119 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1120 
1121 		// Retrieve the index of the first page corresponding to the offset.
1122 		currentPageIndex = atop_32(offsetInIOPL);
1123 	}
1124 
1125 	// enter mappings
1126 	remain  = size;
1127 	mapAddr = addr;
1128 	addr    += pageOffset;
1129 
1130 	while (remain && (KERN_SUCCESS == err)) {
1131 		entryOffset = offset - entry->offset;
1132 		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1133 			err = kIOReturnNotAligned;
1134 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1135 			break;
1136 		}
1137 
1138 		if (kIODefaultCache != cacheMode) {
1139 			vm_size_t unused = 0;
1140 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1141 			    memEntryCacheMode, NULL, entry->entry);
1142 			assert(KERN_SUCCESS == err);
1143 		}
1144 
1145 		entryOffset -= pageOffset;
1146 		if (entryOffset >= entry->size) {
1147 			panic("entryOffset");
1148 		}
1149 		chunk = entry->size - entryOffset;
1150 		if (chunk) {
1151 			vm_map_kernel_flags_t vmk_flags = {
1152 				.vmf_fixed = true,
1153 				.vmf_overwrite = true,
1154 				.vm_tag = tag,
1155 				.vmkf_iokit_acct = true,
1156 			};
1157 
1158 			if (chunk > remain) {
1159 				chunk = remain;
1160 			}
1161 			if (options & kIOMapPrefault) {
1162 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1163 
1164 				err = vm_map_enter_mem_object_prefault(map,
1165 				    &mapAddr,
1166 				    chunk, 0 /* mask */,
1167 				    vmk_flags,
1168 				    entry->entry,
1169 				    entryOffset,
1170 				    prot,                        // cur
1171 				    prot,                        // max
1172 				    &pageList[currentPageIndex],
1173 				    nb_pages);
1174 
1175 				if (err || vm_map_page_mask(map) < PAGE_MASK) {
1176 					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1177 				}
1178 				// Compute the next index in the page list.
1179 				currentPageIndex += nb_pages;
1180 				assert(currentPageIndex <= _pages);
1181 			} else {
1182 				err = vm_map_enter_mem_object(map,
1183 				    &mapAddr,
1184 				    chunk, 0 /* mask */,
1185 				    vmk_flags,
1186 				    entry->entry,
1187 				    entryOffset,
1188 				    false,               // copy
1189 				    prot,               // cur
1190 				    prot,               // max
1191 				    VM_INHERIT_NONE);
1192 			}
1193 			if (KERN_SUCCESS != err) {
1194 				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1195 				break;
1196 			}
1197 			remain -= chunk;
1198 			if (!remain) {
1199 				break;
1200 			}
1201 			mapAddr  += chunk;
1202 			offset   += chunk - pageOffset;
1203 		}
1204 		pageOffset = 0;
1205 		entry++;
1206 		entryIdx++;
1207 		if (entryIdx >= ref->count) {
1208 			err = kIOReturnOverrun;
1209 			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1210 			break;
1211 		}
1212 	}
1213 
1214 	if ((KERN_SUCCESS != err) && didAlloc) {
1215 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1216 		addr = 0;
1217 	}
1218 	*inaddr = addr;
1219 
1220 	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1221 		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1222 	}
1223 	return err;
1224 }
1225 
1226 #define LOGUNALIGN 0
1227 IOReturn
memoryReferenceMapNew(IOMemoryReference * ref,vm_map_t map,mach_vm_size_t inoffset,mach_vm_size_t size,IOOptionBits options,mach_vm_address_t * inaddr)1228 IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1229 	IOMemoryReference * ref,
1230 	vm_map_t            map,
1231 	mach_vm_size_t      inoffset,
1232 	mach_vm_size_t      size,
1233 	IOOptionBits        options,
1234 	mach_vm_address_t * inaddr)
1235 {
1236 	IOReturn            err;
1237 	int64_t             offset = inoffset;
1238 	uint32_t            entryIdx, firstEntryIdx;
1239 	vm_map_offset_t     addr, mapAddr, mapAddrOut;
1240 	vm_map_offset_t     entryOffset, remain, chunk;
1241 
1242 	IOMemoryEntry    * entry;
1243 	vm_prot_t          prot, memEntryCacheMode;
1244 	IOOptionBits       type;
1245 	IOOptionBits       cacheMode;
1246 	vm_tag_t           tag;
1247 	// for the kIOMapPrefault option.
1248 	upl_page_info_t  * pageList = NULL;
1249 	UInt               currentPageIndex = 0;
1250 	bool               didAlloc;
1251 
1252 	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1253 
1254 	if (ref->mapRef) {
1255 		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1256 		return err;
1257 	}
1258 
1259 #if LOGUNALIGN
1260 	printf("MAP offset %qx, %qx\n", inoffset, size);
1261 #endif
1262 
1263 	type = _flags & kIOMemoryTypeMask;
1264 
1265 	prot = VM_PROT_READ;
1266 	if (!(kIOMapReadOnly & options)) {
1267 		prot |= VM_PROT_WRITE;
1268 	}
1269 	prot &= ref->prot;
1270 
1271 	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1272 	if (kIODefaultCache != cacheMode) {
1273 		// VM system requires write access to update named entry cache mode
1274 		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1275 	}
1276 
1277 	tag = (vm_tag_t) getVMTag(map);
1278 
1279 	addr       = 0;
1280 	didAlloc   = false;
1281 
1282 	if (!(options & kIOMapAnywhere)) {
1283 		addr = *inaddr;
1284 	}
1285 
1286 	// find first entry for offset
1287 	for (firstEntryIdx = 0;
1288 	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1289 	    firstEntryIdx++) {
1290 	}
1291 	firstEntryIdx--;
1292 
1293 	// calculate required VM space
1294 
1295 	entryIdx = firstEntryIdx;
1296 	entry = &ref->entries[entryIdx];
1297 
1298 	remain  = size;
1299 	int64_t iteroffset = offset;
1300 	uint64_t mapSize = 0;
1301 	while (remain) {
1302 		entryOffset = iteroffset - entry->offset;
1303 		if (entryOffset >= entry->size) {
1304 			panic("entryOffset");
1305 		}
1306 
1307 #if LOGUNALIGN
1308 		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1309 		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1310 #endif
1311 
1312 		chunk = entry->size - entryOffset;
1313 		if (chunk) {
1314 			if (chunk > remain) {
1315 				chunk = remain;
1316 			}
1317 			mach_vm_size_t entrySize;
1318 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1319 			assert(KERN_SUCCESS == err);
1320 			mapSize += entrySize;
1321 
1322 			remain -= chunk;
1323 			if (!remain) {
1324 				break;
1325 			}
1326 			iteroffset   += chunk; // - pageOffset;
1327 		}
1328 		entry++;
1329 		entryIdx++;
1330 		if (entryIdx >= ref->count) {
1331 			panic("overrun");
1332 			err = kIOReturnOverrun;
1333 			break;
1334 		}
1335 	}
1336 
1337 	if (kIOMapOverwrite & options) {
1338 		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1339 			map = IOPageableMapForAddress(addr);
1340 		}
1341 		err = KERN_SUCCESS;
1342 	} else {
1343 		IOMemoryDescriptorMapAllocRef ref;
1344 		ref.map     = map;
1345 		ref.tag     = tag;
1346 		ref.options = options;
1347 		ref.size    = mapSize;
1348 		ref.prot    = prot;
1349 		if (options & kIOMapAnywhere) {
1350 			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1351 			ref.mapped = 0;
1352 		} else {
1353 			ref.mapped = addr;
1354 		}
1355 		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1356 			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1357 		} else {
1358 			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1359 		}
1360 
1361 		if (KERN_SUCCESS == err) {
1362 			addr     = ref.mapped;
1363 			map      = ref.map;
1364 			didAlloc = true;
1365 		}
1366 #if LOGUNALIGN
1367 		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1368 #endif
1369 	}
1370 
1371 	/*
1372 	 * If the memory is associated with a device pager but doesn't have a UPL,
1373 	 * it will be immediately faulted in through the pager via populateDevicePager().
1374 	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1375 	 * operations.
1376 	 */
1377 	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1378 		options &= ~kIOMapPrefault;
1379 	}
1380 
1381 	/*
1382 	 * Prefaulting is only possible if we wired the memory earlier. Check the
1383 	 * memory type, and the underlying data.
1384 	 */
1385 	if (options & kIOMapPrefault) {
1386 		/*
1387 		 * The memory must have been wired by calling ::prepare(), otherwise
1388 		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
1389 		 */
1390 		assert(_wireCount != 0);
1391 		assert(_memoryEntries != NULL);
1392 		if ((_wireCount == 0) ||
1393 		    (_memoryEntries == NULL)) {
1394 			return kIOReturnBadArgument;
1395 		}
1396 
1397 		// Get the page list.
1398 		ioGMDData* dataP = getDataP(_memoryEntries);
1399 		ioPLBlock const* ioplList = getIOPLList(dataP);
1400 		pageList = getPageList(dataP);
1401 
1402 		// Get the number of IOPLs.
1403 		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1404 
1405 		/*
1406 		 * Scan through the IOPL Info Blocks, looking for the first block containing
1407 		 * the offset. The research will go past it, so we'll need to go back to the
1408 		 * right range at the end.
1409 		 */
1410 		UInt ioplIndex = 0;
1411 		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1412 			ioplIndex++;
1413 		}
1414 		ioplIndex--;
1415 
1416 		// Retrieve the IOPL info block.
1417 		ioPLBlock ioplInfo = ioplList[ioplIndex];
1418 
1419 		/*
1420 		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
1421 		 * array.
1422 		 */
1423 		if (ioplInfo.fFlags & kIOPLExternUPL) {
1424 			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1425 		} else {
1426 			pageList = &pageList[ioplInfo.fPageInfo];
1427 		}
1428 
1429 		// Rebase [offset] into the IOPL in order to looks for the first page index.
1430 		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1431 
1432 		// Retrieve the index of the first page corresponding to the offset.
1433 		currentPageIndex = atop_32(offsetInIOPL);
1434 	}
1435 
1436 	// enter mappings
1437 	remain   = size;
1438 	mapAddr  = addr;
1439 	entryIdx = firstEntryIdx;
1440 	entry = &ref->entries[entryIdx];
1441 
1442 	while (remain && (KERN_SUCCESS == err)) {
1443 #if LOGUNALIGN
1444 		printf("offset %qx, %qx\n", offset, entry->offset);
1445 #endif
1446 		if (kIODefaultCache != cacheMode) {
1447 			vm_size_t unused = 0;
1448 			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1449 			    memEntryCacheMode, NULL, entry->entry);
1450 			assert(KERN_SUCCESS == err);
1451 		}
1452 		entryOffset = offset - entry->offset;
1453 		if (entryOffset >= entry->size) {
1454 			panic("entryOffset");
1455 		}
1456 		chunk = entry->size - entryOffset;
1457 #if LOGUNALIGN
1458 		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1459 #endif
1460 		if (chunk) {
1461 			vm_map_kernel_flags_t vmk_flags = {
1462 				.vmf_fixed = true,
1463 				.vmf_overwrite = true,
1464 				.vmf_return_data_addr = true,
1465 				.vm_tag = tag,
1466 				.vmkf_iokit_acct = true,
1467 			};
1468 
1469 			if (chunk > remain) {
1470 				chunk = remain;
1471 			}
1472 			mapAddrOut = mapAddr;
1473 			if (options & kIOMapPrefault) {
1474 				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1475 
1476 				err = vm_map_enter_mem_object_prefault(map,
1477 				    &mapAddrOut,
1478 				    chunk, 0 /* mask */,
1479 				    vmk_flags,
1480 				    entry->entry,
1481 				    entryOffset,
1482 				    prot,                        // cur
1483 				    prot,                        // max
1484 				    &pageList[currentPageIndex],
1485 				    nb_pages);
1486 
1487 				// Compute the next index in the page list.
1488 				currentPageIndex += nb_pages;
1489 				assert(currentPageIndex <= _pages);
1490 			} else {
1491 #if LOGUNALIGN
1492 				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1493 #endif
1494 				err = vm_map_enter_mem_object(map,
1495 				    &mapAddrOut,
1496 				    chunk, 0 /* mask */,
1497 				    vmk_flags,
1498 				    entry->entry,
1499 				    entryOffset,
1500 				    false,               // copy
1501 				    prot,               // cur
1502 				    prot,               // max
1503 				    VM_INHERIT_NONE);
1504 			}
1505 			if (KERN_SUCCESS != err) {
1506 				panic("map enter err %x", err);
1507 				break;
1508 			}
1509 #if LOGUNALIGN
1510 			printf("mapAddr o %qx\n", mapAddrOut);
1511 #endif
1512 			if (entryIdx == firstEntryIdx) {
1513 				addr = mapAddrOut;
1514 			}
1515 			remain -= chunk;
1516 			if (!remain) {
1517 				break;
1518 			}
1519 			mach_vm_size_t entrySize;
1520 			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1521 			assert(KERN_SUCCESS == err);
1522 			mapAddr += entrySize;
1523 			offset  += chunk;
1524 		}
1525 
1526 		entry++;
1527 		entryIdx++;
1528 		if (entryIdx >= ref->count) {
1529 			err = kIOReturnOverrun;
1530 			break;
1531 		}
1532 	}
1533 
1534 	if (KERN_SUCCESS != err) {
1535 		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1536 	}
1537 
1538 	if ((KERN_SUCCESS != err) && didAlloc) {
1539 		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1540 		addr = 0;
1541 	}
1542 	*inaddr = addr;
1543 
1544 	return err;
1545 }
1546 
1547 uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1548 IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1549 	IOMemoryReference * ref,
1550 	uint64_t          * offset)
1551 {
1552 	kern_return_t kr;
1553 	vm_object_offset_t data_offset = 0;
1554 	uint64_t total;
1555 	uint32_t idx;
1556 
1557 	assert(ref->count);
1558 	if (offset) {
1559 		*offset = (uint64_t) data_offset;
1560 	}
1561 	total = 0;
1562 	for (idx = 0; idx < ref->count; idx++) {
1563 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1564 		    &data_offset);
1565 		if (KERN_SUCCESS != kr) {
1566 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1567 		} else if (0 != data_offset) {
1568 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1569 		}
1570 		if (offset && !idx) {
1571 			*offset = (uint64_t) data_offset;
1572 		}
1573 		total += round_page(data_offset + ref->entries[idx].size);
1574 	}
1575 
1576 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1577 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1578 
1579 	return total;
1580 }
1581 
1582 
1583 IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)1584 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1585 	IOMemoryReference * ref,
1586 	IOByteCount       * residentPageCount,
1587 	IOByteCount       * dirtyPageCount)
1588 {
1589 	IOReturn        err;
1590 	IOMemoryEntry * entries;
1591 	unsigned int resident, dirty;
1592 	unsigned int totalResident, totalDirty;
1593 
1594 	totalResident = totalDirty = 0;
1595 	err = kIOReturnSuccess;
1596 	entries = ref->entries + ref->count;
1597 	while (entries > &ref->entries[0]) {
1598 		entries--;
1599 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1600 		if (KERN_SUCCESS != err) {
1601 			break;
1602 		}
1603 		totalResident += resident;
1604 		totalDirty    += dirty;
1605 	}
1606 
1607 	if (residentPageCount) {
1608 		*residentPageCount = totalResident;
1609 	}
1610 	if (dirtyPageCount) {
1611 		*dirtyPageCount    = totalDirty;
1612 	}
1613 	return err;
1614 }
1615 
1616 IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1617 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1618 	IOMemoryReference * ref,
1619 	IOOptionBits        newState,
1620 	IOOptionBits      * oldState)
1621 {
1622 	IOReturn        err;
1623 	IOMemoryEntry * entries;
1624 	vm_purgable_t   control;
1625 	int             totalState, state;
1626 
1627 	totalState = kIOMemoryPurgeableNonVolatile;
1628 	err = kIOReturnSuccess;
1629 	entries = ref->entries + ref->count;
1630 	while (entries > &ref->entries[0]) {
1631 		entries--;
1632 
1633 		err = purgeableControlBits(newState, &control, &state);
1634 		if (KERN_SUCCESS != err) {
1635 			break;
1636 		}
1637 		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1638 		if (KERN_SUCCESS != err) {
1639 			break;
1640 		}
1641 		err = purgeableStateBits(&state);
1642 		if (KERN_SUCCESS != err) {
1643 			break;
1644 		}
1645 
1646 		if (kIOMemoryPurgeableEmpty == state) {
1647 			totalState = kIOMemoryPurgeableEmpty;
1648 		} else if (kIOMemoryPurgeableEmpty == totalState) {
1649 			continue;
1650 		} else if (kIOMemoryPurgeableVolatile == totalState) {
1651 			continue;
1652 		} else if (kIOMemoryPurgeableVolatile == state) {
1653 			totalState = kIOMemoryPurgeableVolatile;
1654 		} else {
1655 			totalState = kIOMemoryPurgeableNonVolatile;
1656 		}
1657 	}
1658 
1659 	if (oldState) {
1660 		*oldState = totalState;
1661 	}
1662 	return err;
1663 }
1664 
1665 IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1666 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667 	IOMemoryReference * ref,
1668 	task_t              newOwner,
1669 	int                 newLedgerTag,
1670 	IOOptionBits        newLedgerOptions)
1671 {
1672 	IOReturn        err, totalErr;
1673 	IOMemoryEntry * entries;
1674 
1675 	totalErr = kIOReturnSuccess;
1676 	entries = ref->entries + ref->count;
1677 	while (entries > &ref->entries[0]) {
1678 		entries--;
1679 
1680 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1681 		if (KERN_SUCCESS != err) {
1682 			totalErr = err;
1683 		}
1684 	}
1685 
1686 	return totalErr;
1687 }
1688 
1689 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690 
1691 OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1692 IOMemoryDescriptor::withAddress(void *      address,
1693     IOByteCount   length,
1694     IODirection direction)
1695 {
1696 	return IOMemoryDescriptor::
1697 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1698 }
1699 
1700 #ifndef __LP64__
1701 OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1702 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703     IOByteCount  length,
1704     IODirection  direction,
1705     task_t       task)
1706 {
1707 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708 	if (that) {
1709 		if (that->initWithAddress(address, length, direction, task)) {
1710 			return os::move(that);
1711 		}
1712 	}
1713 	return nullptr;
1714 }
1715 #endif /* !__LP64__ */
1716 
1717 OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1718 IOMemoryDescriptor::withPhysicalAddress(
1719 	IOPhysicalAddress       address,
1720 	IOByteCount             length,
1721 	IODirection             direction )
1722 {
1723 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1724 }
1725 
1726 #ifndef __LP64__
1727 OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1728 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729     UInt32           withCount,
1730     IODirection      direction,
1731     task_t           task,
1732     bool             asReference)
1733 {
1734 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735 	if (that) {
1736 		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737 			return os::move(that);
1738 		}
1739 	}
1740 	return nullptr;
1741 }
1742 #endif /* !__LP64__ */
1743 
1744 OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1745 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746     mach_vm_size_t length,
1747     IOOptionBits   options,
1748     task_t         task)
1749 {
1750 	IOAddressRange range = { address, length };
1751 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1752 }
1753 
1754 OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1755 IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1756     UInt32           rangeCount,
1757     IOOptionBits     options,
1758     task_t           task)
1759 {
1760 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761 	if (that) {
1762 		if (task) {
1763 			options |= kIOMemoryTypeVirtual64;
1764 		} else {
1765 			options |= kIOMemoryTypePhysical64;
1766 		}
1767 
1768 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1769 			return os::move(that);
1770 		}
1771 	}
1772 
1773 	return nullptr;
1774 }
1775 
1776 
1777 /*
1778  * withOptions:
1779  *
1780  * Create a new IOMemoryDescriptor. The buffer is made up of several
1781  * virtual address ranges, from a given task.
1782  *
1783  * Passing the ranges as a reference will avoid an extra allocation.
1784  */
1785 OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1786 IOMemoryDescriptor::withOptions(void *          buffers,
1787     UInt32          count,
1788     UInt32          offset,
1789     task_t          task,
1790     IOOptionBits    opts,
1791     IOMapper *      mapper)
1792 {
1793 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794 
1795 	if (self
1796 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1797 		return nullptr;
1798 	}
1799 
1800 	return os::move(self);
1801 }
1802 
1803 bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1804 IOMemoryDescriptor::initWithOptions(void *         buffers,
1805     UInt32         count,
1806     UInt32         offset,
1807     task_t         task,
1808     IOOptionBits   options,
1809     IOMapper *     mapper)
1810 {
1811 	return false;
1812 }
1813 
1814 #ifndef __LP64__
1815 OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1816 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817     UInt32          withCount,
1818     IODirection     direction,
1819     bool            asReference)
1820 {
1821 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822 	if (that) {
1823 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824 			return os::move(that);
1825 		}
1826 	}
1827 	return nullptr;
1828 }
1829 
1830 OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1831 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
1832     IOByteCount             offset,
1833     IOByteCount             length,
1834     IODirection             direction)
1835 {
1836 	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1837 }
1838 #endif /* !__LP64__ */
1839 
1840 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1841 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842 {
1843 	IOGeneralMemoryDescriptor *origGenMD =
1844 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845 
1846 	if (origGenMD) {
1847 		return IOGeneralMemoryDescriptor::
1848 		       withPersistentMemoryDescriptor(origGenMD);
1849 	} else {
1850 		return nullptr;
1851 	}
1852 }
1853 
1854 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor * originalMD)1855 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1856 {
1857 	IOMemoryReference * memRef;
1858 	OSSharedPtr<IOGeneralMemoryDescriptor> self;
1859 
1860 	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1861 		return nullptr;
1862 	}
1863 
1864 	if (memRef == originalMD->_memRef) {
1865 		self.reset(originalMD, OSRetain);
1866 		originalMD->memoryReferenceRelease(memRef);
1867 		return os::move(self);
1868 	}
1869 
1870 	self = OSMakeShared<IOGeneralMemoryDescriptor>();
1871 	IOMDPersistentInitData initData = { originalMD, memRef };
1872 
1873 	if (self
1874 	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1875 		return nullptr;
1876 	}
1877 	return os::move(self);
1878 }
1879 
1880 #ifndef __LP64__
1881 bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1882 IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1883     IOByteCount   withLength,
1884     IODirection withDirection)
1885 {
1886 	_singleRange.v.address = (vm_offset_t) address;
1887 	_singleRange.v.length  = withLength;
1888 
1889 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1890 }
1891 
1892 bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1893 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1894     IOByteCount    withLength,
1895     IODirection  withDirection,
1896     task_t       withTask)
1897 {
1898 	_singleRange.v.address = address;
1899 	_singleRange.v.length  = withLength;
1900 
1901 	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1902 }
1903 
1904 bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1905 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1906 	IOPhysicalAddress      address,
1907 	IOByteCount            withLength,
1908 	IODirection            withDirection )
1909 {
1910 	_singleRange.p.address = address;
1911 	_singleRange.p.length  = withLength;
1912 
1913 	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1914 }
1915 
1916 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1917 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918 	IOPhysicalRange * ranges,
1919 	UInt32            count,
1920 	IODirection       direction,
1921 	bool              reference)
1922 {
1923 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924 
1925 	if (reference) {
1926 		mdOpts |= kIOMemoryAsReference;
1927 	}
1928 
1929 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930 }
1931 
1932 bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1933 IOGeneralMemoryDescriptor::initWithRanges(
1934 	IOVirtualRange * ranges,
1935 	UInt32           count,
1936 	IODirection      direction,
1937 	task_t           task,
1938 	bool             reference)
1939 {
1940 	IOOptionBits mdOpts = direction;
1941 
1942 	if (reference) {
1943 		mdOpts |= kIOMemoryAsReference;
1944 	}
1945 
1946 	if (task) {
1947 		mdOpts |= kIOMemoryTypeVirtual;
1948 
1949 		// Auto-prepare if this is a kernel memory descriptor as very few
1950 		// clients bother to prepare() kernel memory.
1951 		// But it was not enforced so what are you going to do?
1952 		if (task == kernel_task) {
1953 			mdOpts |= kIOMemoryAutoPrepare;
1954 		}
1955 	} else {
1956 		mdOpts |= kIOMemoryTypePhysical;
1957 	}
1958 
1959 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960 }
1961 #endif /* !__LP64__ */
1962 
1963 /*
1964  * initWithOptions:
1965  *
1966  *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1967  * from a given task, several physical ranges, an UPL from the ubc
1968  * system or a uio (may be 64bit) from the BSD subsystem.
1969  *
1970  * Passing the ranges as a reference will avoid an extra allocation.
1971  *
1972  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973  * existing instance -- note this behavior is not commonly supported in other
1974  * I/O Kit classes, although it is supported here.
1975  */
1976 
1977 bool
initWithOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits options,IOMapper * mapper)1978 IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
1979     UInt32       count,
1980     UInt32       offset,
1981     task_t       task,
1982     IOOptionBits options,
1983     IOMapper *   mapper)
1984 {
1985 	IOOptionBits type = options & kIOMemoryTypeMask;
1986 
1987 #ifndef __LP64__
1988 	if (task
1989 	    && (kIOMemoryTypeVirtual == type)
1990 	    && vm_map_is_64bit(get_task_map(task))
1991 	    && ((IOVirtualRange *) buffers)->address) {
1992 		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1993 		return false;
1994 	}
1995 #endif /* !__LP64__ */
1996 
1997 	// Grab the original MD's configuation data to initialse the
1998 	// arguments to this function.
1999 	if (kIOMemoryTypePersistentMD == type) {
2000 		IOMDPersistentInitData *initData = (typeof(initData))buffers;
2001 		const IOGeneralMemoryDescriptor *orig = initData->fMD;
2002 		ioGMDData *dataP = getDataP(orig->_memoryEntries);
2003 
2004 		// Only accept persistent memory descriptors with valid dataP data.
2005 		assert(orig->_rangesCount == 1);
2006 		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
2007 			return false;
2008 		}
2009 
2010 		_memRef = initData->fMemRef; // Grab the new named entry
2011 		options = orig->_flags & ~kIOMemoryAsReference;
2012 		type = options & kIOMemoryTypeMask;
2013 		buffers = orig->_ranges.v;
2014 		count = orig->_rangesCount;
2015 
2016 		// Now grab the original task and whatever mapper was previously used
2017 		task = orig->_task;
2018 		mapper = dataP->fMapper;
2019 
2020 		// We are ready to go through the original initialisation now
2021 	}
2022 
2023 	switch (type) {
2024 	case kIOMemoryTypeUIO:
2025 	case kIOMemoryTypeVirtual:
2026 #ifndef __LP64__
2027 	case kIOMemoryTypeVirtual64:
2028 #endif /* !__LP64__ */
2029 		assert(task);
2030 		if (!task) {
2031 			return false;
2032 		}
2033 		break;
2034 
2035 	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
2036 #ifndef __LP64__
2037 	case kIOMemoryTypePhysical64:
2038 #endif /* !__LP64__ */
2039 	case kIOMemoryTypeUPL:
2040 		assert(!task);
2041 		break;
2042 	default:
2043 		return false; /* bad argument */
2044 	}
2045 
2046 	assert(buffers);
2047 	assert(count);
2048 
2049 	/*
2050 	 * We can check the _initialized  instance variable before having ever set
2051 	 * it to an initial value because I/O Kit guarantees that all our instance
2052 	 * variables are zeroed on an object's allocation.
2053 	 */
2054 
2055 	if (_initialized) {
2056 		/*
2057 		 * An existing memory descriptor is being retargeted to point to
2058 		 * somewhere else.  Clean up our present state.
2059 		 */
2060 		IOOptionBits type = _flags & kIOMemoryTypeMask;
2061 		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2062 			while (_wireCount) {
2063 				complete();
2064 			}
2065 		}
2066 		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2067 			if (kIOMemoryTypeUIO == type) {
2068 				uio_free((uio_t) _ranges.v);
2069 			}
2070 #ifndef __LP64__
2071 			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2072 				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2073 			}
2074 #endif /* !__LP64__ */
2075 			else {
2076 				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2077 			}
2078 		}
2079 
2080 		options |= (kIOMemoryRedirected & _flags);
2081 		if (!(kIOMemoryRedirected & options)) {
2082 			if (_memRef) {
2083 				memoryReferenceRelease(_memRef);
2084 				_memRef = NULL;
2085 			}
2086 			if (_mappings) {
2087 				_mappings->flushCollection();
2088 			}
2089 		}
2090 	} else {
2091 		if (!super::init()) {
2092 			return false;
2093 		}
2094 		_initialized = true;
2095 	}
2096 
2097 	// Grab the appropriate mapper
2098 	if (kIOMemoryHostOrRemote & options) {
2099 		options |= kIOMemoryMapperNone;
2100 	}
2101 	if (kIOMemoryMapperNone & options) {
2102 		mapper = NULL; // No Mapper
2103 	} else if (mapper == kIOMapperSystem) {
2104 		IOMapper::checkForSystemMapper();
2105 		gIOSystemMapper = mapper = IOMapper::gSystem;
2106 	}
2107 
2108 	// Remove the dynamic internal use flags from the initial setting
2109 	options               &= ~(kIOMemoryPreparedReadOnly);
2110 	_flags                 = options;
2111 	_task                  = task;
2112 
2113 #ifndef __LP64__
2114 	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
2115 #endif /* !__LP64__ */
2116 
2117 	_dmaReferences = 0;
2118 	__iomd_reservedA = 0;
2119 	__iomd_reservedB = 0;
2120 	_highestPage = 0;
2121 
2122 	if (kIOMemoryThreadSafe & options) {
2123 		if (!_prepareLock) {
2124 			_prepareLock = IOLockAlloc();
2125 		}
2126 	} else if (_prepareLock) {
2127 		IOLockFree(_prepareLock);
2128 		_prepareLock = NULL;
2129 	}
2130 
2131 	if (kIOMemoryTypeUPL == type) {
2132 		ioGMDData *dataP;
2133 		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2134 
2135 		if (!initMemoryEntries(dataSize, mapper)) {
2136 			return false;
2137 		}
2138 		dataP = getDataP(_memoryEntries);
2139 		dataP->fPageCnt = 0;
2140 		switch (kIOMemoryDirectionMask & options) {
2141 		case kIODirectionOut:
2142 			dataP->fDMAAccess = kIODMAMapReadAccess;
2143 			break;
2144 		case kIODirectionIn:
2145 			dataP->fDMAAccess = kIODMAMapWriteAccess;
2146 			break;
2147 		case kIODirectionNone:
2148 		case kIODirectionOutIn:
2149 		default:
2150 			panic("bad dir for upl 0x%x", (int) options);
2151 			break;
2152 		}
2153 		//       _wireCount++;	// UPLs start out life wired
2154 
2155 		_length    = count;
2156 		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2157 
2158 		ioPLBlock iopl;
2159 		iopl.fIOPL = (upl_t) buffers;
2160 		upl_set_referenced(iopl.fIOPL, true);
2161 		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2162 
2163 		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2164 			panic("short external upl");
2165 		}
2166 
2167 		_highestPage = upl_get_highest_page(iopl.fIOPL);
2168 		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2169 
2170 		// Set the flag kIOPLOnDevice convieniently equal to 1
2171 		iopl.fFlags  = pageList->device | kIOPLExternUPL;
2172 		if (!pageList->device) {
2173 			// Pre-compute the offset into the UPL's page list
2174 			pageList = &pageList[atop_32(offset)];
2175 			offset &= PAGE_MASK;
2176 		}
2177 		iopl.fIOMDOffset = 0;
2178 		iopl.fMappedPage = 0;
2179 		iopl.fPageInfo = (vm_address_t) pageList;
2180 		iopl.fPageOffset = offset;
2181 		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
2182 	} else {
2183 		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2184 		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2185 
2186 		// Initialize the memory descriptor
2187 		if (options & kIOMemoryAsReference) {
2188 #ifndef __LP64__
2189 			_rangesIsAllocated = false;
2190 #endif /* !__LP64__ */
2191 
2192 			// Hack assignment to get the buffer arg into _ranges.
2193 			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2194 			// work, C++ sigh.
2195 			// This also initialises the uio & physical ranges.
2196 			_ranges.v = (IOVirtualRange *) buffers;
2197 		} else {
2198 #ifndef __LP64__
2199 			_rangesIsAllocated = true;
2200 #endif /* !__LP64__ */
2201 			switch (type) {
2202 			case kIOMemoryTypeUIO:
2203 				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2204 				break;
2205 
2206 #ifndef __LP64__
2207 			case kIOMemoryTypeVirtual64:
2208 			case kIOMemoryTypePhysical64:
2209 				if (count == 1
2210 #ifndef __arm__
2211 				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2212 #endif
2213 				    ) {
2214 					if (kIOMemoryTypeVirtual64 == type) {
2215 						type = kIOMemoryTypeVirtual;
2216 					} else {
2217 						type = kIOMemoryTypePhysical;
2218 					}
2219 					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2220 					_rangesIsAllocated = false;
2221 					_ranges.v = &_singleRange.v;
2222 					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
2223 					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
2224 					break;
2225 				}
2226 				_ranges.v64 = IONew(IOAddressRange, count);
2227 				if (!_ranges.v64) {
2228 					return false;
2229 				}
2230 				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2231 				break;
2232 #endif /* !__LP64__ */
2233 			case kIOMemoryTypeVirtual:
2234 			case kIOMemoryTypePhysical:
2235 				if (count == 1) {
2236 					_flags |= kIOMemoryAsReference;
2237 #ifndef __LP64__
2238 					_rangesIsAllocated = false;
2239 #endif /* !__LP64__ */
2240 					_ranges.v = &_singleRange.v;
2241 				} else {
2242 					_ranges.v = IONew(IOVirtualRange, count);
2243 					if (!_ranges.v) {
2244 						return false;
2245 					}
2246 				}
2247 				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2248 				break;
2249 			}
2250 		}
2251 		_rangesCount = count;
2252 
2253 		// Find starting address within the vector of ranges
2254 		Ranges vec = _ranges;
2255 		mach_vm_size_t totalLength = 0;
2256 		unsigned int ind, pages = 0;
2257 		for (ind = 0; ind < count; ind++) {
2258 			mach_vm_address_t addr;
2259 			mach_vm_address_t endAddr;
2260 			mach_vm_size_t    len;
2261 
2262 			// addr & len are returned by this function
2263 			getAddrLenForInd(addr, len, type, vec, ind, _task);
2264 			if (_task) {
2265 				mach_vm_size_t phys_size;
2266 				kern_return_t kret;
2267 				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2268 				if (KERN_SUCCESS != kret) {
2269 					break;
2270 				}
2271 				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2272 					break;
2273 				}
2274 			} else {
2275 				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2276 					break;
2277 				}
2278 				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2279 					break;
2280 				}
2281 				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2282 					break;
2283 				}
2284 			}
2285 			if (os_add_overflow(totalLength, len, &totalLength)) {
2286 				break;
2287 			}
2288 			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2289 				uint64_t highPage = atop_64(addr + len - 1);
2290 				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2291 					_highestPage = (ppnum_t) highPage;
2292 					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2293 				}
2294 			}
2295 		}
2296 		if ((ind < count)
2297 		    || (totalLength != ((IOByteCount) totalLength))) {
2298 			return false;                                   /* overflow */
2299 		}
2300 		_length      = totalLength;
2301 		_pages       = pages;
2302 
2303 		// Auto-prepare memory at creation time.
2304 		// Implied completion when descriptor is free-ed
2305 
2306 
2307 		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2308 			_wireCount++; // Physical MDs are, by definition, wired
2309 		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2310 			ioGMDData *dataP;
2311 			unsigned dataSize;
2312 
2313 			if (_pages > atop_64(max_mem)) {
2314 				return false;
2315 			}
2316 
2317 			dataSize = computeDataSize(_pages, /* upls */ count * 2);
2318 			if (!initMemoryEntries(dataSize, mapper)) {
2319 				return false;
2320 			}
2321 			dataP = getDataP(_memoryEntries);
2322 			dataP->fPageCnt = _pages;
2323 
2324 			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2325 			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2326 				_kernelTag = IOMemoryTag(kernel_map);
2327 				if (_kernelTag == gIOSurfaceTag) {
2328 					_userTag = VM_MEMORY_IOSURFACE;
2329 				}
2330 			}
2331 
2332 			if ((kIOMemoryPersistent & _flags) && !_memRef) {
2333 				IOReturn
2334 				    err = memoryReferenceCreate(0, &_memRef);
2335 				if (kIOReturnSuccess != err) {
2336 					return false;
2337 				}
2338 			}
2339 
2340 			if ((_flags & kIOMemoryAutoPrepare)
2341 			    && prepare() != kIOReturnSuccess) {
2342 				return false;
2343 			}
2344 		}
2345 	}
2346 
2347 	return true;
2348 }
2349 
2350 /*
2351  * free
2352  *
2353  * Free resources.
2354  */
2355 void
free()2356 IOGeneralMemoryDescriptor::free()
2357 {
2358 	IOOptionBits type = _flags & kIOMemoryTypeMask;
2359 
2360 	if (reserved && reserved->dp.memory) {
2361 		LOCK;
2362 		reserved->dp.memory = NULL;
2363 		UNLOCK;
2364 	}
2365 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2366 		ioGMDData * dataP;
2367 		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
2368 			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
2369 			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
2370 		}
2371 	} else {
2372 		while (_wireCount) {
2373 			complete();
2374 		}
2375 	}
2376 
2377 	if (_memoryEntries) {
2378 		_memoryEntries.reset();
2379 	}
2380 
2381 	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2382 		if (kIOMemoryTypeUIO == type) {
2383 			uio_free((uio_t) _ranges.v);
2384 		}
2385 #ifndef __LP64__
2386 		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2387 			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2388 		}
2389 #endif /* !__LP64__ */
2390 		else {
2391 			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2392 		}
2393 
2394 		_ranges.v = NULL;
2395 	}
2396 
2397 	if (reserved) {
2398 		cleanKernelReserved(reserved);
2399 		if (reserved->dp.devicePager) {
2400 			// memEntry holds a ref on the device pager which owns reserved
2401 			// (IOMemoryDescriptorReserved) so no reserved access after this point
2402 			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
2403 		} else {
2404 			IOFreeType(reserved, IOMemoryDescriptorReserved);
2405 		}
2406 		reserved = NULL;
2407 	}
2408 
2409 	if (_memRef) {
2410 		memoryReferenceRelease(_memRef);
2411 	}
2412 	if (_prepareLock) {
2413 		IOLockFree(_prepareLock);
2414 	}
2415 
2416 	super::free();
2417 }
2418 
2419 #ifndef __LP64__
2420 void
unmapFromKernel()2421 IOGeneralMemoryDescriptor::unmapFromKernel()
2422 {
2423 	panic("IOGMD::unmapFromKernel deprecated");
2424 }
2425 
2426 void
mapIntoKernel(unsigned rangeIndex)2427 IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
2428 {
2429 	panic("IOGMD::mapIntoKernel deprecated");
2430 }
2431 #endif /* !__LP64__ */
2432 
2433 /*
2434  * getDirection:
2435  *
2436  * Get the direction of the transfer.
2437  */
2438 IODirection
getDirection() const2439 IOMemoryDescriptor::getDirection() const
2440 {
2441 #ifndef __LP64__
2442 	if (_direction) {
2443 		return _direction;
2444 	}
2445 #endif /* !__LP64__ */
2446 	return (IODirection) (_flags & kIOMemoryDirectionMask);
2447 }
2448 
2449 /*
2450  * getLength:
2451  *
2452  * Get the length of the transfer (over all ranges).
2453  */
2454 IOByteCount
getLength() const2455 IOMemoryDescriptor::getLength() const
2456 {
2457 	return _length;
2458 }
2459 
2460 void
setTag(IOOptionBits tag)2461 IOMemoryDescriptor::setTag( IOOptionBits tag )
2462 {
2463 	_tag = tag;
2464 }
2465 
2466 IOOptionBits
getTag(void)2467 IOMemoryDescriptor::getTag( void )
2468 {
2469 	return _tag;
2470 }
2471 
2472 uint64_t
getFlags(void)2473 IOMemoryDescriptor::getFlags(void)
2474 {
2475 	return _flags;
2476 }
2477 
2478 OSObject *
copyContext(void) const2479 IOMemoryDescriptor::copyContext(void) const
2480 {
2481 	if (reserved) {
2482 		OSObject * context = reserved->contextObject;
2483 		if (context) {
2484 			context->retain();
2485 		}
2486 		return context;
2487 	} else {
2488 		return NULL;
2489 	}
2490 }
2491 
2492 void
setContext(OSObject * obj)2493 IOMemoryDescriptor::setContext(OSObject * obj)
2494 {
2495 	if (this->reserved == NULL && obj == NULL) {
2496 		// No existing object, and no object to set
2497 		return;
2498 	}
2499 
2500 	IOMemoryDescriptorReserved * reserved = getKernelReserved();
2501 	if (reserved) {
2502 		OSObject * oldObject = reserved->contextObject;
2503 		if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
2504 			oldObject->release();
2505 		}
2506 		if (obj != NULL) {
2507 			obj->retain();
2508 			reserved->contextObject = obj;
2509 		}
2510 	}
2511 }
2512 
2513 #ifndef __LP64__
2514 #pragma clang diagnostic push
2515 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2516 
2517 // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
2518 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * length)2519 IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
2520 {
2521 	addr64_t physAddr = 0;
2522 
2523 	if (prepare() == kIOReturnSuccess) {
2524 		physAddr = getPhysicalSegment64( offset, length );
2525 		complete();
2526 	}
2527 
2528 	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2529 }
2530 
2531 #pragma clang diagnostic pop
2532 
2533 #endif /* !__LP64__ */
2534 
2535 IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)2536 IOMemoryDescriptor::readBytes
2537 (IOByteCount offset, void *bytes, IOByteCount length)
2538 {
2539 	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2540 	IOByteCount endoffset;
2541 	IOByteCount remaining;
2542 
2543 
2544 	// Check that this entire I/O is within the available range
2545 	if ((offset > _length)
2546 	    || os_add_overflow(length, offset, &endoffset)
2547 	    || (endoffset > _length)) {
2548 		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2549 		return 0;
2550 	}
2551 	if (offset >= _length) {
2552 		return 0;
2553 	}
2554 
2555 	assert(!(kIOMemoryRemote & _flags));
2556 	if (kIOMemoryRemote & _flags) {
2557 		return 0;
2558 	}
2559 
2560 	if (kIOMemoryThreadSafe & _flags) {
2561 		LOCK;
2562 	}
2563 
2564 	remaining = length = min(length, _length - offset);
2565 	while (remaining) { // (process another target segment?)
2566 		addr64_t        srcAddr64;
2567 		IOByteCount     srcLen;
2568 
2569 		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2570 		if (!srcAddr64) {
2571 			break;
2572 		}
2573 
2574 		// Clip segment length to remaining
2575 		if (srcLen > remaining) {
2576 			srcLen = remaining;
2577 		}
2578 
2579 		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2580 			srcLen = (UINT_MAX - PAGE_SIZE + 1);
2581 		}
2582 		copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2583 		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2584 
2585 		dstAddr   += srcLen;
2586 		offset    += srcLen;
2587 		remaining -= srcLen;
2588 	}
2589 
2590 	if (kIOMemoryThreadSafe & _flags) {
2591 		UNLOCK;
2592 	}
2593 
2594 	assert(!remaining);
2595 
2596 	return length - remaining;
2597 }
2598 
2599 IOByteCount
writeBytes(IOByteCount inoffset,const void * bytes,IOByteCount length)2600 IOMemoryDescriptor::writeBytes
2601 (IOByteCount inoffset, const void *bytes, IOByteCount length)
2602 {
2603 	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2604 	IOByteCount remaining;
2605 	IOByteCount endoffset;
2606 	IOByteCount offset = inoffset;
2607 
2608 	assert( !(kIOMemoryPreparedReadOnly & _flags));
2609 
2610 	// Check that this entire I/O is within the available range
2611 	if ((offset > _length)
2612 	    || os_add_overflow(length, offset, &endoffset)
2613 	    || (endoffset > _length)) {
2614 		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2615 		return 0;
2616 	}
2617 	if (kIOMemoryPreparedReadOnly & _flags) {
2618 		return 0;
2619 	}
2620 	if (offset >= _length) {
2621 		return 0;
2622 	}
2623 
2624 	assert(!(kIOMemoryRemote & _flags));
2625 	if (kIOMemoryRemote & _flags) {
2626 		return 0;
2627 	}
2628 
2629 	if (kIOMemoryThreadSafe & _flags) {
2630 		LOCK;
2631 	}
2632 
2633 	remaining = length = min(length, _length - offset);
2634 	while (remaining) { // (process another target segment?)
2635 		addr64_t    dstAddr64;
2636 		IOByteCount dstLen;
2637 
2638 		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2639 		if (!dstAddr64) {
2640 			break;
2641 		}
2642 
2643 		// Clip segment length to remaining
2644 		if (dstLen > remaining) {
2645 			dstLen = remaining;
2646 		}
2647 
2648 		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2649 			dstLen = (UINT_MAX - PAGE_SIZE + 1);
2650 		}
2651 		if (!srcAddr) {
2652 			bzero_phys(dstAddr64, (unsigned int) dstLen);
2653 		} else {
2654 			copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2655 			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2656 			srcAddr   += dstLen;
2657 		}
2658 		offset    += dstLen;
2659 		remaining -= dstLen;
2660 	}
2661 
2662 	if (kIOMemoryThreadSafe & _flags) {
2663 		UNLOCK;
2664 	}
2665 
2666 	assert(!remaining);
2667 
2668 #if defined(__x86_64__)
2669 	// copypv does not cppvFsnk on intel
2670 #else
2671 	if (!srcAddr) {
2672 		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2673 	}
2674 #endif
2675 
2676 	return length - remaining;
2677 }
2678 
2679 #ifndef __LP64__
2680 void
setPosition(IOByteCount position)2681 IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2682 {
2683 	panic("IOGMD::setPosition deprecated");
2684 }
2685 #endif /* !__LP64__ */
2686 
2687 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2688 static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2689 
2690 uint64_t
getPreparationID(void)2691 IOGeneralMemoryDescriptor::getPreparationID( void )
2692 {
2693 	ioGMDData *dataP;
2694 
2695 	if (!_wireCount) {
2696 		return kIOPreparationIDUnprepared;
2697 	}
2698 
2699 	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2700 	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2701 		IOMemoryDescriptor::setPreparationID();
2702 		return IOMemoryDescriptor::getPreparationID();
2703 	}
2704 
2705 	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2706 		return kIOPreparationIDUnprepared;
2707 	}
2708 
2709 	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2710 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2711 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2712 	}
2713 	return dataP->fPreparationID;
2714 }
2715 
2716 void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2717 IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2718 {
2719 	if (reserved->creator) {
2720 		task_deallocate(reserved->creator);
2721 		reserved->creator = NULL;
2722 	}
2723 
2724 	if (reserved->contextObject) {
2725 		reserved->contextObject->release();
2726 		reserved->contextObject = NULL;
2727 	}
2728 }
2729 
2730 IOMemoryDescriptorReserved *
getKernelReserved(void)2731 IOMemoryDescriptor::getKernelReserved( void )
2732 {
2733 	if (!reserved) {
2734 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2735 	}
2736 	return reserved;
2737 }
2738 
2739 void
setPreparationID(void)2740 IOMemoryDescriptor::setPreparationID( void )
2741 {
2742 	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2743 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2744 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2745 	}
2746 }
2747 
2748 uint64_t
getPreparationID(void)2749 IOMemoryDescriptor::getPreparationID( void )
2750 {
2751 	if (reserved) {
2752 		return reserved->preparationID;
2753 	} else {
2754 		return kIOPreparationIDUnsupported;
2755 	}
2756 }
2757 
2758 void
setDescriptorID(void)2759 IOMemoryDescriptor::setDescriptorID( void )
2760 {
2761 	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2762 		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2763 		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2764 	}
2765 }
2766 
2767 uint64_t
getDescriptorID(void)2768 IOMemoryDescriptor::getDescriptorID( void )
2769 {
2770 	setDescriptorID();
2771 
2772 	if (reserved) {
2773 		return reserved->descriptorID;
2774 	} else {
2775 		return kIODescriptorIDInvalid;
2776 	}
2777 }
2778 
2779 IOReturn
ktraceEmitPhysicalSegments(void)2780 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2781 {
2782 	if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2783 		return kIOReturnSuccess;
2784 	}
2785 
2786 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2787 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2788 		return kIOReturnBadArgument;
2789 	}
2790 
2791 	uint64_t descriptorID = getDescriptorID();
2792 	assert(descriptorID != kIODescriptorIDInvalid);
2793 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2794 		return kIOReturnBadArgument;
2795 	}
2796 
2797 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2798 
2799 #if __LP64__
2800 	static const uint8_t num_segments_page = 8;
2801 #else
2802 	static const uint8_t num_segments_page = 4;
2803 #endif
2804 	static const uint8_t num_segments_long = 2;
2805 
2806 	IOPhysicalAddress segments_page[num_segments_page];
2807 	IOPhysicalRange   segments_long[num_segments_long];
2808 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2809 	memset(segments_long, 0, sizeof(segments_long));
2810 
2811 	uint8_t segment_page_idx = 0;
2812 	uint8_t segment_long_idx = 0;
2813 
2814 	IOPhysicalRange physical_segment;
2815 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2816 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2817 
2818 		if (physical_segment.length == 0) {
2819 			break;
2820 		}
2821 
2822 		/**
2823 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2824 		 * buffer memory, pack segment events according to the following.
2825 		 *
2826 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2827 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2828 		 *
2829 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2830 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2831 		 * - unmapped pages will have a ppn of MAX_INT_32
2832 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2833 		 * - address_0, length_0, address_0, length_1
2834 		 * - unmapped pages will have an address of 0
2835 		 *
2836 		 * During each iteration do the following depending on the length of the mapping:
2837 		 * 1. add the current segment to the appropriate queue of pending segments
2838 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2839 		 * 1a. if FALSE emit and reset all events in the previous queue
2840 		 * 2. check if we have filled up the current queue of pending events
2841 		 * 2a. if TRUE emit and reset all events in the pending queue
2842 		 * 3. after completing all iterations emit events in the current queue
2843 		 */
2844 
2845 		bool emit_page = false;
2846 		bool emit_long = false;
2847 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2848 			segments_page[segment_page_idx] = physical_segment.address;
2849 			segment_page_idx++;
2850 
2851 			emit_long = segment_long_idx != 0;
2852 			emit_page = segment_page_idx == num_segments_page;
2853 
2854 			if (os_unlikely(emit_long)) {
2855 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2856 				    segments_long[0].address, segments_long[0].length,
2857 				    segments_long[1].address, segments_long[1].length);
2858 			}
2859 
2860 			if (os_unlikely(emit_page)) {
2861 #if __LP64__
2862 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2863 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2864 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2865 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2866 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2867 #else
2868 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2869 				    (ppnum_t) atop_32(segments_page[1]),
2870 				    (ppnum_t) atop_32(segments_page[2]),
2871 				    (ppnum_t) atop_32(segments_page[3]),
2872 				    (ppnum_t) atop_32(segments_page[4]));
2873 #endif
2874 			}
2875 		} else {
2876 			segments_long[segment_long_idx] = physical_segment;
2877 			segment_long_idx++;
2878 
2879 			emit_page = segment_page_idx != 0;
2880 			emit_long = segment_long_idx == num_segments_long;
2881 
2882 			if (os_unlikely(emit_page)) {
2883 #if __LP64__
2884 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2885 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2886 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2887 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2888 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2889 #else
2890 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2891 				    (ppnum_t) atop_32(segments_page[1]),
2892 				    (ppnum_t) atop_32(segments_page[2]),
2893 				    (ppnum_t) atop_32(segments_page[3]),
2894 				    (ppnum_t) atop_32(segments_page[4]));
2895 #endif
2896 			}
2897 
2898 			if (emit_long) {
2899 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2900 				    segments_long[0].address, segments_long[0].length,
2901 				    segments_long[1].address, segments_long[1].length);
2902 			}
2903 		}
2904 
2905 		if (os_unlikely(emit_page)) {
2906 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2907 			segment_page_idx = 0;
2908 		}
2909 
2910 		if (os_unlikely(emit_long)) {
2911 			memset(segments_long, 0, sizeof(segments_long));
2912 			segment_long_idx = 0;
2913 		}
2914 	}
2915 
2916 	if (segment_page_idx != 0) {
2917 		assert(segment_long_idx == 0);
2918 #if __LP64__
2919 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2920 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2921 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2922 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2923 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2924 #else
2925 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2926 		    (ppnum_t) atop_32(segments_page[1]),
2927 		    (ppnum_t) atop_32(segments_page[2]),
2928 		    (ppnum_t) atop_32(segments_page[3]),
2929 		    (ppnum_t) atop_32(segments_page[4]));
2930 #endif
2931 	} else if (segment_long_idx != 0) {
2932 		assert(segment_page_idx == 0);
2933 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2934 		    segments_long[0].address, segments_long[0].length,
2935 		    segments_long[1].address, segments_long[1].length);
2936 	}
2937 
2938 	return kIOReturnSuccess;
2939 }
2940 
2941 void
setVMTags(uint32_t kernelTag,uint32_t userTag)2942 IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2943 {
2944 	_kernelTag = (vm_tag_t) kernelTag;
2945 	_userTag   = (vm_tag_t) userTag;
2946 }
2947 
2948 uint32_t
getVMTag(vm_map_t map)2949 IOMemoryDescriptor::getVMTag(vm_map_t map)
2950 {
2951 	if (vm_kernel_map_is_kernel(map)) {
2952 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
2953 			return (uint32_t) _kernelTag;
2954 		}
2955 	} else {
2956 		if (VM_KERN_MEMORY_NONE != _userTag) {
2957 			return (uint32_t) _userTag;
2958 		}
2959 	}
2960 	return IOMemoryTag(map);
2961 }
2962 
2963 IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const2964 IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2965 {
2966 	IOReturn err = kIOReturnSuccess;
2967 	DMACommandOps params;
2968 	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2969 	ioGMDData *dataP;
2970 
2971 	params = (op & ~kIOMDDMACommandOperationMask & op);
2972 	op &= kIOMDDMACommandOperationMask;
2973 
2974 	if (kIOMDDMAMap == op) {
2975 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
2976 			return kIOReturnUnderrun;
2977 		}
2978 
2979 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2980 
2981 		if (!_memoryEntries
2982 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2983 			return kIOReturnNoMemory;
2984 		}
2985 
2986 		if (_memoryEntries && data->fMapper) {
2987 			bool remap, keepMap;
2988 			dataP = getDataP(_memoryEntries);
2989 
2990 			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2991 				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2992 			}
2993 			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2994 				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
2995 			}
2996 
2997 			keepMap = (data->fMapper == gIOSystemMapper);
2998 			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2999 
3000 			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3001 				IOLockLock(_prepareLock);
3002 			}
3003 
3004 			remap = (!keepMap);
3005 			remap |= (dataP->fDMAMapNumAddressBits < 64)
3006 			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3007 			remap |= (dataP->fDMAMapAlignment > page_size);
3008 
3009 			if (remap || !dataP->fMappedBaseValid) {
3010 				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3011 				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3012 					dataP->fMappedBase      = data->fAlloc;
3013 					dataP->fMappedBaseValid = true;
3014 					dataP->fMappedLength    = data->fAllocLength;
3015 					data->fAllocLength      = 0;    // IOMD owns the alloc now
3016 				}
3017 			} else {
3018 				data->fAlloc = dataP->fMappedBase;
3019 				data->fAllocLength = 0;         // give out IOMD map
3020 				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3021 			}
3022 
3023 			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3024 				IOLockUnlock(_prepareLock);
3025 			}
3026 		}
3027 		return err;
3028 	}
3029 	if (kIOMDDMAUnmap == op) {
3030 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
3031 			return kIOReturnUnderrun;
3032 		}
3033 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3034 
3035 		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3036 
3037 		return kIOReturnSuccess;
3038 	}
3039 
3040 	if (kIOMDAddDMAMapSpec == op) {
3041 		if (dataSize < sizeof(IODMAMapSpecification)) {
3042 			return kIOReturnUnderrun;
3043 		}
3044 
3045 		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3046 
3047 		if (!_memoryEntries
3048 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3049 			return kIOReturnNoMemory;
3050 		}
3051 
3052 		if (_memoryEntries) {
3053 			dataP = getDataP(_memoryEntries);
3054 			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3055 				dataP->fDMAMapNumAddressBits = data->numAddressBits;
3056 			}
3057 			if (data->alignment > dataP->fDMAMapAlignment) {
3058 				dataP->fDMAMapAlignment = data->alignment;
3059 			}
3060 		}
3061 		return kIOReturnSuccess;
3062 	}
3063 
3064 	if (kIOMDGetCharacteristics == op) {
3065 		if (dataSize < sizeof(IOMDDMACharacteristics)) {
3066 			return kIOReturnUnderrun;
3067 		}
3068 
3069 		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3070 		data->fLength = _length;
3071 		data->fSGCount = _rangesCount;
3072 		data->fPages = _pages;
3073 		data->fDirection = getDirection();
3074 		if (!_wireCount) {
3075 			data->fIsPrepared = false;
3076 		} else {
3077 			data->fIsPrepared = true;
3078 			data->fHighestPage = _highestPage;
3079 			if (_memoryEntries) {
3080 				dataP = getDataP(_memoryEntries);
3081 				ioPLBlock *ioplList = getIOPLList(dataP);
3082 				UInt count = getNumIOPL(_memoryEntries, dataP);
3083 				if (count == 1) {
3084 					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3085 				}
3086 			}
3087 		}
3088 
3089 		return kIOReturnSuccess;
3090 	} else if (kIOMDDMAActive == op) {
3091 		if (params) {
3092 			int16_t prior;
3093 			prior = OSAddAtomic16(1, &md->_dmaReferences);
3094 			if (!prior) {
3095 				md->_mapName = NULL;
3096 			}
3097 		} else {
3098 			if (md->_dmaReferences) {
3099 				OSAddAtomic16(-1, &md->_dmaReferences);
3100 			} else {
3101 				panic("_dmaReferences underflow");
3102 			}
3103 		}
3104 	} else if (kIOMDWalkSegments != op) {
3105 		return kIOReturnBadArgument;
3106 	}
3107 
3108 	// Get the next segment
3109 	struct InternalState {
3110 		IOMDDMAWalkSegmentArgs fIO;
3111 		mach_vm_size_t fOffset2Index;
3112 		mach_vm_size_t fNextOffset;
3113 		UInt fIndex;
3114 	} *isP;
3115 
3116 	// Find the next segment
3117 	if (dataSize < sizeof(*isP)) {
3118 		return kIOReturnUnderrun;
3119 	}
3120 
3121 	isP = (InternalState *) vData;
3122 	uint64_t offset = isP->fIO.fOffset;
3123 	uint8_t mapped = isP->fIO.fMapped;
3124 	uint64_t mappedBase;
3125 
3126 	if (mapped && (kIOMemoryRemote & _flags)) {
3127 		return kIOReturnNotAttached;
3128 	}
3129 
3130 	if (IOMapper::gSystem && mapped
3131 	    && (!(kIOMemoryHostOnly & _flags))
3132 	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3133 //	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3134 		if (!_memoryEntries
3135 		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3136 			return kIOReturnNoMemory;
3137 		}
3138 
3139 		dataP = getDataP(_memoryEntries);
3140 		if (dataP->fMapper) {
3141 			IODMAMapSpecification mapSpec;
3142 			bzero(&mapSpec, sizeof(mapSpec));
3143 			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3144 			mapSpec.alignment = dataP->fDMAMapAlignment;
3145 			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3146 			if (kIOReturnSuccess != err) {
3147 				return err;
3148 			}
3149 			dataP->fMappedBaseValid = true;
3150 		}
3151 	}
3152 
3153 	if (mapped) {
3154 		if (IOMapper::gSystem
3155 		    && (!(kIOMemoryHostOnly & _flags))
3156 		    && _memoryEntries
3157 		    && (dataP = getDataP(_memoryEntries))
3158 		    && dataP->fMappedBaseValid) {
3159 			mappedBase = dataP->fMappedBase;
3160 		} else {
3161 			mapped = 0;
3162 		}
3163 	}
3164 
3165 	if (offset >= _length) {
3166 		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3167 	}
3168 
3169 	// Validate the previous offset
3170 	UInt ind;
3171 	mach_vm_size_t off2Ind = isP->fOffset2Index;
3172 	if (!params
3173 	    && offset
3174 	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
3175 		ind = isP->fIndex;
3176 	} else {
3177 		ind = off2Ind = 0; // Start from beginning
3178 	}
3179 	mach_vm_size_t length;
3180 	UInt64 address;
3181 
3182 	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3183 		// Physical address based memory descriptor
3184 		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3185 
3186 		// Find the range after the one that contains the offset
3187 		mach_vm_size_t len;
3188 		for (len = 0; off2Ind <= offset; ind++) {
3189 			len = physP[ind].length;
3190 			off2Ind += len;
3191 		}
3192 
3193 		// Calculate length within range and starting address
3194 		length   = off2Ind - offset;
3195 		address  = physP[ind - 1].address + len - length;
3196 
3197 		if (true && mapped) {
3198 			address = mappedBase + offset;
3199 		} else {
3200 			// see how far we can coalesce ranges
3201 			while (ind < _rangesCount && address + length == physP[ind].address) {
3202 				len = physP[ind].length;
3203 				length += len;
3204 				off2Ind += len;
3205 				ind++;
3206 			}
3207 		}
3208 
3209 		// correct contiguous check overshoot
3210 		ind--;
3211 		off2Ind -= len;
3212 	}
3213 #ifndef __LP64__
3214 	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3215 		// Physical address based memory descriptor
3216 		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3217 
3218 		// Find the range after the one that contains the offset
3219 		mach_vm_size_t len;
3220 		for (len = 0; off2Ind <= offset; ind++) {
3221 			len = physP[ind].length;
3222 			off2Ind += len;
3223 		}
3224 
3225 		// Calculate length within range and starting address
3226 		length   = off2Ind - offset;
3227 		address  = physP[ind - 1].address + len - length;
3228 
3229 		if (true && mapped) {
3230 			address = mappedBase + offset;
3231 		} else {
3232 			// see how far we can coalesce ranges
3233 			while (ind < _rangesCount && address + length == physP[ind].address) {
3234 				len = physP[ind].length;
3235 				length += len;
3236 				off2Ind += len;
3237 				ind++;
3238 			}
3239 		}
3240 		// correct contiguous check overshoot
3241 		ind--;
3242 		off2Ind -= len;
3243 	}
3244 #endif /* !__LP64__ */
3245 	else {
3246 		do {
3247 			if (!_wireCount) {
3248 				panic("IOGMD: not wired for the IODMACommand");
3249 			}
3250 
3251 			assert(_memoryEntries);
3252 
3253 			dataP = getDataP(_memoryEntries);
3254 			const ioPLBlock *ioplList = getIOPLList(dataP);
3255 			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3256 			upl_page_info_t *pageList = getPageList(dataP);
3257 
3258 			assert(numIOPLs > 0);
3259 
3260 			// Scan through iopl info blocks looking for block containing offset
3261 			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3262 				ind++;
3263 			}
3264 
3265 			// Go back to actual range as search goes past it
3266 			ioPLBlock ioplInfo = ioplList[ind - 1];
3267 			off2Ind = ioplInfo.fIOMDOffset;
3268 
3269 			if (ind < numIOPLs) {
3270 				length = ioplList[ind].fIOMDOffset;
3271 			} else {
3272 				length = _length;
3273 			}
3274 			length -= offset;       // Remainder within iopl
3275 
3276 			// Subtract offset till this iopl in total list
3277 			offset -= off2Ind;
3278 
3279 			// If a mapped address is requested and this is a pre-mapped IOPL
3280 			// then just need to compute an offset relative to the mapped base.
3281 			if (mapped) {
3282 				offset += (ioplInfo.fPageOffset & PAGE_MASK);
3283 				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3284 				continue; // Done leave do/while(false) now
3285 			}
3286 
3287 			// The offset is rebased into the current iopl.
3288 			// Now add the iopl 1st page offset.
3289 			offset += ioplInfo.fPageOffset;
3290 
3291 			// For external UPLs the fPageInfo field points directly to
3292 			// the upl's upl_page_info_t array.
3293 			if (ioplInfo.fFlags & kIOPLExternUPL) {
3294 				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3295 			} else {
3296 				pageList = &pageList[ioplInfo.fPageInfo];
3297 			}
3298 
3299 			// Check for direct device non-paged memory
3300 			if (ioplInfo.fFlags & kIOPLOnDevice) {
3301 				address = ptoa_64(pageList->phys_addr) + offset;
3302 				continue; // Done leave do/while(false) now
3303 			}
3304 
3305 			// Now we need compute the index into the pageList
3306 			UInt pageInd = atop_32(offset);
3307 			offset &= PAGE_MASK;
3308 
3309 			// Compute the starting address of this segment
3310 			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3311 			if (!pageAddr) {
3312 				panic("!pageList phys_addr");
3313 			}
3314 
3315 			address = ptoa_64(pageAddr) + offset;
3316 
3317 			// length is currently set to the length of the remainider of the iopl.
3318 			// We need to check that the remainder of the iopl is contiguous.
3319 			// This is indicated by pageList[ind].phys_addr being sequential.
3320 			IOByteCount contigLength = PAGE_SIZE - offset;
3321 			while (contigLength < length
3322 			    && ++pageAddr == pageList[++pageInd].phys_addr) {
3323 				contigLength += PAGE_SIZE;
3324 			}
3325 
3326 			if (contigLength < length) {
3327 				length = contigLength;
3328 			}
3329 
3330 
3331 			assert(address);
3332 			assert(length);
3333 		} while (false);
3334 	}
3335 
3336 	// Update return values and state
3337 	isP->fIO.fIOVMAddr = address;
3338 	isP->fIO.fLength   = length;
3339 	isP->fIndex        = ind;
3340 	isP->fOffset2Index = off2Ind;
3341 	isP->fNextOffset   = isP->fIO.fOffset + length;
3342 
3343 	return kIOReturnSuccess;
3344 }
3345 
3346 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3347 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3348 {
3349 	IOReturn          ret;
3350 	mach_vm_address_t address = 0;
3351 	mach_vm_size_t    length  = 0;
3352 	IOMapper *        mapper  = gIOSystemMapper;
3353 	IOOptionBits      type    = _flags & kIOMemoryTypeMask;
3354 
3355 	if (lengthOfSegment) {
3356 		*lengthOfSegment = 0;
3357 	}
3358 
3359 	if (offset >= _length) {
3360 		return 0;
3361 	}
3362 
3363 	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3364 	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3365 	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3366 	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3367 
3368 	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3369 		unsigned rangesIndex = 0;
3370 		Ranges vec = _ranges;
3371 		mach_vm_address_t addr;
3372 
3373 		// Find starting address within the vector of ranges
3374 		for (;;) {
3375 			getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
3376 			if (offset < length) {
3377 				break;
3378 			}
3379 			offset -= length; // (make offset relative)
3380 			rangesIndex++;
3381 		}
3382 
3383 		// Now that we have the starting range,
3384 		// lets find the last contiguous range
3385 		addr   += offset;
3386 		length -= offset;
3387 
3388 		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3389 			mach_vm_address_t newAddr;
3390 			mach_vm_size_t    newLen;
3391 
3392 			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
3393 			if (addr + length != newAddr) {
3394 				break;
3395 			}
3396 			length += newLen;
3397 		}
3398 		if (addr) {
3399 			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3400 		}
3401 	} else {
3402 		IOMDDMAWalkSegmentState _state;
3403 		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3404 
3405 		state->fOffset = offset;
3406 		state->fLength = _length - offset;
3407 		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3408 
3409 		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3410 
3411 		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3412 			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3413 			    ret, this, state->fOffset,
3414 			    state->fIOVMAddr, state->fLength);
3415 		}
3416 		if (kIOReturnSuccess == ret) {
3417 			address = state->fIOVMAddr;
3418 			length  = state->fLength;
3419 		}
3420 
3421 		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3422 		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3423 
3424 		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3425 			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3426 				addr64_t    origAddr = address;
3427 				IOByteCount origLen  = length;
3428 
3429 				address = mapper->mapToPhysicalAddress(origAddr);
3430 				length = page_size - (address & (page_size - 1));
3431 				while ((length < origLen)
3432 				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3433 					length += page_size;
3434 				}
3435 				if (length > origLen) {
3436 					length = origLen;
3437 				}
3438 			}
3439 		}
3440 	}
3441 
3442 	if (!address) {
3443 		length = 0;
3444 	}
3445 
3446 	if (lengthOfSegment) {
3447 		*lengthOfSegment = length;
3448 	}
3449 
3450 	return address;
3451 }
3452 
3453 #ifndef __LP64__
3454 #pragma clang diagnostic push
3455 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3456 
3457 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3458 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3459 {
3460 	addr64_t address = 0;
3461 
3462 	if (options & _kIOMemorySourceSegment) {
3463 		address = getSourceSegment(offset, lengthOfSegment);
3464 	} else if (options & kIOMemoryMapperNone) {
3465 		address = getPhysicalSegment64(offset, lengthOfSegment);
3466 	} else {
3467 		address = getPhysicalSegment(offset, lengthOfSegment);
3468 	}
3469 
3470 	return address;
3471 }
3472 #pragma clang diagnostic pop
3473 
3474 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3475 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3476 {
3477 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3478 }
3479 
3480 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3481 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3482 {
3483 	addr64_t    address = 0;
3484 	IOByteCount length  = 0;
3485 
3486 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3487 
3488 	if (lengthOfSegment) {
3489 		length = *lengthOfSegment;
3490 	}
3491 
3492 	if ((address + length) > 0x100000000ULL) {
3493 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3494 		    address, (long) length, (getMetaClass())->getClassName());
3495 	}
3496 
3497 	return (IOPhysicalAddress) address;
3498 }
3499 
3500 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3501 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3502 {
3503 	IOPhysicalAddress phys32;
3504 	IOByteCount       length;
3505 	addr64_t          phys64;
3506 	IOMapper *        mapper = NULL;
3507 
3508 	phys32 = getPhysicalSegment(offset, lengthOfSegment);
3509 	if (!phys32) {
3510 		return 0;
3511 	}
3512 
3513 	if (gIOSystemMapper) {
3514 		mapper = gIOSystemMapper;
3515 	}
3516 
3517 	if (mapper) {
3518 		IOByteCount origLen;
3519 
3520 		phys64 = mapper->mapToPhysicalAddress(phys32);
3521 		origLen = *lengthOfSegment;
3522 		length = page_size - (phys64 & (page_size - 1));
3523 		while ((length < origLen)
3524 		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3525 			length += page_size;
3526 		}
3527 		if (length > origLen) {
3528 			length = origLen;
3529 		}
3530 
3531 		*lengthOfSegment = length;
3532 	} else {
3533 		phys64 = (addr64_t) phys32;
3534 	}
3535 
3536 	return phys64;
3537 }
3538 
3539 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3540 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3541 {
3542 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3543 }
3544 
3545 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3546 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3547 {
3548 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3549 }
3550 
3551 #pragma clang diagnostic push
3552 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3553 
3554 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3555 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3556     IOByteCount * lengthOfSegment)
3557 {
3558 	if (_task == kernel_task) {
3559 		return (void *) getSourceSegment(offset, lengthOfSegment);
3560 	} else {
3561 		panic("IOGMD::getVirtualSegment deprecated");
3562 	}
3563 
3564 	return NULL;
3565 }
3566 #pragma clang diagnostic pop
3567 #endif /* !__LP64__ */
3568 
3569 IOReturn
dmaCommandOperation(DMACommandOps op,void * vData,UInt dataSize) const3570 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3571 {
3572 	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
3573 	DMACommandOps params;
3574 	IOReturn err;
3575 
3576 	params = (op & ~kIOMDDMACommandOperationMask & op);
3577 	op &= kIOMDDMACommandOperationMask;
3578 
3579 	if (kIOMDGetCharacteristics == op) {
3580 		if (dataSize < sizeof(IOMDDMACharacteristics)) {
3581 			return kIOReturnUnderrun;
3582 		}
3583 
3584 		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3585 		data->fLength = getLength();
3586 		data->fSGCount = 0;
3587 		data->fDirection = getDirection();
3588 		data->fIsPrepared = true; // Assume prepared - fails safe
3589 	} else if (kIOMDWalkSegments == op) {
3590 		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
3591 			return kIOReturnUnderrun;
3592 		}
3593 
3594 		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
3595 		IOByteCount offset  = (IOByteCount) data->fOffset;
3596 		IOPhysicalLength length, nextLength;
3597 		addr64_t         addr, nextAddr;
3598 
3599 		if (data->fMapped) {
3600 			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
3601 		}
3602 		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
3603 		offset += length;
3604 		while (offset < getLength()) {
3605 			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
3606 			if ((addr + length) != nextAddr) {
3607 				break;
3608 			}
3609 			length += nextLength;
3610 			offset += nextLength;
3611 		}
3612 		data->fIOVMAddr = addr;
3613 		data->fLength   = length;
3614 	} else if (kIOMDAddDMAMapSpec == op) {
3615 		return kIOReturnUnsupported;
3616 	} else if (kIOMDDMAMap == op) {
3617 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
3618 			return kIOReturnUnderrun;
3619 		}
3620 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3621 
3622 		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3623 
3624 		return err;
3625 	} else if (kIOMDDMAUnmap == op) {
3626 		if (dataSize < sizeof(IOMDDMAMapArgs)) {
3627 			return kIOReturnUnderrun;
3628 		}
3629 		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3630 
3631 		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3632 
3633 		return kIOReturnSuccess;
3634 	} else {
3635 		return kIOReturnBadArgument;
3636 	}
3637 
3638 	return kIOReturnSuccess;
3639 }
3640 
3641 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3642 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
3643     IOOptionBits * oldState )
3644 {
3645 	IOReturn      err = kIOReturnSuccess;
3646 
3647 	vm_purgable_t control;
3648 	int           state;
3649 
3650 	assert(!(kIOMemoryRemote & _flags));
3651 	if (kIOMemoryRemote & _flags) {
3652 		return kIOReturnNotAttached;
3653 	}
3654 
3655 	if (_memRef) {
3656 		err = super::setPurgeable(newState, oldState);
3657 	} else {
3658 		if (kIOMemoryThreadSafe & _flags) {
3659 			LOCK;
3660 		}
3661 		do{
3662 			// Find the appropriate vm_map for the given task
3663 			vm_map_t curMap;
3664 			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
3665 				err = kIOReturnNotReady;
3666 				break;
3667 			} else if (!_task) {
3668 				err = kIOReturnUnsupported;
3669 				break;
3670 			} else {
3671 				curMap = get_task_map(_task);
3672 				if (NULL == curMap) {
3673 					err = KERN_INVALID_ARGUMENT;
3674 					break;
3675 				}
3676 			}
3677 
3678 			// can only do one range
3679 			Ranges vec = _ranges;
3680 			IOOptionBits type = _flags & kIOMemoryTypeMask;
3681 			mach_vm_address_t addr;
3682 			mach_vm_size_t    len;
3683 			getAddrLenForInd(addr, len, type, vec, 0, _task);
3684 
3685 			err = purgeableControlBits(newState, &control, &state);
3686 			if (kIOReturnSuccess != err) {
3687 				break;
3688 			}
3689 			err = vm_map_purgable_control(curMap, addr, control, &state);
3690 			if (oldState) {
3691 				if (kIOReturnSuccess == err) {
3692 					err = purgeableStateBits(&state);
3693 					*oldState = state;
3694 				}
3695 			}
3696 		}while (false);
3697 		if (kIOMemoryThreadSafe & _flags) {
3698 			UNLOCK;
3699 		}
3700 	}
3701 
3702 	return err;
3703 }
3704 
3705 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3706 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3707     IOOptionBits * oldState )
3708 {
3709 	IOReturn err = kIOReturnNotReady;
3710 
3711 	if (kIOMemoryThreadSafe & _flags) {
3712 		LOCK;
3713 	}
3714 	if (_memRef) {
3715 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3716 	}
3717 	if (kIOMemoryThreadSafe & _flags) {
3718 		UNLOCK;
3719 	}
3720 
3721 	return err;
3722 }
3723 
3724 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3725 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3726     int newLedgerTag,
3727     IOOptionBits newLedgerOptions )
3728 {
3729 	IOReturn      err = kIOReturnSuccess;
3730 
3731 	assert(!(kIOMemoryRemote & _flags));
3732 	if (kIOMemoryRemote & _flags) {
3733 		return kIOReturnNotAttached;
3734 	}
3735 
3736 	if (iokit_iomd_setownership_enabled == FALSE) {
3737 		return kIOReturnUnsupported;
3738 	}
3739 
3740 	if (_memRef) {
3741 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3742 	} else {
3743 		err = kIOReturnUnsupported;
3744 	}
3745 
3746 	return err;
3747 }
3748 
3749 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3750 IOMemoryDescriptor::setOwnership( task_t newOwner,
3751     int newLedgerTag,
3752     IOOptionBits newLedgerOptions )
3753 {
3754 	IOReturn err = kIOReturnNotReady;
3755 
3756 	assert(!(kIOMemoryRemote & _flags));
3757 	if (kIOMemoryRemote & _flags) {
3758 		return kIOReturnNotAttached;
3759 	}
3760 
3761 	if (iokit_iomd_setownership_enabled == FALSE) {
3762 		return kIOReturnUnsupported;
3763 	}
3764 
3765 	if (kIOMemoryThreadSafe & _flags) {
3766 		LOCK;
3767 	}
3768 	if (_memRef) {
3769 		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3770 	} else {
3771 		IOMultiMemoryDescriptor * mmd;
3772 		IOSubMemoryDescriptor   * smd;
3773 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3774 			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3775 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3776 			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3777 		}
3778 	}
3779 	if (kIOMemoryThreadSafe & _flags) {
3780 		UNLOCK;
3781 	}
3782 
3783 	return err;
3784 }
3785 
3786 
3787 uint64_t
getDMAMapLength(uint64_t * offset)3788 IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3789 {
3790 	uint64_t length;
3791 
3792 	if (_memRef) {
3793 		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3794 	} else {
3795 		IOByteCount       iterate, segLen;
3796 		IOPhysicalAddress sourceAddr, sourceAlign;
3797 
3798 		if (kIOMemoryThreadSafe & _flags) {
3799 			LOCK;
3800 		}
3801 		length = 0;
3802 		iterate = 0;
3803 		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3804 			sourceAlign = (sourceAddr & page_mask);
3805 			if (offset && !iterate) {
3806 				*offset = sourceAlign;
3807 			}
3808 			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3809 			iterate += segLen;
3810 		}
3811 		if (!iterate) {
3812 			length = getLength();
3813 			if (offset) {
3814 				*offset = 0;
3815 			}
3816 		}
3817 		if (kIOMemoryThreadSafe & _flags) {
3818 			UNLOCK;
3819 		}
3820 	}
3821 
3822 	return length;
3823 }
3824 
3825 
3826 IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)3827 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3828     IOByteCount * dirtyPageCount )
3829 {
3830 	IOReturn err = kIOReturnNotReady;
3831 
3832 	assert(!(kIOMemoryRemote & _flags));
3833 	if (kIOMemoryRemote & _flags) {
3834 		return kIOReturnNotAttached;
3835 	}
3836 
3837 	if (kIOMemoryThreadSafe & _flags) {
3838 		LOCK;
3839 	}
3840 	if (_memRef) {
3841 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3842 	} else {
3843 		IOMultiMemoryDescriptor * mmd;
3844 		IOSubMemoryDescriptor   * smd;
3845 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3846 			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3847 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3848 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3849 		}
3850 	}
3851 	if (kIOMemoryThreadSafe & _flags) {
3852 		UNLOCK;
3853 	}
3854 
3855 	return err;
3856 }
3857 
3858 
3859 #if defined(__arm64__)
3860 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3861 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3862 #else /* defined(__arm64__) */
3863 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3864 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3865 #endif /* defined(__arm64__) */
3866 
3867 static void
SetEncryptOp(addr64_t pa,unsigned int count)3868 SetEncryptOp(addr64_t pa, unsigned int count)
3869 {
3870 	ppnum_t page, end;
3871 
3872 	page = (ppnum_t) atop_64(round_page_64(pa));
3873 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3874 	for (; page < end; page++) {
3875 		pmap_clear_noencrypt(page);
3876 	}
3877 }
3878 
3879 static void
ClearEncryptOp(addr64_t pa,unsigned int count)3880 ClearEncryptOp(addr64_t pa, unsigned int count)
3881 {
3882 	ppnum_t page, end;
3883 
3884 	page = (ppnum_t) atop_64(round_page_64(pa));
3885 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3886 	for (; page < end; page++) {
3887 		pmap_set_noencrypt(page);
3888 	}
3889 }
3890 
3891 IOReturn
performOperation(IOOptionBits options,IOByteCount offset,IOByteCount length)3892 IOMemoryDescriptor::performOperation( IOOptionBits options,
3893     IOByteCount offset, IOByteCount length )
3894 {
3895 	IOByteCount remaining;
3896 	unsigned int res;
3897 	void (*func)(addr64_t pa, unsigned int count) = NULL;
3898 #if defined(__arm64__)
3899 	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3900 #endif
3901 
3902 	assert(!(kIOMemoryRemote & _flags));
3903 	if (kIOMemoryRemote & _flags) {
3904 		return kIOReturnNotAttached;
3905 	}
3906 
3907 	switch (options) {
3908 	case kIOMemoryIncoherentIOFlush:
3909 #if defined(__arm64__)
3910 		func_ext = &dcache_incoherent_io_flush64;
3911 #if __ARM_COHERENT_IO__
3912 		func_ext(0, 0, 0, &res);
3913 		return kIOReturnSuccess;
3914 #else /* __ARM_COHERENT_IO__ */
3915 		break;
3916 #endif /* __ARM_COHERENT_IO__ */
3917 #else /* defined(__arm64__) */
3918 		func = &dcache_incoherent_io_flush64;
3919 		break;
3920 #endif /* defined(__arm64__) */
3921 	case kIOMemoryIncoherentIOStore:
3922 #if defined(__arm64__)
3923 		func_ext = &dcache_incoherent_io_store64;
3924 #if __ARM_COHERENT_IO__
3925 		func_ext(0, 0, 0, &res);
3926 		return kIOReturnSuccess;
3927 #else /* __ARM_COHERENT_IO__ */
3928 		break;
3929 #endif /* __ARM_COHERENT_IO__ */
3930 #else /* defined(__arm64__) */
3931 		func = &dcache_incoherent_io_store64;
3932 		break;
3933 #endif /* defined(__arm64__) */
3934 
3935 	case kIOMemorySetEncrypted:
3936 		func = &SetEncryptOp;
3937 		break;
3938 	case kIOMemoryClearEncrypted:
3939 		func = &ClearEncryptOp;
3940 		break;
3941 	}
3942 
3943 #if defined(__arm64__)
3944 	if ((func == NULL) && (func_ext == NULL)) {
3945 		return kIOReturnUnsupported;
3946 	}
3947 #else /* defined(__arm64__) */
3948 	if (!func) {
3949 		return kIOReturnUnsupported;
3950 	}
3951 #endif /* defined(__arm64__) */
3952 
3953 	if (kIOMemoryThreadSafe & _flags) {
3954 		LOCK;
3955 	}
3956 
3957 	res = 0x0UL;
3958 	remaining = length = min(length, getLength() - offset);
3959 	while (remaining) {
3960 		// (process another target segment?)
3961 		addr64_t    dstAddr64;
3962 		IOByteCount dstLen;
3963 
3964 		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3965 		if (!dstAddr64) {
3966 			break;
3967 		}
3968 
3969 		// Clip segment length to remaining
3970 		if (dstLen > remaining) {
3971 			dstLen = remaining;
3972 		}
3973 		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
3974 			dstLen = (UINT_MAX - PAGE_SIZE + 1);
3975 		}
3976 		if (remaining > UINT_MAX) {
3977 			remaining = UINT_MAX;
3978 		}
3979 
3980 #if defined(__arm64__)
3981 		if (func) {
3982 			(*func)(dstAddr64, (unsigned int) dstLen);
3983 		}
3984 		if (func_ext) {
3985 			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
3986 			if (res != 0x0UL) {
3987 				remaining = 0;
3988 				break;
3989 			}
3990 		}
3991 #else /* defined(__arm64__) */
3992 		(*func)(dstAddr64, (unsigned int) dstLen);
3993 #endif /* defined(__arm64__) */
3994 
3995 		offset    += dstLen;
3996 		remaining -= dstLen;
3997 	}
3998 
3999 	if (kIOMemoryThreadSafe & _flags) {
4000 		UNLOCK;
4001 	}
4002 
4003 	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
4004 }
4005 
4006 /*
4007  *
4008  */
4009 
4010 #if defined(__i386__) || defined(__x86_64__)
4011 
4012 extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4013 
4014 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4015  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4016  * kernel non-text data -- should we just add another range instead?
4017  */
4018 #define io_kernel_static_start  vm_kernel_stext
4019 #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4020 
4021 #elif defined(__arm64__)
4022 
4023 extern vm_offset_t              static_memory_end;
4024 
4025 #if defined(__arm64__)
4026 #define io_kernel_static_start vm_kext_base
4027 #else /* defined(__arm64__) */
4028 #define io_kernel_static_start vm_kernel_stext
4029 #endif /* defined(__arm64__) */
4030 
4031 #define io_kernel_static_end    static_memory_end
4032 
4033 #else
4034 #error io_kernel_static_end is undefined for this architecture
4035 #endif
4036 
4037 static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4038 io_get_kernel_static_upl(
4039 	vm_map_t                /* map */,
4040 	uintptr_t               offset,
4041 	upl_size_t              *upl_size,
4042 	unsigned int            *page_offset,
4043 	upl_t                   *upl,
4044 	upl_page_info_array_t   page_list,
4045 	unsigned int            *count,
4046 	ppnum_t                 *highest_page)
4047 {
4048 	unsigned int pageCount, page;
4049 	ppnum_t phys;
4050 	ppnum_t highestPage = 0;
4051 
4052 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4053 	if (pageCount > *count) {
4054 		pageCount = *count;
4055 	}
4056 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4057 
4058 	*upl = NULL;
4059 	*page_offset = ((unsigned int) page_mask & offset);
4060 
4061 	for (page = 0; page < pageCount; page++) {
4062 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4063 		if (!phys) {
4064 			break;
4065 		}
4066 		page_list[page].phys_addr = phys;
4067 		page_list[page].free_when_done = 0;
4068 		page_list[page].absent    = 0;
4069 		page_list[page].dirty     = 0;
4070 		page_list[page].precious  = 0;
4071 		page_list[page].device    = 0;
4072 		if (phys > highestPage) {
4073 			highestPage = phys;
4074 		}
4075 	}
4076 
4077 	*highest_page = highestPage;
4078 
4079 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4080 }
4081 
4082 IOReturn
wireVirtual(IODirection forDirection)4083 IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4084 {
4085 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4086 	IOReturn error = kIOReturnSuccess;
4087 	ioGMDData *dataP;
4088 	upl_page_info_array_t pageInfo;
4089 	ppnum_t mapBase;
4090 	vm_tag_t tag = VM_KERN_MEMORY_NONE;
4091 	mach_vm_size_t numBytesWired = 0;
4092 
4093 	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4094 
4095 	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4096 		forDirection = (IODirection) (forDirection | getDirection());
4097 	}
4098 
4099 	dataP = getDataP(_memoryEntries);
4100 	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4101 	switch (kIODirectionOutIn & forDirection) {
4102 	case kIODirectionOut:
4103 		// Pages do not need to be marked as dirty on commit
4104 		uplFlags = UPL_COPYOUT_FROM;
4105 		dataP->fDMAAccess = kIODMAMapReadAccess;
4106 		break;
4107 
4108 	case kIODirectionIn:
4109 		dataP->fDMAAccess = kIODMAMapWriteAccess;
4110 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4111 		break;
4112 
4113 	default:
4114 		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4115 		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
4116 		break;
4117 	}
4118 
4119 	if (_wireCount) {
4120 		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4121 			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4122 			    (size_t)VM_KERNEL_ADDRPERM(this));
4123 			error = kIOReturnNotWritable;
4124 		}
4125 	} else {
4126 		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4127 		IOMapper *mapper;
4128 
4129 		mapper = dataP->fMapper;
4130 		dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4131 
4132 		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4133 		tag = _kernelTag;
4134 		if (VM_KERN_MEMORY_NONE == tag) {
4135 			tag = IOMemoryTag(kernel_map);
4136 		}
4137 
4138 		if (kIODirectionPrepareToPhys32 & forDirection) {
4139 			if (!mapper) {
4140 				uplFlags |= UPL_NEED_32BIT_ADDR;
4141 			}
4142 			if (dataP->fDMAMapNumAddressBits > 32) {
4143 				dataP->fDMAMapNumAddressBits = 32;
4144 			}
4145 		}
4146 		if (kIODirectionPrepareNoFault    & forDirection) {
4147 			uplFlags |= UPL_REQUEST_NO_FAULT;
4148 		}
4149 		if (kIODirectionPrepareNoZeroFill & forDirection) {
4150 			uplFlags |= UPL_NOZEROFILLIO;
4151 		}
4152 		if (kIODirectionPrepareNonCoherent & forDirection) {
4153 			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4154 		}
4155 
4156 		mapBase = 0;
4157 
4158 		// Note that appendBytes(NULL) zeros the data up to the desired length
4159 		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4160 		if (uplPageSize > ((unsigned int)uplPageSize)) {
4161 			error = kIOReturnNoMemory;
4162 			traceInterval.setEndArg2(error);
4163 			return error;
4164 		}
4165 		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4166 			error = kIOReturnNoMemory;
4167 			traceInterval.setEndArg2(error);
4168 			return error;
4169 		}
4170 		dataP = NULL;
4171 
4172 		// Find the appropriate vm_map for the given task
4173 		vm_map_t curMap;
4174 		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4175 			curMap = NULL;
4176 		} else {
4177 			curMap = get_task_map(_task);
4178 		}
4179 
4180 		// Iterate over the vector of virtual ranges
4181 		Ranges vec = _ranges;
4182 		unsigned int pageIndex  = 0;
4183 		IOByteCount mdOffset    = 0;
4184 		ppnum_t highestPage     = 0;
4185 		bool         byteAlignUPL;
4186 
4187 		IOMemoryEntry * memRefEntry = NULL;
4188 		if (_memRef) {
4189 			memRefEntry = &_memRef->entries[0];
4190 			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4191 		} else {
4192 			byteAlignUPL = true;
4193 		}
4194 
4195 		for (UInt range = 0; mdOffset < _length; range++) {
4196 			ioPLBlock iopl;
4197 			mach_vm_address_t startPage, startPageOffset;
4198 			mach_vm_size_t    numBytes;
4199 			ppnum_t highPage = 0;
4200 
4201 			if (_memRef) {
4202 				if (range >= _memRef->count) {
4203 					panic("memRefEntry");
4204 				}
4205 				memRefEntry = &_memRef->entries[range];
4206 				numBytes    = memRefEntry->size;
4207 				startPage   = -1ULL;
4208 				if (byteAlignUPL) {
4209 					startPageOffset = 0;
4210 				} else {
4211 					startPageOffset = (memRefEntry->start & PAGE_MASK);
4212 				}
4213 			} else {
4214 				// Get the startPage address and length of vec[range]
4215 				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
4216 				if (byteAlignUPL) {
4217 					startPageOffset = 0;
4218 				} else {
4219 					startPageOffset = startPage & PAGE_MASK;
4220 					startPage = trunc_page_64(startPage);
4221 				}
4222 			}
4223 			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4224 			numBytes += startPageOffset;
4225 
4226 			if (mapper) {
4227 				iopl.fMappedPage = mapBase + pageIndex;
4228 			} else {
4229 				iopl.fMappedPage = 0;
4230 			}
4231 
4232 			// Iterate over the current range, creating UPLs
4233 			while (numBytes) {
4234 				vm_address_t kernelStart = (vm_address_t) startPage;
4235 				vm_map_t theMap;
4236 				if (curMap) {
4237 					theMap = curMap;
4238 				} else if (_memRef) {
4239 					theMap = NULL;
4240 				} else {
4241 					assert(_task == kernel_task);
4242 					theMap = IOPageableMapForAddress(kernelStart);
4243 				}
4244 
4245 				// ioplFlags is an in/out parameter
4246 				upl_control_flags_t ioplFlags = uplFlags;
4247 				dataP = getDataP(_memoryEntries);
4248 				pageInfo = getPageList(dataP);
4249 				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4250 
4251 				mach_vm_size_t ioplPhysSize;
4252 				upl_size_t     ioplSize;
4253 				unsigned int   numPageInfo;
4254 
4255 				if (_memRef) {
4256 					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4257 					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4258 				} else {
4259 					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4260 					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4261 				}
4262 				if (error != KERN_SUCCESS) {
4263 					if (_memRef) {
4264 						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4265 					} else {
4266 						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4267 					}
4268 					printf("entry size error %d\n", error);
4269 					goto abortExit;
4270 				}
4271 				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4272 				numPageInfo = atop_32(ioplPhysSize);
4273 				if (byteAlignUPL) {
4274 					if (numBytes > ioplPhysSize) {
4275 						ioplSize = ((typeof(ioplSize))ioplPhysSize);
4276 					} else {
4277 						ioplSize = ((typeof(ioplSize))numBytes);
4278 					}
4279 				} else {
4280 					ioplSize = ((typeof(ioplSize))ioplPhysSize);
4281 				}
4282 
4283 				if (_memRef) {
4284 					memory_object_offset_t entryOffset;
4285 
4286 					entryOffset = mdOffset;
4287 					if (byteAlignUPL) {
4288 						entryOffset = (entryOffset - memRefEntry->offset);
4289 					} else {
4290 						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4291 					}
4292 					if (ioplSize > (memRefEntry->size - entryOffset)) {
4293 						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4294 					}
4295 					error = memory_object_iopl_request(memRefEntry->entry,
4296 					    entryOffset,
4297 					    &ioplSize,
4298 					    &iopl.fIOPL,
4299 					    baseInfo,
4300 					    &numPageInfo,
4301 					    &ioplFlags,
4302 					    tag);
4303 				} else if ((theMap == kernel_map)
4304 				    && (kernelStart >= io_kernel_static_start)
4305 				    && (kernelStart < io_kernel_static_end)) {
4306 					error = io_get_kernel_static_upl(theMap,
4307 					    kernelStart,
4308 					    &ioplSize,
4309 					    &iopl.fPageOffset,
4310 					    &iopl.fIOPL,
4311 					    baseInfo,
4312 					    &numPageInfo,
4313 					    &highPage);
4314 				} else {
4315 					assert(theMap);
4316 					error = vm_map_create_upl(theMap,
4317 					    startPage,
4318 					    (upl_size_t*)&ioplSize,
4319 					    &iopl.fIOPL,
4320 					    baseInfo,
4321 					    &numPageInfo,
4322 					    &ioplFlags,
4323 					    tag);
4324 				}
4325 
4326 				if (error != KERN_SUCCESS) {
4327 					traceInterval.setEndArg2(error);
4328 					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4329 					goto abortExit;
4330 				}
4331 
4332 				assert(ioplSize);
4333 
4334 				if (iopl.fIOPL) {
4335 					highPage = upl_get_highest_page(iopl.fIOPL);
4336 				}
4337 				if (highPage > highestPage) {
4338 					highestPage = highPage;
4339 				}
4340 
4341 				if (baseInfo->device) {
4342 					numPageInfo = 1;
4343 					iopl.fFlags = kIOPLOnDevice;
4344 				} else {
4345 					iopl.fFlags = 0;
4346 				}
4347 
4348 				if (byteAlignUPL) {
4349 					if (iopl.fIOPL) {
4350 						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4351 						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4352 					}
4353 					if (startPage != (mach_vm_address_t)-1) {
4354 						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4355 						startPage -= iopl.fPageOffset;
4356 					}
4357 					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4358 					numBytes += iopl.fPageOffset;
4359 				}
4360 
4361 				iopl.fIOMDOffset = mdOffset;
4362 				iopl.fPageInfo = pageIndex;
4363 
4364 				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4365 					// Clean up partial created and unsaved iopl
4366 					if (iopl.fIOPL) {
4367 						upl_abort(iopl.fIOPL, 0);
4368 						upl_deallocate(iopl.fIOPL);
4369 					}
4370 					error = kIOReturnNoMemory;
4371 					traceInterval.setEndArg2(error);
4372 					goto abortExit;
4373 				}
4374 				dataP = NULL;
4375 
4376 				// Check for a multiple iopl's in one virtual range
4377 				pageIndex += numPageInfo;
4378 				mdOffset -= iopl.fPageOffset;
4379 				numBytesWired += ioplSize;
4380 				if (ioplSize < numBytes) {
4381 					numBytes -= ioplSize;
4382 					if (startPage != (mach_vm_address_t)-1) {
4383 						startPage += ioplSize;
4384 					}
4385 					mdOffset += ioplSize;
4386 					iopl.fPageOffset = 0;
4387 					if (mapper) {
4388 						iopl.fMappedPage = mapBase + pageIndex;
4389 					}
4390 				} else {
4391 					mdOffset += numBytes;
4392 					break;
4393 				}
4394 			}
4395 		}
4396 
4397 		_highestPage = highestPage;
4398 		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4399 
4400 		if (UPL_COPYOUT_FROM & uplFlags) {
4401 			_flags |= kIOMemoryPreparedReadOnly;
4402 		}
4403 		traceInterval.setEndCodes(numBytesWired, error);
4404 	}
4405 
4406 #if IOTRACKING
4407 	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4408 		dataP = getDataP(_memoryEntries);
4409 		if (!dataP->fWireTracking.link.next) {
4410 			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4411 		}
4412 	}
4413 #endif /* IOTRACKING */
4414 
4415 	return error;
4416 
4417 abortExit:
4418 	{
4419 		dataP = getDataP(_memoryEntries);
4420 		UInt done = getNumIOPL(_memoryEntries, dataP);
4421 		ioPLBlock *ioplList = getIOPLList(dataP);
4422 
4423 		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4424 			if (ioplList[ioplIdx].fIOPL) {
4425 				upl_abort(ioplList[ioplIdx].fIOPL, 0);
4426 				upl_deallocate(ioplList[ioplIdx].fIOPL);
4427 			}
4428 		}
4429 		_memoryEntries->setLength(computeDataSize(0, 0));
4430 	}
4431 
4432 	if (error == KERN_FAILURE) {
4433 		error = kIOReturnCannotWire;
4434 	} else if (error == KERN_MEMORY_ERROR) {
4435 		error = kIOReturnNoResources;
4436 	}
4437 
4438 	return error;
4439 }
4440 
4441 bool
initMemoryEntries(size_t size,IOMapper * mapper)4442 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4443 {
4444 	ioGMDData * dataP;
4445 
4446 	if (size > UINT_MAX) {
4447 		return false;
4448 	}
4449 	if (!_memoryEntries) {
4450 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4451 		if (!_memoryEntries) {
4452 			return false;
4453 		}
4454 	} else if (!_memoryEntries->initWithCapacity(size)) {
4455 		return false;
4456 	}
4457 
4458 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4459 	dataP = getDataP(_memoryEntries);
4460 
4461 	if (mapper == kIOMapperWaitSystem) {
4462 		IOMapper::checkForSystemMapper();
4463 		mapper = IOMapper::gSystem;
4464 	}
4465 	dataP->fMapper               = mapper;
4466 	dataP->fPageCnt              = 0;
4467 	dataP->fMappedBase           = 0;
4468 	dataP->fDMAMapNumAddressBits = 64;
4469 	dataP->fDMAMapAlignment      = 0;
4470 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4471 	dataP->fCompletionError      = false;
4472 	dataP->fMappedBaseValid      = false;
4473 
4474 	return true;
4475 }
4476 
4477 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4478 IOMemoryDescriptor::dmaMap(
4479 	IOMapper                    * mapper,
4480 	IOMemoryDescriptor          * memory,
4481 	IODMACommand                * command,
4482 	const IODMAMapSpecification * mapSpec,
4483 	uint64_t                      offset,
4484 	uint64_t                      length,
4485 	uint64_t                    * mapAddress,
4486 	uint64_t                    * mapLength)
4487 {
4488 	IOReturn err;
4489 	uint32_t mapOptions;
4490 
4491 	mapOptions = 0;
4492 	mapOptions |= kIODMAMapReadAccess;
4493 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4494 		mapOptions |= kIODMAMapWriteAccess;
4495 	}
4496 
4497 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4498 	    mapSpec, command, NULL, mapAddress, mapLength);
4499 
4500 	if (kIOReturnSuccess == err) {
4501 		dmaMapRecord(mapper, command, *mapLength);
4502 	}
4503 
4504 	return err;
4505 }
4506 
4507 void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4508 IOMemoryDescriptor::dmaMapRecord(
4509 	IOMapper                    * mapper,
4510 	IODMACommand                * command,
4511 	uint64_t                      mapLength)
4512 {
4513 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4514 	kern_allocation_name_t alloc;
4515 	int16_t                prior;
4516 
4517 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4518 		kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4519 	}
4520 
4521 	if (!command) {
4522 		return;
4523 	}
4524 	prior = OSAddAtomic16(1, &_dmaReferences);
4525 	if (!prior) {
4526 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4527 			_mapName  = alloc;
4528 			mapLength = _length;
4529 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4530 		} else {
4531 			_mapName = NULL;
4532 		}
4533 	}
4534 }
4535 
4536 IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4537 IOMemoryDescriptor::dmaUnmap(
4538 	IOMapper                    * mapper,
4539 	IODMACommand                * command,
4540 	uint64_t                      offset,
4541 	uint64_t                      mapAddress,
4542 	uint64_t                      mapLength)
4543 {
4544 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4545 	IOReturn ret;
4546 	kern_allocation_name_t alloc;
4547 	kern_allocation_name_t mapName;
4548 	int16_t prior;
4549 
4550 	mapName = NULL;
4551 	prior = 0;
4552 	if (command) {
4553 		mapName = _mapName;
4554 		if (_dmaReferences) {
4555 			prior = OSAddAtomic16(-1, &_dmaReferences);
4556 		} else {
4557 			panic("_dmaReferences underflow");
4558 		}
4559 	}
4560 
4561 	if (!mapLength) {
4562 		traceInterval.setEndArg1(kIOReturnSuccess);
4563 		return kIOReturnSuccess;
4564 	}
4565 
4566 	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4567 
4568 	if ((alloc = mapper->fAllocName)) {
4569 		kern_allocation_update_size(alloc, -mapLength, NULL);
4570 		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4571 			mapLength = _length;
4572 			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4573 		}
4574 	}
4575 
4576 	traceInterval.setEndArg1(ret);
4577 	return ret;
4578 }
4579 
4580 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4581 IOGeneralMemoryDescriptor::dmaMap(
4582 	IOMapper                    * mapper,
4583 	IOMemoryDescriptor          * memory,
4584 	IODMACommand                * command,
4585 	const IODMAMapSpecification * mapSpec,
4586 	uint64_t                      offset,
4587 	uint64_t                      length,
4588 	uint64_t                    * mapAddress,
4589 	uint64_t                    * mapLength)
4590 {
4591 	IOReturn          err = kIOReturnSuccess;
4592 	ioGMDData *       dataP;
4593 	IOOptionBits      type = _flags & kIOMemoryTypeMask;
4594 
4595 	*mapAddress = 0;
4596 	if (kIOMemoryHostOnly & _flags) {
4597 		return kIOReturnSuccess;
4598 	}
4599 	if (kIOMemoryRemote & _flags) {
4600 		return kIOReturnNotAttached;
4601 	}
4602 
4603 	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4604 	    || offset || (length != _length)) {
4605 		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4606 	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4607 		const ioPLBlock * ioplList = getIOPLList(dataP);
4608 		upl_page_info_t * pageList;
4609 		uint32_t          mapOptions = 0;
4610 
4611 		IODMAMapSpecification mapSpec;
4612 		bzero(&mapSpec, sizeof(mapSpec));
4613 		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4614 		mapSpec.alignment = dataP->fDMAMapAlignment;
4615 
4616 		// For external UPLs the fPageInfo field points directly to
4617 		// the upl's upl_page_info_t array.
4618 		if (ioplList->fFlags & kIOPLExternUPL) {
4619 			pageList = (upl_page_info_t *) ioplList->fPageInfo;
4620 			mapOptions |= kIODMAMapPagingPath;
4621 		} else {
4622 			pageList = getPageList(dataP);
4623 		}
4624 
4625 		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4626 			mapOptions |= kIODMAMapPageListFullyOccupied;
4627 		}
4628 
4629 		assert(dataP->fDMAAccess);
4630 		mapOptions |= dataP->fDMAAccess;
4631 
4632 		// Check for direct device non-paged memory
4633 		if (ioplList->fFlags & kIOPLOnDevice) {
4634 			mapOptions |= kIODMAMapPhysicallyContiguous;
4635 		}
4636 
4637 		IODMAMapPageList dmaPageList =
4638 		{
4639 			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
4640 			.pageListCount = _pages,
4641 			.pageList      = &pageList[0]
4642 		};
4643 		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4644 		    command, &dmaPageList, mapAddress, mapLength);
4645 
4646 		if (kIOReturnSuccess == err) {
4647 			dmaMapRecord(mapper, command, *mapLength);
4648 		}
4649 	}
4650 
4651 	return err;
4652 }
4653 
4654 /*
4655  * prepare
4656  *
4657  * Prepare the memory for an I/O transfer.  This involves paging in
4658  * the memory, if necessary, and wiring it down for the duration of
4659  * the transfer.  The complete() method completes the processing of
4660  * the memory after the I/O transfer finishes.  This method needn't
4661  * called for non-pageable memory.
4662  */
4663 
4664 IOReturn
prepare(IODirection forDirection)4665 IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4666 {
4667 	IOReturn     error    = kIOReturnSuccess;
4668 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4669 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4670 
4671 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4672 		traceInterval.setEndArg1(kIOReturnSuccess);
4673 		return kIOReturnSuccess;
4674 	}
4675 
4676 	assert(!(kIOMemoryRemote & _flags));
4677 	if (kIOMemoryRemote & _flags) {
4678 		traceInterval.setEndArg1(kIOReturnNotAttached);
4679 		return kIOReturnNotAttached;
4680 	}
4681 
4682 	if (_prepareLock) {
4683 		IOLockLock(_prepareLock);
4684 	}
4685 
4686 	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4687 		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4688 			error = kIOReturnNotReady;
4689 			goto finish;
4690 		}
4691 		error = wireVirtual(forDirection);
4692 	}
4693 
4694 	if (kIOReturnSuccess == error) {
4695 		if (1 == ++_wireCount) {
4696 			if (kIOMemoryClearEncrypt & _flags) {
4697 				performOperation(kIOMemoryClearEncrypted, 0, _length);
4698 			}
4699 
4700 			ktraceEmitPhysicalSegments();
4701 		}
4702 	}
4703 
4704 finish:
4705 
4706 	if (_prepareLock) {
4707 		IOLockUnlock(_prepareLock);
4708 	}
4709 	traceInterval.setEndArg1(error);
4710 
4711 	return error;
4712 }
4713 
4714 /*
4715  * complete
4716  *
4717  * Complete processing of the memory after an I/O transfer finishes.
4718  * This method should not be called unless a prepare was previously
4719  * issued; the prepare() and complete() must occur in pairs, before
4720  * before and after an I/O transfer involving pageable memory.
4721  */
4722 
4723 IOReturn
complete(IODirection forDirection)4724 IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4725 {
4726 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4727 	ioGMDData  * dataP;
4728 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4729 
4730 	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4731 		traceInterval.setEndArg1(kIOReturnSuccess);
4732 		return kIOReturnSuccess;
4733 	}
4734 
4735 	assert(!(kIOMemoryRemote & _flags));
4736 	if (kIOMemoryRemote & _flags) {
4737 		traceInterval.setEndArg1(kIOReturnNotAttached);
4738 		return kIOReturnNotAttached;
4739 	}
4740 
4741 	if (_prepareLock) {
4742 		IOLockLock(_prepareLock);
4743 	}
4744 	do{
4745 		assert(_wireCount);
4746 		if (!_wireCount) {
4747 			break;
4748 		}
4749 		dataP = getDataP(_memoryEntries);
4750 		if (!dataP) {
4751 			break;
4752 		}
4753 
4754 		if (kIODirectionCompleteWithError & forDirection) {
4755 			dataP->fCompletionError = true;
4756 		}
4757 
4758 		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4759 			performOperation(kIOMemorySetEncrypted, 0, _length);
4760 		}
4761 
4762 		_wireCount--;
4763 		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4764 			ioPLBlock *ioplList = getIOPLList(dataP);
4765 			UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4766 
4767 			if (_wireCount) {
4768 				// kIODirectionCompleteWithDataValid & forDirection
4769 				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4770 					vm_tag_t tag;
4771 					tag = (typeof(tag))getVMTag(kernel_map);
4772 					for (ind = 0; ind < count; ind++) {
4773 						if (ioplList[ind].fIOPL) {
4774 							iopl_valid_data(ioplList[ind].fIOPL, tag);
4775 						}
4776 					}
4777 				}
4778 			} else {
4779 				if (_dmaReferences) {
4780 					panic("complete() while dma active");
4781 				}
4782 
4783 				if (dataP->fMappedBaseValid) {
4784 					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4785 					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4786 				}
4787 #if IOTRACKING
4788 				if (dataP->fWireTracking.link.next) {
4789 					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4790 				}
4791 #endif /* IOTRACKING */
4792 				// Only complete iopls that we created which are for TypeVirtual
4793 				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4794 					for (ind = 0; ind < count; ind++) {
4795 						if (ioplList[ind].fIOPL) {
4796 							if (dataP->fCompletionError) {
4797 								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4798 							} else {
4799 								upl_commit(ioplList[ind].fIOPL, NULL, 0);
4800 							}
4801 							upl_deallocate(ioplList[ind].fIOPL);
4802 						}
4803 					}
4804 				} else if (kIOMemoryTypeUPL == type) {
4805 					upl_set_referenced(ioplList[0].fIOPL, false);
4806 				}
4807 
4808 				_memoryEntries->setLength(computeDataSize(0, 0));
4809 
4810 				dataP->fPreparationID = kIOPreparationIDUnprepared;
4811 				_flags &= ~kIOMemoryPreparedReadOnly;
4812 
4813 				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4814 					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4815 				}
4816 			}
4817 		}
4818 	}while (false);
4819 
4820 	if (_prepareLock) {
4821 		IOLockUnlock(_prepareLock);
4822 	}
4823 
4824 	traceInterval.setEndArg1(kIOReturnSuccess);
4825 	return kIOReturnSuccess;
4826 }
4827 
4828 IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)4829 IOGeneralMemoryDescriptor::doMap(
4830 	vm_map_t                __addressMap,
4831 	IOVirtualAddress *      __address,
4832 	IOOptionBits            options,
4833 	IOByteCount             __offset,
4834 	IOByteCount             __length )
4835 {
4836 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4837 	traceInterval.setEndArg1(kIOReturnSuccess);
4838 #ifndef __LP64__
4839 	if (!(kIOMap64Bit & options)) {
4840 		panic("IOGeneralMemoryDescriptor::doMap !64bit");
4841 	}
4842 #endif /* !__LP64__ */
4843 
4844 	kern_return_t  err;
4845 
4846 	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
4847 	mach_vm_size_t offset  = mapping->fOffset + __offset;
4848 	mach_vm_size_t length  = mapping->fLength;
4849 
4850 	IOOptionBits type = _flags & kIOMemoryTypeMask;
4851 	Ranges vec = _ranges;
4852 
4853 	mach_vm_address_t range0Addr = 0;
4854 	mach_vm_size_t    range0Len = 0;
4855 
4856 	if ((offset >= _length) || ((offset + length) > _length)) {
4857 		traceInterval.setEndArg1(kIOReturnBadArgument);
4858 		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4859 		// assert(offset == 0 && _length == 0 && length == 0);
4860 		return kIOReturnBadArgument;
4861 	}
4862 
4863 	assert(!(kIOMemoryRemote & _flags));
4864 	if (kIOMemoryRemote & _flags) {
4865 		return 0;
4866 	}
4867 
4868 	if (vec.v) {
4869 		getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
4870 	}
4871 
4872 	// mapping source == dest? (could be much better)
4873 	if (_task
4874 	    && (mapping->fAddressTask == _task)
4875 	    && (mapping->fAddressMap == get_task_map(_task))
4876 	    && (options & kIOMapAnywhere)
4877 	    && (!(kIOMapUnique & options))
4878 	    && (!(kIOMapGuardedMask & options))
4879 	    && (1 == _rangesCount)
4880 	    && (0 == offset)
4881 	    && range0Addr
4882 	    && (length <= range0Len)) {
4883 		mapping->fAddress = range0Addr;
4884 		mapping->fOptions |= kIOMapStatic;
4885 
4886 		return kIOReturnSuccess;
4887 	}
4888 
4889 	if (!_memRef) {
4890 		IOOptionBits createOptions = 0;
4891 		if (!(kIOMapReadOnly & options)) {
4892 			createOptions |= kIOMemoryReferenceWrite;
4893 #if DEVELOPMENT || DEBUG
4894 			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4895 			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4896 				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4897 			}
4898 #endif
4899 		}
4900 		err = memoryReferenceCreate(createOptions, &_memRef);
4901 		if (kIOReturnSuccess != err) {
4902 			traceInterval.setEndArg1(err);
4903 			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4904 			return err;
4905 		}
4906 	}
4907 
4908 	memory_object_t pager;
4909 	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4910 
4911 	// <upl_transpose //
4912 	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4913 		do{
4914 			upl_t               redirUPL2;
4915 			upl_size_t          size;
4916 			upl_control_flags_t flags;
4917 			unsigned int        lock_count;
4918 
4919 			if (!_memRef || (1 != _memRef->count)) {
4920 				err = kIOReturnNotReadable;
4921 				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4922 				break;
4923 			}
4924 
4925 			size = (upl_size_t) round_page(mapping->fLength);
4926 			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4927 			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4928 
4929 			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
4930 			    NULL, NULL,
4931 			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
4932 				redirUPL2 = NULL;
4933 			}
4934 
4935 			for (lock_count = 0;
4936 			    IORecursiveLockHaveLock(gIOMemoryLock);
4937 			    lock_count++) {
4938 				UNLOCK;
4939 			}
4940 			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
4941 			for (;
4942 			    lock_count;
4943 			    lock_count--) {
4944 				LOCK;
4945 			}
4946 
4947 			if (kIOReturnSuccess != err) {
4948 				IOLog("upl_transpose(%x)\n", err);
4949 				err = kIOReturnSuccess;
4950 			}
4951 
4952 			if (redirUPL2) {
4953 				upl_commit(redirUPL2, NULL, 0);
4954 				upl_deallocate(redirUPL2);
4955 				redirUPL2 = NULL;
4956 			}
4957 			{
4958 				// swap the memEntries since they now refer to different vm_objects
4959 				IOMemoryReference * me = _memRef;
4960 				_memRef = mapping->fMemory->_memRef;
4961 				mapping->fMemory->_memRef = me;
4962 			}
4963 			if (pager) {
4964 				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
4965 			}
4966 		}while (false);
4967 	}
4968 	// upl_transpose> //
4969 	else {
4970 		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4971 		if (err) {
4972 			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
4973 		}
4974 #if IOTRACKING
4975 		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
4976 			// only dram maps in the default on developement case
4977 			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4978 		}
4979 #endif /* IOTRACKING */
4980 		if ((err == KERN_SUCCESS) && pager) {
4981 			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4982 
4983 			if (err != KERN_SUCCESS) {
4984 				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4985 			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4986 				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4987 			}
4988 		}
4989 	}
4990 
4991 	traceInterval.setEndArg1(err);
4992 	if (err) {
4993 		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4994 	}
4995 	return err;
4996 }
4997 
4998 #if IOTRACKING
4999 IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking,task_t * task,mach_vm_address_t * address,mach_vm_size_t * size)5000 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
5001     mach_vm_address_t * address, mach_vm_size_t * size)
5002 {
5003 #define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
5004 
5005 	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
5006 
5007 	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
5008 		return kIOReturnNotReady;
5009 	}
5010 
5011 	*task    = map->fAddressTask;
5012 	*address = map->fAddress;
5013 	*size    = map->fLength;
5014 
5015 	return kIOReturnSuccess;
5016 }
5017 #endif /* IOTRACKING */
5018 
5019 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5020 IOGeneralMemoryDescriptor::doUnmap(
5021 	vm_map_t                addressMap,
5022 	IOVirtualAddress        __address,
5023 	IOByteCount             __length )
5024 {
5025 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5026 	IOReturn ret;
5027 	ret = super::doUnmap(addressMap, __address, __length);
5028 	traceInterval.setEndArg1(ret);
5029 	return ret;
5030 }
5031 
5032 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5033 
5034 #undef super
5035 #define super OSObject
5036 
5037 OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )
5038 
5039 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
5040 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
5041 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
5042 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
5043 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
5044 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
5045 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
5046 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5047 
5048 /* ex-inline function implementation */
5049 IOPhysicalAddress
getPhysicalAddress()5050 IOMemoryMap::getPhysicalAddress()
5051 {
5052 	return getPhysicalSegment( 0, NULL );
5053 }
5054 
5055 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5056 
5057 bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5058 IOMemoryMap::init(
5059 	task_t                  intoTask,
5060 	mach_vm_address_t       toAddress,
5061 	IOOptionBits            _options,
5062 	mach_vm_size_t          _offset,
5063 	mach_vm_size_t          _length )
5064 {
5065 	if (!intoTask) {
5066 		return false;
5067 	}
5068 
5069 	if (!super::init()) {
5070 		return false;
5071 	}
5072 
5073 	fAddressMap  = get_task_map(intoTask);
5074 	if (!fAddressMap) {
5075 		return false;
5076 	}
5077 	vm_map_reference(fAddressMap);
5078 
5079 	fAddressTask = intoTask;
5080 	fOptions     = _options;
5081 	fLength      = _length;
5082 	fOffset      = _offset;
5083 	fAddress     = toAddress;
5084 
5085 	return true;
5086 }
5087 
5088 bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5089 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5090 {
5091 	if (!_memory) {
5092 		return false;
5093 	}
5094 
5095 	if (!fSuperMap) {
5096 		if ((_offset + fLength) > _memory->getLength()) {
5097 			return false;
5098 		}
5099 		fOffset = _offset;
5100 	}
5101 
5102 
5103 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5104 	if (fMemory) {
5105 		if (fMemory != _memory) {
5106 			fMemory->removeMapping(this);
5107 		}
5108 	}
5109 	fMemory = os::move(tempval);
5110 
5111 	return true;
5112 }
5113 
5114 IOReturn
doMap(vm_map_t __addressMap,IOVirtualAddress * __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5115 IOMemoryDescriptor::doMap(
5116 	vm_map_t                __addressMap,
5117 	IOVirtualAddress *      __address,
5118 	IOOptionBits            options,
5119 	IOByteCount             __offset,
5120 	IOByteCount             __length )
5121 {
5122 	return kIOReturnUnsupported;
5123 }
5124 
5125 IOReturn
handleFault(void * _pager,mach_vm_size_t sourceOffset,mach_vm_size_t length)5126 IOMemoryDescriptor::handleFault(
5127 	void *                  _pager,
5128 	mach_vm_size_t          sourceOffset,
5129 	mach_vm_size_t          length)
5130 {
5131 	if (kIOMemoryRedirected & _flags) {
5132 #if DEBUG
5133 		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
5134 #endif
5135 		do {
5136 			SLEEP;
5137 		} while (kIOMemoryRedirected & _flags);
5138 	}
5139 	return kIOReturnSuccess;
5140 }
5141 
5142 IOReturn
populateDevicePager(void * _pager,vm_map_t addressMap,mach_vm_address_t address,mach_vm_size_t sourceOffset,mach_vm_size_t length,IOOptionBits options)5143 IOMemoryDescriptor::populateDevicePager(
5144 	void *                  _pager,
5145 	vm_map_t                addressMap,
5146 	mach_vm_address_t       address,
5147 	mach_vm_size_t          sourceOffset,
5148 	mach_vm_size_t          length,
5149 	IOOptionBits            options )
5150 {
5151 	IOReturn            err = kIOReturnSuccess;
5152 	memory_object_t     pager = (memory_object_t) _pager;
5153 	mach_vm_size_t      size;
5154 	mach_vm_size_t      bytes;
5155 	mach_vm_size_t      page;
5156 	mach_vm_size_t      pageOffset;
5157 	mach_vm_size_t      pagerOffset;
5158 	IOPhysicalLength    segLen, chunk;
5159 	addr64_t            physAddr;
5160 	IOOptionBits        type;
5161 
5162 	type = _flags & kIOMemoryTypeMask;
5163 
5164 	if (reserved->dp.pagerContig) {
5165 		sourceOffset = 0;
5166 		pagerOffset  = 0;
5167 	}
5168 
5169 	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
5170 	assert( physAddr );
5171 	pageOffset = physAddr - trunc_page_64( physAddr );
5172 	pagerOffset = sourceOffset;
5173 
5174 	size = length + pageOffset;
5175 	physAddr -= pageOffset;
5176 
5177 	segLen += pageOffset;
5178 	bytes = size;
5179 	do{
5180 		// in the middle of the loop only map whole pages
5181 		if (segLen >= bytes) {
5182 			segLen = bytes;
5183 		} else if (segLen != trunc_page_64(segLen)) {
5184 			err = kIOReturnVMError;
5185 		}
5186 		if (physAddr != trunc_page_64(physAddr)) {
5187 			err = kIOReturnBadArgument;
5188 		}
5189 
5190 		if (kIOReturnSuccess != err) {
5191 			break;
5192 		}
5193 
5194 #if DEBUG || DEVELOPMENT
5195 		if ((kIOMemoryTypeUPL != type)
5196 		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
5197 			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
5198 			    physAddr, (uint64_t)segLen);
5199 		}
5200 #endif /* DEBUG || DEVELOPMENT */
5201 
5202 		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
5203 		for (page = 0;
5204 		    (page < segLen) && (KERN_SUCCESS == err);
5205 		    page += chunk) {
5206 			err = device_pager_populate_object(pager, pagerOffset,
5207 			    (ppnum_t)(atop_64(physAddr + page)), chunk);
5208 			pagerOffset += chunk;
5209 		}
5210 
5211 		assert(KERN_SUCCESS == err);
5212 		if (err) {
5213 			break;
5214 		}
5215 
5216 		// This call to vm_fault causes an early pmap level resolution
5217 		// of the mappings created above for kernel mappings, since
5218 		// faulting in later can't take place from interrupt level.
5219 		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
5220 			err = vm_fault(addressMap,
5221 			    (vm_map_offset_t)trunc_page_64(address),
5222 			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
5223 			    FALSE, VM_KERN_MEMORY_NONE,
5224 			    THREAD_UNINT, NULL,
5225 			    (vm_map_offset_t)0);
5226 
5227 			if (KERN_SUCCESS != err) {
5228 				break;
5229 			}
5230 		}
5231 
5232 		sourceOffset += segLen - pageOffset;
5233 		address += segLen;
5234 		bytes -= segLen;
5235 		pageOffset = 0;
5236 	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
5237 
5238 	if (bytes) {
5239 		err = kIOReturnBadArgument;
5240 	}
5241 
5242 	return err;
5243 }
5244 
5245 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5246 IOMemoryDescriptor::doUnmap(
5247 	vm_map_t                addressMap,
5248 	IOVirtualAddress        __address,
5249 	IOByteCount             __length )
5250 {
5251 	IOReturn          err;
5252 	IOMemoryMap *     mapping;
5253 	mach_vm_address_t address;
5254 	mach_vm_size_t    length;
5255 
5256 	if (__length) {
5257 		panic("doUnmap");
5258 	}
5259 
5260 	mapping = (IOMemoryMap *) __address;
5261 	addressMap = mapping->fAddressMap;
5262 	address    = mapping->fAddress;
5263 	length     = mapping->fLength;
5264 
5265 	if (kIOMapOverwrite & mapping->fOptions) {
5266 		err = KERN_SUCCESS;
5267 	} else {
5268 		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
5269 			addressMap = IOPageableMapForAddress( address );
5270 		}
5271 #if DEBUG
5272 		if (kIOLogMapping & gIOKitDebug) {
5273 			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
5274 			    addressMap, address, length );
5275 		}
5276 #endif
5277 		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
5278 		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
5279 			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
5280 		}
5281 	}
5282 
5283 #if IOTRACKING
5284 	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
5285 #endif /* IOTRACKING */
5286 
5287 	return err;
5288 }
5289 
5290 IOReturn
redirect(task_t safeTask,bool doRedirect)5291 IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
5292 {
5293 	IOReturn            err = kIOReturnSuccess;
5294 	IOMemoryMap *       mapping = NULL;
5295 	OSSharedPtr<OSIterator>        iter;
5296 
5297 	LOCK;
5298 
5299 	if (doRedirect) {
5300 		_flags |= kIOMemoryRedirected;
5301 	} else {
5302 		_flags &= ~kIOMemoryRedirected;
5303 	}
5304 
5305 	do {
5306 		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
5307 			memory_object_t   pager;
5308 
5309 			if (reserved) {
5310 				pager = (memory_object_t) reserved->dp.devicePager;
5311 			} else {
5312 				pager = MACH_PORT_NULL;
5313 			}
5314 
5315 			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
5316 				mapping->redirect( safeTask, doRedirect );
5317 				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
5318 					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
5319 				}
5320 			}
5321 
5322 			iter.reset();
5323 		}
5324 	} while (false);
5325 
5326 	if (!doRedirect) {
5327 		WAKEUP;
5328 	}
5329 
5330 	UNLOCK;
5331 
5332 #ifndef __LP64__
5333 	// temporary binary compatibility
5334 	IOSubMemoryDescriptor * subMem;
5335 	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
5336 		err = subMem->redirect( safeTask, doRedirect );
5337 	} else {
5338 		err = kIOReturnSuccess;
5339 	}
5340 #endif /* !__LP64__ */
5341 
5342 	return err;
5343 }
5344 
5345 IOReturn
redirect(task_t safeTask,bool doRedirect)5346 IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
5347 {
5348 	IOReturn err = kIOReturnSuccess;
5349 
5350 	if (fSuperMap) {
5351 //        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
5352 	} else {
5353 		LOCK;
5354 
5355 		do{
5356 			if (!fAddress) {
5357 				break;
5358 			}
5359 			if (!fAddressMap) {
5360 				break;
5361 			}
5362 
5363 			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
5364 			    && (0 == (fOptions & kIOMapStatic))) {
5365 				IOUnmapPages( fAddressMap, fAddress, fLength );
5366 				err = kIOReturnSuccess;
5367 #if DEBUG
5368 				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
5369 #endif
5370 			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
5371 				IOOptionBits newMode;
5372 				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
5373 				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
5374 			}
5375 		}while (false);
5376 		UNLOCK;
5377 	}
5378 
5379 	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5380 	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
5381 	    && safeTask
5382 	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
5383 		fMemory->redirect(safeTask, doRedirect);
5384 	}
5385 
5386 	return err;
5387 }
5388 
5389 IOReturn
unmap(void)5390 IOMemoryMap::unmap( void )
5391 {
5392 	IOReturn    err;
5393 
5394 	LOCK;
5395 
5396 	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5397 	    && (0 == (kIOMapStatic & fOptions))) {
5398 		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5399 	} else {
5400 		err = kIOReturnSuccess;
5401 	}
5402 
5403 	if (fAddressMap) {
5404 		vm_map_deallocate(fAddressMap);
5405 		fAddressMap = NULL;
5406 	}
5407 
5408 	fAddress = 0;
5409 
5410 	UNLOCK;
5411 
5412 	return err;
5413 }
5414 
5415 void
taskDied(void)5416 IOMemoryMap::taskDied( void )
5417 {
5418 	LOCK;
5419 	if (fUserClientUnmap) {
5420 		unmap();
5421 	}
5422 #if IOTRACKING
5423 	else {
5424 		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
5425 	}
5426 #endif /* IOTRACKING */
5427 
5428 	if (fAddressMap) {
5429 		vm_map_deallocate(fAddressMap);
5430 		fAddressMap = NULL;
5431 	}
5432 	fAddressTask = NULL;
5433 	fAddress     = 0;
5434 	UNLOCK;
5435 }
5436 
5437 IOReturn
userClientUnmap(void)5438 IOMemoryMap::userClientUnmap( void )
5439 {
5440 	fUserClientUnmap = true;
5441 	return kIOReturnSuccess;
5442 }
5443 
5444 // Overload the release mechanism.  All mappings must be a member
5445 // of a memory descriptors _mappings set.  This means that we
5446 // always have 2 references on a mapping.  When either of these mappings
5447 // are released we need to free ourselves.
5448 void
taggedRelease(const void * tag) const5449 IOMemoryMap::taggedRelease(const void *tag) const
5450 {
5451 	LOCK;
5452 	super::taggedRelease(tag, 2);
5453 	UNLOCK;
5454 }
5455 
5456 void
free()5457 IOMemoryMap::free()
5458 {
5459 	unmap();
5460 
5461 	if (fMemory) {
5462 		LOCK;
5463 		fMemory->removeMapping(this);
5464 		UNLOCK;
5465 		fMemory.reset();
5466 	}
5467 
5468 	if (fSuperMap) {
5469 		fSuperMap.reset();
5470 	}
5471 
5472 	if (fRedirUPL) {
5473 		upl_commit(fRedirUPL, NULL, 0);
5474 		upl_deallocate(fRedirUPL);
5475 	}
5476 
5477 	super::free();
5478 }
5479 
5480 IOByteCount
getLength()5481 IOMemoryMap::getLength()
5482 {
5483 	return fLength;
5484 }
5485 
5486 IOVirtualAddress
getVirtualAddress()5487 IOMemoryMap::getVirtualAddress()
5488 {
5489 #ifndef __LP64__
5490 	if (fSuperMap) {
5491 		fSuperMap->getVirtualAddress();
5492 	} else if (fAddressMap
5493 	    && vm_map_is_64bit(fAddressMap)
5494 	    && (sizeof(IOVirtualAddress) < 8)) {
5495 		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
5496 	}
5497 #endif /* !__LP64__ */
5498 
5499 	return fAddress;
5500 }
5501 
5502 #ifndef __LP64__
5503 mach_vm_address_t
getAddress()5504 IOMemoryMap::getAddress()
5505 {
5506 	return fAddress;
5507 }
5508 
5509 mach_vm_size_t
getSize()5510 IOMemoryMap::getSize()
5511 {
5512 	return fLength;
5513 }
5514 #endif /* !__LP64__ */
5515 
5516 
5517 task_t
getAddressTask()5518 IOMemoryMap::getAddressTask()
5519 {
5520 	if (fSuperMap) {
5521 		return fSuperMap->getAddressTask();
5522 	} else {
5523 		return fAddressTask;
5524 	}
5525 }
5526 
5527 IOOptionBits
getMapOptions()5528 IOMemoryMap::getMapOptions()
5529 {
5530 	return fOptions;
5531 }
5532 
5533 IOMemoryDescriptor *
getMemoryDescriptor()5534 IOMemoryMap::getMemoryDescriptor()
5535 {
5536 	return fMemory.get();
5537 }
5538 
5539 IOMemoryMap *
copyCompatible(IOMemoryMap * newMapping)5540 IOMemoryMap::copyCompatible(
5541 	IOMemoryMap * newMapping )
5542 {
5543 	task_t              task      = newMapping->getAddressTask();
5544 	mach_vm_address_t   toAddress = newMapping->fAddress;
5545 	IOOptionBits        _options  = newMapping->fOptions;
5546 	mach_vm_size_t      _offset   = newMapping->fOffset;
5547 	mach_vm_size_t      _length   = newMapping->fLength;
5548 
5549 	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5550 		return NULL;
5551 	}
5552 	if ((fOptions ^ _options) & kIOMapReadOnly) {
5553 		return NULL;
5554 	}
5555 	if ((fOptions ^ _options) & kIOMapGuardedMask) {
5556 		return NULL;
5557 	}
5558 	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5559 	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
5560 		return NULL;
5561 	}
5562 
5563 	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5564 		return NULL;
5565 	}
5566 
5567 	if (_offset < fOffset) {
5568 		return NULL;
5569 	}
5570 
5571 	_offset -= fOffset;
5572 
5573 	if ((_offset + _length) > fLength) {
5574 		return NULL;
5575 	}
5576 
5577 	if ((fLength == _length) && (!_offset)) {
5578 		retain();
5579 		newMapping = this;
5580 	} else {
5581 		newMapping->fSuperMap.reset(this, OSRetain);
5582 		newMapping->fOffset   = fOffset + _offset;
5583 		newMapping->fAddress  = fAddress + _offset;
5584 	}
5585 
5586 	return newMapping;
5587 }
5588 
5589 IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5590 IOMemoryMap::wireRange(
5591 	uint32_t                options,
5592 	mach_vm_size_t          offset,
5593 	mach_vm_size_t          length)
5594 {
5595 	IOReturn kr;
5596 	mach_vm_address_t start = trunc_page_64(fAddress + offset);
5597 	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
5598 	vm_prot_t prot;
5599 
5600 	prot = (kIODirectionOutIn & options);
5601 	if (prot) {
5602 		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5603 	} else {
5604 		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5605 	}
5606 
5607 	return kr;
5608 }
5609 
5610 
5611 IOPhysicalAddress
5612 #ifdef __LP64__
getPhysicalSegment(IOByteCount _offset,IOPhysicalLength * _length,IOOptionBits _options)5613 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
5614 #else /* !__LP64__ */
5615 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
5616 #endif /* !__LP64__ */
5617 {
5618 	IOPhysicalAddress   address;
5619 
5620 	LOCK;
5621 #ifdef __LP64__
5622 	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
5623 #else /* !__LP64__ */
5624 	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
5625 #endif /* !__LP64__ */
5626 	UNLOCK;
5627 
5628 	return address;
5629 }
5630 
5631 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5632 
5633 #undef super
5634 #define super OSObject
5635 
5636 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5637 
5638 void
initialize(void)5639 IOMemoryDescriptor::initialize( void )
5640 {
5641 	if (NULL == gIOMemoryLock) {
5642 		gIOMemoryLock = IORecursiveLockAlloc();
5643 	}
5644 
5645 	gIOLastPage = IOGetLastPageNumber();
5646 }
5647 
5648 void
free(void)5649 IOMemoryDescriptor::free( void )
5650 {
5651 	if (_mappings) {
5652 		_mappings.reset();
5653 	}
5654 
5655 	if (reserved) {
5656 		cleanKernelReserved(reserved);
5657 		IOFreeType(reserved, IOMemoryDescriptorReserved);
5658 		reserved = NULL;
5659 	}
5660 	super::free();
5661 }
5662 
5663 OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5664 IOMemoryDescriptor::setMapping(
5665 	task_t                  intoTask,
5666 	IOVirtualAddress        mapAddress,
5667 	IOOptionBits            options )
5668 {
5669 	return createMappingInTask( intoTask, mapAddress,
5670 	           options | kIOMapStatic,
5671 	           0, getLength());
5672 }
5673 
5674 OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5675 IOMemoryDescriptor::map(
5676 	IOOptionBits            options )
5677 {
5678 	return createMappingInTask( kernel_task, 0,
5679 	           options | kIOMapAnywhere,
5680 	           0, getLength());
5681 }
5682 
5683 #ifndef __LP64__
5684 OSSharedPtr<IOMemoryMap>
map(task_t intoTask,IOVirtualAddress atAddress,IOOptionBits options,IOByteCount offset,IOByteCount length)5685 IOMemoryDescriptor::map(
5686 	task_t                  intoTask,
5687 	IOVirtualAddress        atAddress,
5688 	IOOptionBits            options,
5689 	IOByteCount             offset,
5690 	IOByteCount             length )
5691 {
5692 	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
5693 		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
5694 		return NULL;
5695 	}
5696 
5697 	return createMappingInTask(intoTask, atAddress,
5698 	           options, offset, length);
5699 }
5700 #endif /* !__LP64__ */
5701 
5702 OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5703 IOMemoryDescriptor::createMappingInTask(
5704 	task_t                  intoTask,
5705 	mach_vm_address_t       atAddress,
5706 	IOOptionBits            options,
5707 	mach_vm_size_t          offset,
5708 	mach_vm_size_t          length)
5709 {
5710 	IOMemoryMap * result;
5711 	IOMemoryMap * mapping;
5712 
5713 	if (0 == length) {
5714 		length = getLength();
5715 	}
5716 
5717 	mapping = new IOMemoryMap;
5718 
5719 	if (mapping
5720 	    && !mapping->init( intoTask, atAddress,
5721 	    options, offset, length )) {
5722 		mapping->release();
5723 		mapping = NULL;
5724 	}
5725 
5726 	if (mapping) {
5727 		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5728 	} else {
5729 		result = nullptr;
5730 	}
5731 
5732 #if DEBUG
5733 	if (!result) {
5734 		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5735 		    this, atAddress, (uint32_t) options, offset, length);
5736 	}
5737 #endif
5738 
5739 	// already retained through makeMapping
5740 	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5741 
5742 	return retval;
5743 }
5744 
5745 #ifndef __LP64__ // there is only a 64 bit version for LP64
5746 IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,IOByteCount offset)5747 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5748     IOOptionBits         options,
5749     IOByteCount          offset)
5750 {
5751 	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
5752 }
5753 #endif
5754 
5755 IOReturn
redirect(IOMemoryDescriptor * newBackingMemory,IOOptionBits options,mach_vm_size_t offset)5756 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5757     IOOptionBits         options,
5758     mach_vm_size_t       offset)
5759 {
5760 	IOReturn err = kIOReturnSuccess;
5761 	OSSharedPtr<IOMemoryDescriptor> physMem;
5762 
5763 	LOCK;
5764 
5765 	if (fAddress && fAddressMap) {
5766 		do{
5767 			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5768 			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5769 				physMem = fMemory;
5770 			}
5771 
5772 			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
5773 				upl_size_t          size = (typeof(size))round_page(fLength);
5774 				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5775 				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5776 				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
5777 				    NULL, NULL,
5778 				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
5779 					fRedirUPL = NULL;
5780 				}
5781 
5782 				if (physMem) {
5783 					IOUnmapPages( fAddressMap, fAddress, fLength );
5784 					if ((false)) {
5785 						physMem->redirect(NULL, true);
5786 					}
5787 				}
5788 			}
5789 
5790 			if (newBackingMemory) {
5791 				if (newBackingMemory != fMemory) {
5792 					fOffset = 0;
5793 					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
5794 					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
5795 					    offset, fLength)) {
5796 						err = kIOReturnError;
5797 					}
5798 				}
5799 				if (fRedirUPL) {
5800 					upl_commit(fRedirUPL, NULL, 0);
5801 					upl_deallocate(fRedirUPL);
5802 					fRedirUPL = NULL;
5803 				}
5804 				if ((false) && physMem) {
5805 					physMem->redirect(NULL, false);
5806 				}
5807 			}
5808 		}while (false);
5809 	}
5810 
5811 	UNLOCK;
5812 
5813 	return err;
5814 }
5815 
5816 IOMemoryMap *
makeMapping(IOMemoryDescriptor * owner,task_t __intoTask,IOVirtualAddress __address,IOOptionBits options,IOByteCount __offset,IOByteCount __length)5817 IOMemoryDescriptor::makeMapping(
5818 	IOMemoryDescriptor *    owner,
5819 	task_t                  __intoTask,
5820 	IOVirtualAddress        __address,
5821 	IOOptionBits            options,
5822 	IOByteCount             __offset,
5823 	IOByteCount             __length )
5824 {
5825 #ifndef __LP64__
5826 	if (!(kIOMap64Bit & options)) {
5827 		panic("IOMemoryDescriptor::makeMapping !64bit");
5828 	}
5829 #endif /* !__LP64__ */
5830 
5831 	OSSharedPtr<IOMemoryDescriptor> mapDesc;
5832 	__block IOMemoryMap * result  = NULL;
5833 
5834 	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
5835 	mach_vm_size_t offset  = mapping->fOffset + __offset;
5836 	mach_vm_size_t length  = mapping->fLength;
5837 
5838 	mapping->fOffset = offset;
5839 
5840 	LOCK;
5841 
5842 	do{
5843 		if (kIOMapStatic & options) {
5844 			result = mapping;
5845 			addMapping(mapping);
5846 			mapping->setMemoryDescriptor(this, 0);
5847 			continue;
5848 		}
5849 
5850 		if (kIOMapUnique & options) {
5851 			addr64_t phys;
5852 			IOByteCount       physLen;
5853 
5854 //	    if (owner != this)		continue;
5855 
5856 			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5857 			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5858 				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
5859 				if (!phys || (physLen < length)) {
5860 					continue;
5861 				}
5862 
5863 				mapDesc = IOMemoryDescriptor::withAddressRange(
5864 					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
5865 				if (!mapDesc) {
5866 					continue;
5867 				}
5868 				offset = 0;
5869 				mapping->fOffset = offset;
5870 			}
5871 		} else {
5872 			// look for a compatible existing mapping
5873 			if (_mappings) {
5874 				_mappings->iterateObjects(^(OSObject * object)
5875 				{
5876 					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
5877 					if ((result = lookMapping->copyCompatible(mapping))) {
5878 					        addMapping(result);
5879 					        result->setMemoryDescriptor(this, offset);
5880 					        return true;
5881 					}
5882 					return false;
5883 				});
5884 			}
5885 			if (result || (options & kIOMapReference)) {
5886 				if (result != mapping) {
5887 					mapping->release();
5888 					mapping = NULL;
5889 				}
5890 				continue;
5891 			}
5892 		}
5893 
5894 		if (!mapDesc) {
5895 			mapDesc.reset(this, OSRetain);
5896 		}
5897 		IOReturn
5898 		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
5899 		if (kIOReturnSuccess == kr) {
5900 			result = mapping;
5901 			mapDesc->addMapping(result);
5902 			result->setMemoryDescriptor(mapDesc.get(), offset);
5903 		} else {
5904 			mapping->release();
5905 			mapping = NULL;
5906 		}
5907 	}while (false);
5908 
5909 	UNLOCK;
5910 
5911 	return result;
5912 }
5913 
5914 void
addMapping(IOMemoryMap * mapping)5915 IOMemoryDescriptor::addMapping(
5916 	IOMemoryMap * mapping )
5917 {
5918 	if (mapping) {
5919 		if (NULL == _mappings) {
5920 			_mappings = OSSet::withCapacity(1);
5921 		}
5922 		if (_mappings) {
5923 			_mappings->setObject( mapping );
5924 		}
5925 	}
5926 }
5927 
5928 void
removeMapping(IOMemoryMap * mapping)5929 IOMemoryDescriptor::removeMapping(
5930 	IOMemoryMap * mapping )
5931 {
5932 	if (_mappings) {
5933 		_mappings->removeObject( mapping);
5934 	}
5935 }
5936 
5937 void
setMapperOptions(uint16_t options)5938 IOMemoryDescriptor::setMapperOptions( uint16_t options)
5939 {
5940 	_iomapperOptions = options;
5941 }
5942 
5943 uint16_t
getMapperOptions(void)5944 IOMemoryDescriptor::getMapperOptions( void )
5945 {
5946 	return _iomapperOptions;
5947 }
5948 
5949 #ifndef __LP64__
5950 // obsolete initializers
5951 // - initWithOptions is the designated initializer
5952 bool
initWithAddress(void * address,IOByteCount length,IODirection direction)5953 IOMemoryDescriptor::initWithAddress(void *      address,
5954     IOByteCount   length,
5955     IODirection direction)
5956 {
5957 	return false;
5958 }
5959 
5960 bool
initWithAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)5961 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
5962     IOByteCount    length,
5963     IODirection  direction,
5964     task_t       task)
5965 {
5966 	return false;
5967 }
5968 
5969 bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)5970 IOMemoryDescriptor::initWithPhysicalAddress(
5971 	IOPhysicalAddress      address,
5972 	IOByteCount            length,
5973 	IODirection            direction )
5974 {
5975 	return false;
5976 }
5977 
5978 bool
initWithRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)5979 IOMemoryDescriptor::initWithRanges(
5980 	IOVirtualRange * ranges,
5981 	UInt32           withCount,
5982 	IODirection      direction,
5983 	task_t           task,
5984 	bool             asReference)
5985 {
5986 	return false;
5987 }
5988 
5989 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)5990 IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
5991     UInt32           withCount,
5992     IODirection      direction,
5993     bool             asReference)
5994 {
5995 	return false;
5996 }
5997 
5998 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)5999 IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
6000     IOByteCount * lengthOfSegment)
6001 {
6002 	return NULL;
6003 }
6004 #endif /* !__LP64__ */
6005 
6006 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6007 
6008 bool
serialize(OSSerialize * s) const6009 IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
6010 {
6011 	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
6012 	OSSharedPtr<OSObject>           values[2] = {NULL};
6013 	OSSharedPtr<OSArray>            array;
6014 
6015 	struct SerData {
6016 		user_addr_t address;
6017 		user_size_t length;
6018 	};
6019 
6020 	unsigned int index;
6021 
6022 	IOOptionBits type = _flags & kIOMemoryTypeMask;
6023 
6024 	if (s == NULL) {
6025 		return false;
6026 	}
6027 
6028 	array = OSArray::withCapacity(4);
6029 	if (!array) {
6030 		return false;
6031 	}
6032 
6033 	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6034 	if (!vcopy) {
6035 		return false;
6036 	}
6037 
6038 	keys[0] = OSSymbol::withCString("address");
6039 	keys[1] = OSSymbol::withCString("length");
6040 
6041 	// Copy the volatile data so we don't have to allocate memory
6042 	// while the lock is held.
6043 	LOCK;
6044 	if (vcopy.size() == _rangesCount) {
6045 		Ranges vec = _ranges;
6046 		for (index = 0; index < vcopy.size(); index++) {
6047 			mach_vm_address_t addr; mach_vm_size_t len;
6048 			getAddrLenForInd(addr, len, type, vec, index, _task);
6049 			vcopy[index].address = addr;
6050 			vcopy[index].length  = len;
6051 		}
6052 	} else {
6053 		// The descriptor changed out from under us.  Give up.
6054 		UNLOCK;
6055 		return false;
6056 	}
6057 	UNLOCK;
6058 
6059 	for (index = 0; index < vcopy.size(); index++) {
6060 		user_addr_t addr = vcopy[index].address;
6061 		IOByteCount len = (IOByteCount) vcopy[index].length;
6062 		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6063 		if (values[0] == NULL) {
6064 			return false;
6065 		}
6066 		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6067 		if (values[1] == NULL) {
6068 			return false;
6069 		}
6070 		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6071 		if (dict == NULL) {
6072 			return false;
6073 		}
6074 		array->setObject(dict.get());
6075 		dict.reset();
6076 		values[0].reset();
6077 		values[1].reset();
6078 	}
6079 
6080 	return array->serialize(s);
6081 }
6082 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6083 
6084 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
6085 #ifdef __LP64__
6086 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
6087 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
6088 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
6089 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
6090 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
6091 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
6092 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
6093 #else /* !__LP64__ */
6094 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
6095 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
6096 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
6097 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
6098 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
6099 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
6100 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
6101 #endif /* !__LP64__ */
6102 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
6103 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
6104 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
6105 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
6106 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
6107 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
6108 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
6109 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6110 
6111 /* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
6112 KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
6113     struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6114 
6115 /* ex-inline function implementation */
6116 IOPhysicalAddress
getPhysicalAddress()6117 IOMemoryDescriptor::getPhysicalAddress()
6118 {
6119 	return getPhysicalSegment( 0, NULL );
6120 }
6121 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData,OSObject)6122 OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6123 
6124 OSPtr<_IOMemoryDescriptorMixedData>
6125 _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6126 {
6127 	OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6128 	if (me && !me->initWithCapacity(capacity)) {
6129 		return nullptr;
6130 	}
6131 	return me;
6132 }
6133 
6134 bool
initWithCapacity(size_t capacity)6135 _IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
6136 {
6137 	if (_data && (!capacity || (_capacity < capacity))) {
6138 		freeMemory();
6139 	}
6140 
6141 	if (!OSObject::init()) {
6142 		return false;
6143 	}
6144 
6145 	if (!_data && capacity) {
6146 		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
6147 		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
6148 		if (!_data) {
6149 			return false;
6150 		}
6151 		_capacity = capacity;
6152 	}
6153 
6154 	_length = 0;
6155 
6156 	return true;
6157 }
6158 
6159 void
free()6160 _IOMemoryDescriptorMixedData::free()
6161 {
6162 	freeMemory();
6163 	OSObject::free();
6164 }
6165 
6166 void
freeMemory()6167 _IOMemoryDescriptorMixedData::freeMemory()
6168 {
6169 	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
6170 	_data = nullptr;
6171 	_capacity = _length = 0;
6172 }
6173 
6174 bool
appendBytes(const void * bytes,size_t length)6175 _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6176 {
6177 	const auto oldLength = getLength();
6178 	size_t newLength;
6179 	if (os_add_overflow(oldLength, length, &newLength)) {
6180 		return false;
6181 	}
6182 
6183 	if (!setLength(newLength)) {
6184 		return false;
6185 	}
6186 
6187 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6188 	if (bytes) {
6189 		bcopy(bytes, dest, length);
6190 	}
6191 
6192 	return true;
6193 }
6194 
6195 bool
setLength(size_t length)6196 _IOMemoryDescriptorMixedData::setLength(size_t length)
6197 {
6198 	if (!_data || (length > _capacity)) {
6199 		void *newData;
6200 
6201 		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
6202 		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
6203 		    NULL);
6204 		if (!newData) {
6205 			return false;
6206 		}
6207 
6208 		_data = newData;
6209 		_capacity = length;
6210 	}
6211 
6212 	_length = length;
6213 	return true;
6214 }
6215 
6216 const void *
getBytes() const6217 _IOMemoryDescriptorMixedData::getBytes() const
6218 {
6219 	return _length ? _data : nullptr;
6220 }
6221 
6222 size_t
getLength() const6223 _IOMemoryDescriptorMixedData::getLength() const
6224 {
6225 	return _data ? _length : 0;
6226 }
6227