xref: /xnu-12377.1.9/iokit/Kernel/IOMemoryDescriptor.cpp (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #define IOKIT_ENABLE_SHARED_PTR
29 
30 #include <sys/cdefs.h>
31 
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39 
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43 
44 #include <IOKit/IOKitDebug.h>
45 #include <IOKit/IOTimeStamp.h>
46 #include <libkern/OSDebug.h>
47 #include <libkern/OSKextLibPrivate.h>
48 
49 #include "IOKitKernelInternal.h"
50 
51 #include <libkern/c++/OSAllocation.h>
52 #include <libkern/c++/OSContainers.h>
53 #include <libkern/c++/OSDictionary.h>
54 #include <libkern/c++/OSArray.h>
55 #include <libkern/c++/OSSymbol.h>
56 #include <libkern/c++/OSNumber.h>
57 #include <os/overflow.h>
58 #include <os/cpp_util.h>
59 #include <os/base_private.h>
60 
61 #include <sys/uio.h>
62 
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout_xnu.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68 
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <mach/memory_entry.h>
72 #include <mach/mach_host.h>
73 #include <vm/vm_fault_xnu.h>
74 #include <vm/vm_protos.h>
75 #include <vm/vm_memory_entry.h>
76 #include <vm/vm_kern_xnu.h>
77 #include <vm/vm_iokit.h>
78 #include <vm/vm_map_xnu.h>
79 #include <kern/thread.h>
80 
81 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
82 extern void ipc_port_release_send(ipc_port_t port);
83 
84 __END_DECLS
85 
// Sentinel mapper pointer: "wait for the system mapper to become available".
// Distinguished from a real IOMapper instance by its value (1).
#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;

// Highest physical page number seen; set elsewhere in this file.
ppnum_t           gIOLastPage;

enum {
	// guard region size used for kIOMapGuardedLarge mappings (64KB)
	kIOMapGuardSizeLarge = 65536
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Single recursive lock serializing memory-descriptor state changes
// (used via the LOCK/UNLOCK/SLEEP/WAKEUP macros below).
static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug logging; compiled out by default (flip the #if to enable kprintf).
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
120 
121 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
122 
// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001, // range is device memory (no page list)
	kIOPLExternUPL = 0x00000002, // UPL supplied externally, not created here
};

// Initialization payload used when re-creating a descriptor from a
// persistent memory reference (fMD is the source descriptor).
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference               * fMemRef;
};

// Per-UPL bookkeeping record stored after the page list in ioGMDData.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags
};
144 
enum { kMaxWireTags = 6 };

// Variable-length internal state for IOGeneralMemoryDescriptor, stored in a
// _IOMemoryDescriptorMixedData buffer and reached via the getDataP() macro.
// Layout: fixed header, then fPageCnt upl_page_info_t entries (fPageList),
// then the ioPLBlock records — see getIOPLList()/computeDataSize() below.
struct ioGMDData {
	IOMapper *  fMapper;
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;        // DMA-mapped base address
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;
#endif /* IOTRACKING */
	unsigned int      fPageCnt;     // number of entries in fPageList
	uint8_t           fDMAMapNumAddressBits;
	unsigned char     fCompletionError:1;
	unsigned char     fMappedBaseValid:1; // fMappedBase holds a valid mapping
	unsigned char     _resv:4;
	unsigned char     fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
172 
#pragma GCC visibility push(hidden)

/*
 * Growable byte buffer used internally by IOMemoryDescriptor — presumably
 * the backing store for the variable-sized ioGMDData (see getDataP()); the
 * accessor macros below cast getBytes() to ioGMDData*.
 */
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	// Create/initialize with an initial capacity in bytes.
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	// Append raw bytes / truncate or extend the logical length.
	bool appendBytes(const void * bytes, size_t length);
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();

	void *  _data = nullptr;   // heap storage (NULL when empty)
	size_t  _length = 0;       // bytes in use
	size_t  _capacity = 0;     // bytes allocated
};

#pragma GCC visibility pop
199 
// Accessors for the ioGMDData stored in a _IOMemoryDescriptorMixedData:
// getDataP      - view the buffer's bytes as an ioGMDData
// getIOPLList   - the ioPLBlock array that follows the fPageCnt page infos
// getNumIOPL    - number of ioPLBlocks, derived from the buffer's length
// computeDataSize - bytes needed for p page infos plus u ioPLBlocks
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
209 
210 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
211 
extern "C" {
/*
 * Device pager callback: the VM needs data for a range of a device-backed
 * memory object. device_handle is the IOMemoryDescriptorReserved installed
 * at pager setup time (see memoryReferenceCreate). Under the global memory
 * lock, take a retain on the descriptor (so it can't be freed mid-fault)
 * and forward to handleFault(); returns KERN_ABORTED if the descriptor has
 * already been detached.
 */
kern_return_t
device_data_action(
	uintptr_t               device_handle,
	ipc_port_t              device_pager,
	vm_prot_t               protection,
	vm_object_offset_t      offset,
	vm_size_t               size)
{
	kern_return_t        kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	OSSharedPtr<IOMemoryDescriptor> memDesc;

	LOCK;
	if (ref->dp.memory) {
		// retain while calling out; reset() releases before dropping the lock
		memDesc.reset(ref->dp.memory, OSRetain);
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc.reset();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}

/*
 * Device pager teardown callback: the pager is done with the handle, so
 * free the reserved area that was passed to device_pager_setup().
 */
kern_return_t
device_close(
	uintptr_t     device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IOFreeType( ref, IOMemoryDescriptorReserved );

	return kIOReturnSuccess;
}
};      // end extern "C"
249 
250 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251 
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
//
// Returns the address/length of range 'ind' from the Ranges union 'r',
// decoding by the descriptor type: uio iovec, 64-bit range array (on ILP32
// only — on LP64 the v and v64 layouts coincide), or IOVirtualRange array.
static inline void
getAddrLenForInd(
	mach_vm_address_t                &addr,
	mach_vm_size_t                   &len, // Output variables
	UInt32                            type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32                            ind,
	task_t                            task __unused)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// uio-backed: pull the ind'th iovec
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		// 64-bit ranges need the explicit IOAddressRange form on ILP32
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len  = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len  = cur.length;
	}
}
285 
286 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
287 
288 static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)289 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
290 {
291 	IOReturn err = kIOReturnSuccess;
292 
293 	*control = VM_PURGABLE_SET_STATE;
294 
295 	enum { kIOMemoryPurgeableControlMask = 15 };
296 
297 	switch (kIOMemoryPurgeableControlMask & newState) {
298 	case kIOMemoryPurgeableKeepCurrent:
299 		*control = VM_PURGABLE_GET_STATE;
300 		break;
301 
302 	case kIOMemoryPurgeableNonVolatile:
303 		*state = VM_PURGABLE_NONVOLATILE;
304 		break;
305 	case kIOMemoryPurgeableVolatile:
306 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
307 		break;
308 	case kIOMemoryPurgeableEmpty:
309 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
310 		break;
311 	default:
312 		err = kIOReturnBadArgument;
313 		break;
314 	}
315 
316 	if (*control == VM_PURGABLE_SET_STATE) {
317 		// let VM know this call is from the kernel and is allowed to alter
318 		// the volatility of the memory entry even if it was created with
319 		// MAP_MEM_PURGABLE_KERNEL_ONLY
320 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
321 	}
322 
323 	return err;
324 }
325 
326 static IOReturn
purgeableStateBits(int * state)327 purgeableStateBits(int * state)
328 {
329 	IOReturn err = kIOReturnSuccess;
330 
331 	switch (VM_PURGABLE_STATE_MASK & *state) {
332 	case VM_PURGABLE_NONVOLATILE:
333 		*state = kIOMemoryPurgeableNonVolatile;
334 		break;
335 	case VM_PURGABLE_VOLATILE:
336 		*state = kIOMemoryPurgeableVolatile;
337 		break;
338 	case VM_PURGABLE_EMPTY:
339 		*state = kIOMemoryPurgeableEmpty;
340 		break;
341 	default:
342 		*state = kIOMemoryPurgeableNonVolatile;
343 		err = kIOReturnNotReady;
344 		break;
345 	}
346 	return err;
347 }
348 
// One row per IOKit cache mode: the WIMG attribute handed to the pager and
// the MAP_MEM_* object type used when creating VM named entries.
typedef struct {
	unsigned int wimg;         // VM_WIMG_* cache attribute
	unsigned int object_type;  // MAP_MEM_* named-entry type
} iokit_memtype_entry;

// Indexed directly by the kIO*Cache cache-mode constants.
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
366 
367 static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)368 vmProtForCacheMode(IOOptionBits cacheMode)
369 {
370 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
371 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
372 		cacheMode = kIODefaultCache;
373 	}
374 	vm_prot_t prot = 0;
375 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
376 	return prot;
377 }
378 
379 static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)380 pagerFlagsForCacheMode(IOOptionBits cacheMode)
381 {
382 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
383 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
384 		cacheMode = kIODefaultCache;
385 	}
386 	if (cacheMode == kIODefaultCache) {
387 		return -1U;
388 	}
389 	return iomd_mem_types[cacheMode].wimg;
390 }
391 
392 static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)393 cacheModeForPagerFlags(unsigned int pagerFlags)
394 {
395 	pagerFlags &= VM_WIMG_MASK;
396 	IOOptionBits cacheMode = kIODefaultCache;
397 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
398 		if (iomd_mem_types[i].wimg == pagerFlags) {
399 			cacheMode = i;
400 			break;
401 		}
402 	}
403 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
404 }
405 
406 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
407 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
408 
// One VM named entry covering part of a descriptor's memory.
struct IOMemoryEntry {
	ipc_port_t entry;   // send right to the named entry
	int64_t    offset;  // byte offset within the descriptor
	uint64_t   size;    // bytes covered by this entry
	uint64_t   start;   // source address the entry was created from
};

// Reference-counted collection of named entries describing a descriptor's
// memory (built by memoryReferenceCreate, variable-length allocation).
struct IOMemoryReference {
	volatile SInt32             refCount; // released via memoryReferenceRelease
	vm_prot_t                   prot;     // prot/MAP_MEM_* flags entries were made with
	uint32_t                    capacity; // allocated slots in entries[]
	uint32_t                    count;    // populated slots in entries[]
	struct IOMemoryReference  * mapRef;   // extra COW reference (kIOMemoryMapCopyOnWrite)
	IOMemoryEntry               entries[0];
};

// Option bits for memoryReferenceCreate().
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Global count of live IOMemoryReference objects (diagnostics).
SInt32 gIOMemoryReferenceCount;
432 
/*
 * Allocate an IOMemoryReference with 'capacity' entry slots, or grow an
 * existing one ('realloc' non-NULL) preserving its contents. A freshly
 * created reference starts with refCount 1 and bumps the global counter.
 * Returns NULL on allocation failure.
 */
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
	IOMemoryReference * ref;
	size_t              oldCapacity;

	if (realloc) {
		oldCapacity = realloc->capacity;
	} else {
		oldCapacity = 0;
	}

	// Use the kalloc API instead of manually handling the reallocation
	ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
	    oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
	if (ref) {
		if (oldCapacity == 0) {
			// brand-new reference: initialize the count exactly once
			ref->refCount = 1;
			OSIncrementAtomic(&gIOMemoryReferenceCount);
		}
		ref->capacity = capacity;
	}
	return ref;
}
457 
/*
 * Destroy a memory reference: recursively free the chained COW reference,
 * release the send right of every populated entry, free the allocation,
 * and drop the global reference counter.
 */
void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
	IOMemoryEntry * entries;

	if (ref->mapRef) {
		memoryReferenceFree(ref->mapRef);
		ref->mapRef = NULL;
	}

	// walk entries[count-1] .. entries[0], releasing each port send right
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;
		ipc_port_release_send(entries->entry);
	}
	kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);

	OSDecrementAtomic(&gIOMemoryReferenceCount);
}
477 
/*
 * Drop one reference; frees the object when the count transitions 1 -> 0
 * (OSDecrementAtomic returns the pre-decrement value).
 */
void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
	if (1 == OSDecrementAtomic(&ref->refCount)) {
		memoryReferenceFree(ref);
	}
}
485 
486 
/*
 * Build an IOMemoryReference describing this descriptor's backing memory:
 * an array of VM named entries, one per coalesced (and page-aligned, unless
 * MAP_MEM_USE_DATA_ADDR) address range, plus the vm_prot_t/MAP_MEM_* flags
 * used to create them.
 *
 * Three source cases:
 *  - task-backed ranges (_task != NULL): walk _ranges, coalesce adjacent
 *    spans, and create named entries with mach_make_memory_entry_internal;
 *  - IOBufferMemoryDescriptor pageable allocs: create-new named entry
 *    (MAP_MEM_NAMED_CREATE) with ledger accounting flags;
 *  - physical/UPL (_task == NULL): set up a device pager and wrap it in a
 *    single memory-object named entry.
 *
 * With kIOMemoryReferenceReuse and an existing _memRef, entries are compared
 * against the old reference (MAP_MEM_NAMED_REUSE); if every entry matches,
 * the old reference is retained and returned instead of the new one.
 * On any error the partially built reference is freed and *reference is
 * set to NULL. Returns a kern_return_t/IOReturn code.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t        err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;
	IOMemoryEntry *      cloneEntries = NULL;
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
	int                  misaligned_start = 0, misaligned_end = 0;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	remain = _length;
	if (_task) {
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain  = nextLen;
		}
	} else {
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW   & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		// compare new entries against the existing reference as we go
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//    + "network" ledger tag
					//    + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//    + no ledger tag
					//    + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		while (remain) {
			srcAddr  = nextAddr;
			srcLen   = nextLen;
			nextAddr = 0;
			nextLen  = 0;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr   = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr   = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// one named entry per iteration; the VM may return a shorter
			// actualSize, in which case we loop for the remainder
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				mach_vm_offset_t entryAddrForVm = entryAddr;
				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddrForVm, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddrForVm, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					if (misaligned_start || misaligned_end) {
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				if (count >= ref->capacity) {
					// NOTE(review): memoryReferenceAlloc failure (NULL) is
					// not checked here — verify Z_WAITOK_ZERO cannot fail
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry  = entry;
				entries->size   = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				if (MAP_MEM_NAMED_REUSE & prot) {
					// any mismatch against the old reference cancels reuse
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		// device pager faults land in device_data_action() above,
		// with 'reserved' as the device handle
		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr  = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry  = entry;
				entries->size   = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot  = prot;

	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		// copy-on-write descriptors also carry a VM_COPY reference
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		if (MAP_MEM_NAMED_REUSE & prot) {
			// full match against the existing reference: reuse it
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
808 
809 static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)810 IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
811 {
812 	switch (kIOMapGuardedMask & options) {
813 	default:
814 	case kIOMapGuardedSmall:
815 		return vm_map_page_size(map);
816 	case kIOMapGuardedLarge:
817 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
818 		return kIOMapGuardSizeLarge;
819 	}
820 	;
821 }
822 
/*
 * Undo a mapping made by IOMemoryDescriptorMapAlloc: page-align the
 * address/size to the target map's page boundaries, widen by the guard
 * regions if the mapping was created guarded, then deallocate the range.
 */
static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
    vm_map_offset_t addr, mach_vm_size_t size)
{
	kern_return_t   kr;
	vm_map_offset_t actualAddr;
	mach_vm_size_t  actualSize;

	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;

	if (kIOMapGuardedMask & options) {
		// the allocation included one guard region before and one after
		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
		actualAddr -= guardSize;
		actualSize += 2 * guardSize;
	}
	kr = mach_vm_deallocate(map, actualAddr, actualSize);

	return kr;
}
843 
/*
 * Allocate VM space for a descriptor mapping, described by the
 * IOMemoryDescriptorMapAllocRef passed as _ref. Honors kIOMapAnywhere vs
 * fixed-address requests, tags the allocation with ref->tag, and for
 * guarded mappings (kIOMapGuardedMask, anywhere-only) surrounds the range
 * with VM_PROT_NONE guard regions and returns the inner address in
 * ref->mapped. On success ref->mapped/ref->map describe the allocation.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn                        err;
	vm_map_offset_t                 addr;
	mach_vm_size_t                  size;
	mach_vm_size_t                  guardSize;
	vm_map_kernel_flags_t           vmk_flags;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		// guard pages require the VM to pick the address
		if (!(kIOMapAnywhere & ref->options)) {
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		size += 2 * guardSize;
	}
	if (kIOMapAnywhere & ref->options) {
		vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
	} else {
		vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
	}
	vmk_flags.vm_tag = ref->tag;

	/*
	 * Mapping memory into the kernel_map using IOMDs use the data range.
	 * Memory being mapped should not contain kernel pointers.
	 */
	if (map == kernel_map) {
		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
	}

	err = mach_vm_map_kernel(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    vmk_flags,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,                       /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// make the first and last guardSize bytes inaccessible and
			// report the usable region between them
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			ref->mapped += guardSize;
		}
	}

	return err;
}
910 
/*
 * Map the named entries of an IOMemoryReference into a VM map.
 *
 * ref     - memory reference whose mach memory entries are to be mapped.
 * map     - target vm_map; may be substituted (pageable kernel buffers).
 * inoffset- byte offset into the reference at which the mapping starts.
 * size    - number of bytes to map (rounded up to map page size below).
 * options - kIOMap* flags: kIOMapAnywhere, kIOMapOverwrite, kIOMapPrefault,
 *           kIOMapReadOnly, cache-mode bits, etc.
 * inaddr  - in: requested map address unless kIOMapAnywhere is set;
 *           out: actual address of the mapping (the source's sub-page
 *           offset is preserved in the result).
 *
 * Returns kIOReturnSuccess or an error; on failure any VM space this
 * routine allocated itself is deallocated and *inaddr is set to 0.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn        err;
	int64_t         offset = inoffset;
	uint32_t        rangeIdx, entryIdx;
	vm_map_offset_t addr, mapAddr;
	vm_map_offset_t pageOffset, entryOffset, remain, chunk;

	mach_vm_address_t nextAddr;
	mach_vm_size_t    nextLen;
	IOByteCount       physLen;
	IOMemoryEntry   * entry;
	vm_prot_t         prot, memEntryCacheMode;
	IOOptionBits      type;
	IOOptionBits      cacheMode;
	vm_tag_t          tag;
	// for the kIOMapPrefault option.
	upl_page_info_t * pageList = NULL;
	UInt              currentPageIndex = 0;
	bool              didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	// A reference that wraps another reference: map the wrapped one instead.
	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

	// Data-address style entries take the unaligned-capable path.
	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
		return err;
	}

	type = _flags & kIOMemoryTypeMask;

	// Requested protection, clipped by what the reference allows.
	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (typeof(tag))getVMTag(map);

	if (_task) {
		// Find first range for offset
		if (!_rangesCount) {
			return kIOReturnBadArgument;
		}
		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
			if (remain < nextLen) {
				break;
			}
			remain -= nextLen;
		}
	} else {
		// No task: physical descriptor; treat as a single segment.
		rangeIdx = 0;
		remain   = 0;
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen  = size;
	}

	// Offset beyond the end of the ranges is a caller error.
	assert(remain < nextLen);
	if (remain >= nextLen) {
		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
		return kIOReturnBadArgument;
	}

	nextAddr  += remain;
	nextLen   -= remain;
	// Sub-page offset of the source start, in the target map's page size.
#if __ARM_MIXED_PAGE_SIZE__
	pageOffset = (vm_map_page_mask(map) & nextAddr);
#else /* __ARM_MIXED_PAGE_SIZE__ */
	pageOffset = (page_mask & nextAddr);
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	addr       = 0;
	didAlloc   = false;

	if (!(options & kIOMapAnywhere)) {
		// Fixed mapping: caller's address must share the source page offset.
		addr = *inaddr;
		if (pageOffset != (vm_map_page_mask(map) & addr)) {
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
		}
		addr -= pageOffset;
	}

	// find first entry for offset
	// NOTE(review): assumes entries[0].offset <= offset so at least one
	// iteration matches; otherwise entryIdx would wrap — confirm invariant.
	for (entryIdx = 0;
	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
	    entryIdx++) {
	}
	entryIdx--;
	entry = &ref->entries[entryIdx];

	// allocate VM
#if __ARM_MIXED_PAGE_SIZE__
	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
#else
	size = round_page_64(size + pageOffset);
#endif
	if (kIOMapOverwrite & options) {
		// Caller owns the VM range already; just pick the right map.
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		// Reserve the VM range up front (shadows outer 'ref' deliberately).
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = size;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}
		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * operations.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData* dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	remain  = size;
	mapAddr = addr;
	// Result address includes the source's sub-page offset.
	addr    += pageOffset;

	while (remain && (KERN_SUCCESS == err)) {
		entryOffset = offset - entry->offset;
		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
			err = kIOReturnNotAligned;
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
			break;
		}

		if (kIODefaultCache != cacheMode) {
			// MAP_MEM_ONLY call: updates the named entry's cache mode only.
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}

		entryOffset -= pageOffset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags = {
				.vmf_fixed = true,
				.vmf_overwrite = true,
				.vm_tag = tag,
				.vmkf_iokit_acct = true,
			};

			if (chunk > remain) {
				chunk = remain;
			}
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddr,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    prot,                        // cur
				    prot,                        // max
				    &pageList[currentPageIndex],
				    nb_pages);

				if (err || vm_map_page_mask(map) < PAGE_MASK) {
					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
				}
				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
				err = mach_vm_map_kernel(map,
				    &mapAddr,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    false,               // copy
				    prot,               // cur
				    prot,               // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
				break;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			mapAddr  += chunk;
			// Only the first chunk carries a sub-page offset.
			offset   += chunk - pageOffset;
		}
		pageOffset = 0;
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
			break;
		}
	}

	// Unwind our own allocation on failure; never touch caller-owned VM.
	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
	}
	return err;
}
1220 
1221 #define LOGUNALIGN 0
/*
 * Variant of memoryReferenceMap() for MAP_MEM_USE_DATA_ADDR references,
 * where entries may start at unaligned data addresses and the space each
 * entry occupies in the target map must be asked of the VM
 * (mach_memory_entry_map_size) rather than derived from entry->size.
 *
 * Parameters and result contract match memoryReferenceMap(): maps [inoffset,
 * inoffset+size) of 'ref' into 'map', honoring kIOMap* options, returning
 * the mapped address in *inaddr. On failure, VM allocated here is released
 * and *inaddr is set to 0.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMapNew(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn            err;
	int64_t             offset = inoffset;
	uint32_t            entryIdx, firstEntryIdx;
	vm_map_offset_t     addr, mapAddr, mapAddrOut;
	vm_map_offset_t     entryOffset, remain, chunk;

	IOMemoryEntry    * entry;
	vm_prot_t          prot, memEntryCacheMode;
	IOOptionBits       type;
	IOOptionBits       cacheMode;
	vm_tag_t           tag;
	// for the kIOMapPrefault option.
	upl_page_info_t  * pageList = NULL;
	UInt               currentPageIndex = 0;
	bool               didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	// A reference that wraps another reference: map the wrapped one instead.
	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

#if LOGUNALIGN
	printf("MAP offset %qx, %qx\n", inoffset, size);
#endif

	type = _flags & kIOMemoryTypeMask;

	// Requested protection, clipped by what the reference allows.
	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (vm_tag_t) getVMTag(map);

	addr       = 0;
	didAlloc   = false;

	if (!(options & kIOMapAnywhere)) {
		addr = *inaddr;
	}

	// find first entry for offset
	// NOTE(review): assumes entries[0].offset <= offset so at least one
	// iteration matches; otherwise firstEntryIdx would wrap — confirm.
	for (firstEntryIdx = 0;
	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
	    firstEntryIdx++) {
	}
	firstEntryIdx--;

	// calculate required VM space

	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	remain  = size;
	int64_t iteroffset = offset;
	uint64_t mapSize = 0;
	// First pass: sum mach_memory_entry_map_size() over the chunks to learn
	// how much VM space the mapping will occupy in this map.
	while (remain) {
		entryOffset = iteroffset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}

#if LOGUNALIGN
		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
#endif

		chunk = entry->size - entryOffset;
		if (chunk) {
			if (chunk > remain) {
				chunk = remain;
			}
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapSize += entrySize;

			remain -= chunk;
			if (!remain) {
				break;
			}
			iteroffset   += chunk; // - pageOffset;
		}
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			// NOTE(review): the two statements after panic() are unreachable.
			panic("overrun");
			err = kIOReturnOverrun;
			break;
		}
	}

	if (kIOMapOverwrite & options) {
		// Caller owns the VM range already; just pick the right map.
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		// Reserve the VM range up front (shadows outer 'ref' deliberately).
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = mapSize;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}

		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
#if LOGUNALIGN
		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
#endif
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * operations.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData* dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	// Second pass: enter each chunk, advancing by the VM-reported entry size.
	remain   = size;
	mapAddr  = addr;
	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	while (remain && (KERN_SUCCESS == err)) {
#if LOGUNALIGN
		printf("offset %qx, %qx\n", offset, entry->offset);
#endif
		if (kIODefaultCache != cacheMode) {
			// MAP_MEM_ONLY call: updates the named entry's cache mode only.
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}
		entryOffset = offset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
#if LOGUNALIGN
		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
#endif
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags = {
				.vmf_fixed = true,
				.vmf_overwrite = true,
				.vmf_return_data_addr = true,
				.vm_tag = tag,
				.vmkf_iokit_acct = true,
			};

			if (chunk > remain) {
				chunk = remain;
			}
			mapAddrOut = mapAddr;
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    prot,                        // cur
				    prot,                        // max
				    &pageList[currentPageIndex],
				    nb_pages);

				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
#if LOGUNALIGN
				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
#endif
				err = mach_vm_map_kernel(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    false,               // copy
				    prot,               // cur
				    prot,               // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				panic("map enter err %x", err);
				break;
			}
#if LOGUNALIGN
			printf("mapAddr o %qx\n", mapAddrOut);
#endif
			// The first chunk's data address is the caller-visible result.
			if (entryIdx == firstEntryIdx) {
				addr = mapAddrOut;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapAddr += entrySize;
			offset  += chunk;
		}

		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			break;
		}
	}

	if (KERN_SUCCESS != err) {
		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
	}

	// Unwind our own allocation on failure; never touch caller-owned VM.
	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	return err;
}
1541 
1542 uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1543 IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1544 	IOMemoryReference * ref,
1545 	uint64_t          * offset)
1546 {
1547 	kern_return_t kr;
1548 	vm_object_offset_t data_offset = 0;
1549 	uint64_t total;
1550 	uint32_t idx;
1551 
1552 	assert(ref->count);
1553 	if (offset) {
1554 		*offset = (uint64_t) data_offset;
1555 	}
1556 	total = 0;
1557 	for (idx = 0; idx < ref->count; idx++) {
1558 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1559 		    &data_offset);
1560 		if (KERN_SUCCESS != kr) {
1561 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1562 		} else if (0 != data_offset) {
1563 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1564 		}
1565 		if (offset && !idx) {
1566 			*offset = (uint64_t) data_offset;
1567 		}
1568 		total += round_page(data_offset + ref->entries[idx].size);
1569 	}
1570 
1571 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1572 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1573 
1574 	return total;
1575 }
1576 
1577 
1578 IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount,IOByteCount * swappedPageCount)1579 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1580 	IOMemoryReference * ref,
1581 	IOByteCount       * residentPageCount,
1582 	IOByteCount       * dirtyPageCount,
1583 	IOByteCount       * swappedPageCount)
1584 {
1585 	IOReturn        err;
1586 	IOMemoryEntry * entries;
1587 	UInt64 resident, dirty, swapped;
1588 	UInt64 totalResident, totalDirty, totalSwapped;
1589 
1590 	totalResident = totalDirty = totalSwapped = 0;
1591 	err = kIOReturnSuccess;
1592 	entries = ref->entries + ref->count;
1593 	while (entries > &ref->entries[0]) {
1594 		entries--;
1595 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty, &swapped);
1596 		if (KERN_SUCCESS != err) {
1597 			break;
1598 		}
1599 		totalResident += resident;
1600 		totalDirty    += dirty;
1601 		totalSwapped  += swapped;
1602 	}
1603 
1604 	if (residentPageCount) {
1605 		*residentPageCount = totalResident;
1606 	}
1607 	if (dirtyPageCount) {
1608 		*dirtyPageCount    = totalDirty;
1609 	}
1610 	if (swappedPageCount) {
1611 		*swappedPageCount  = totalSwapped;
1612 	}
1613 	return err;
1614 }
1615 
1616 IOReturn
memoryReferenceSetPurgeable(IOMemoryReference * ref,IOOptionBits newState,IOOptionBits * oldState)1617 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1618 	IOMemoryReference * ref,
1619 	IOOptionBits        newState,
1620 	IOOptionBits      * oldState)
1621 {
1622 	IOReturn        err;
1623 	IOMemoryEntry * entries;
1624 	vm_purgable_t   control;
1625 	int             totalState, state;
1626 
1627 	totalState = kIOMemoryPurgeableNonVolatile;
1628 	err = kIOReturnSuccess;
1629 	entries = ref->entries + ref->count;
1630 	while (entries > &ref->entries[0]) {
1631 		entries--;
1632 
1633 		err = purgeableControlBits(newState, &control, &state);
1634 		if (KERN_SUCCESS != err) {
1635 			break;
1636 		}
1637 		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1638 		if (KERN_SUCCESS != err) {
1639 			break;
1640 		}
1641 		err = purgeableStateBits(&state);
1642 		if (KERN_SUCCESS != err) {
1643 			break;
1644 		}
1645 
1646 		if (kIOMemoryPurgeableEmpty == state) {
1647 			totalState = kIOMemoryPurgeableEmpty;
1648 		} else if (kIOMemoryPurgeableEmpty == totalState) {
1649 			continue;
1650 		} else if (kIOMemoryPurgeableVolatile == totalState) {
1651 			continue;
1652 		} else if (kIOMemoryPurgeableVolatile == state) {
1653 			totalState = kIOMemoryPurgeableVolatile;
1654 		} else {
1655 			totalState = kIOMemoryPurgeableNonVolatile;
1656 		}
1657 	}
1658 
1659 	if (oldState) {
1660 		*oldState = totalState;
1661 	}
1662 	return err;
1663 }
1664 
1665 IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1666 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1667 	IOMemoryReference * ref,
1668 	task_t              newOwner,
1669 	int                 newLedgerTag,
1670 	IOOptionBits        newLedgerOptions)
1671 {
1672 	IOReturn        err, totalErr;
1673 	IOMemoryEntry * entries;
1674 
1675 	totalErr = kIOReturnSuccess;
1676 	entries = ref->entries + ref->count;
1677 	while (entries > &ref->entries[0]) {
1678 		entries--;
1679 
1680 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1681 		if (KERN_SUCCESS != err) {
1682 			totalErr = err;
1683 		}
1684 	}
1685 
1686 	return totalErr;
1687 }
1688 
1689 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1690 
1691 OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1692 IOMemoryDescriptor::withAddress(void *      address,
1693     IOByteCount   length,
1694     IODirection direction)
1695 {
1696 	return IOMemoryDescriptor::
1697 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1698 }
1699 
1700 #ifndef __LP64__
1701 OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1702 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1703     IOByteCount  length,
1704     IODirection  direction,
1705     task_t       task)
1706 {
1707 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1708 	if (that) {
1709 		if (that->initWithAddress(address, length, direction, task)) {
1710 			return os::move(that);
1711 		}
1712 	}
1713 	return nullptr;
1714 }
1715 #endif /* !__LP64__ */
1716 
1717 OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1718 IOMemoryDescriptor::withPhysicalAddress(
1719 	IOPhysicalAddress       address,
1720 	IOByteCount             length,
1721 	IODirection             direction )
1722 {
1723 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1724 }
1725 
1726 #ifndef __LP64__
1727 OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1728 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1729     UInt32           withCount,
1730     IODirection      direction,
1731     task_t           task,
1732     bool             asReference)
1733 {
1734 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1735 	if (that) {
1736 		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1737 			return os::move(that);
1738 		}
1739 	}
1740 	return nullptr;
1741 }
1742 #endif /* !__LP64__ */
1743 
1744 OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1745 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1746     mach_vm_size_t length,
1747     IOOptionBits   options,
1748     task_t         task)
1749 {
1750 	IOAddressRange range = { address, length };
1751 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1752 }
1753 
1754 OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1755 IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1756     UInt32           rangeCount,
1757     IOOptionBits     options,
1758     task_t           task)
1759 {
1760 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1761 	if (that) {
1762 		if (task) {
1763 			options |= kIOMemoryTypeVirtual64;
1764 		} else {
1765 			options |= kIOMemoryTypePhysical64;
1766 		}
1767 
1768 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1769 			return os::move(that);
1770 		}
1771 	}
1772 
1773 	return nullptr;
1774 }
1775 
1776 
1777 /*
1778  * withOptions:
1779  *
1780  * Create a new IOMemoryDescriptor. The buffer is made up of several
1781  * virtual address ranges, from a given task.
1782  *
1783  * Passing the ranges as a reference will avoid an extra allocation.
1784  */
1785 OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1786 IOMemoryDescriptor::withOptions(void *          buffers,
1787     UInt32          count,
1788     UInt32          offset,
1789     task_t          task,
1790     IOOptionBits    opts,
1791     IOMapper *      mapper)
1792 {
1793 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1794 
1795 	if (self
1796 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1797 		return nullptr;
1798 	}
1799 
1800 	return os::move(self);
1801 }
1802 
/*
 * Base-class stub: IOMemoryDescriptor itself cannot be initialized this
 * way; concrete subclasses (e.g. IOGeneralMemoryDescriptor) override it.
 * Always fails here.
 */
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	return false;
}
1813 
1814 #ifndef __LP64__
1815 OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1816 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1817     UInt32          withCount,
1818     IODirection     direction,
1819     bool            asReference)
1820 {
1821 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1822 	if (that) {
1823 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1824 			return os::move(that);
1825 		}
1826 	}
1827 	return nullptr;
1828 }
1829 
1830 OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1831 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
1832     IOByteCount             offset,
1833     IOByteCount             length,
1834     IODirection             direction)
1835 {
1836 	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1837 }
1838 #endif /* !__LP64__ */
1839 
1840 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1841 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1842 {
1843 	IOGeneralMemoryDescriptor *origGenMD =
1844 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1845 
1846 	if (origGenMD) {
1847 		return IOGeneralMemoryDescriptor::
1848 		       withPersistentMemoryDescriptor(origGenMD);
1849 	} else {
1850 		return nullptr;
1851 	}
1852 }
1853 
/*
 * Create a persistent descriptor sharing originalMD's memory reference.
 * If the reference created with kIOMemoryReferenceReuse turns out to be
 * originalMD's own reference, the original descriptor itself is returned
 * (retained) and the extra reference from memoryReferenceCreate is
 * dropped; otherwise a new descriptor is built around the new reference.
 */
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// Reuse hit: same underlying reference, so the original descriptor
	// can stand in for the persistent copy. Balance the create above.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	// NOTE(review): if init fails, memRef does not appear to be released
	// on this path — confirm ownership is consumed by initWithOptions.
	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1879 
1880 #ifndef __LP64__
1881 bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1882 IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1883     IOByteCount   withLength,
1884     IODirection withDirection)
1885 {
1886 	_singleRange.v.address = (vm_offset_t) address;
1887 	_singleRange.v.length  = withLength;
1888 
1889 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1890 }
1891 
bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    withLength,
    IODirection  withDirection,
    task_t       withTask)
{
	// Legacy single-range initializer for an arbitrary task's virtual
	// address; the range lives in the inline _singleRange storage.
	_singleRange.v.address = address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
1903 
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            withLength,
	IODirection            withDirection )
{
	// Legacy single-range physical initializer: store the range inline and
	// delegate to the multi-range physical path as a referenced range.
	_singleRange.p.address = address;
	_singleRange.p.length  = withLength;

	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
1915 
1916 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1917 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1918 	IOPhysicalRange * ranges,
1919 	UInt32            count,
1920 	IODirection       direction,
1921 	bool              reference)
1922 {
1923 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1924 
1925 	if (reference) {
1926 		mdOpts |= kIOMemoryAsReference;
1927 	}
1928 
1929 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1930 }
1931 
1932 bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1933 IOGeneralMemoryDescriptor::initWithRanges(
1934 	IOVirtualRange * ranges,
1935 	UInt32           count,
1936 	IODirection      direction,
1937 	task_t           task,
1938 	bool             reference)
1939 {
1940 	IOOptionBits mdOpts = direction;
1941 
1942 	if (reference) {
1943 		mdOpts |= kIOMemoryAsReference;
1944 	}
1945 
1946 	if (task) {
1947 		mdOpts |= kIOMemoryTypeVirtual;
1948 
1949 		// Auto-prepare if this is a kernel memory descriptor as very few
1950 		// clients bother to prepare() kernel memory.
1951 		// But it was not enforced so what are you going to do?
1952 		if (task == kernel_task) {
1953 			mdOpts |= kIOMemoryAutoPrepare;
1954 		}
1955 	} else {
1956 		mdOpts |= kIOMemoryTypePhysical;
1957 	}
1958 
1959 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1960 }
1961 #endif /* !__LP64__ */
1962 
1963 /*
1964  * initWithOptions:
1965  *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1967  * from a given task, several physical ranges, an UPL from the ubc
1968  * system or a uio (may be 64bit) from the BSD subsystem.
1969  *
1970  * Passing the ranges as a reference will avoid an extra allocation.
1971  *
1972  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1973  * existing instance -- note this behavior is not commonly supported in other
1974  * I/O Kit classes, although it is supported here.
1975  */
1976 
/*
 * buffers - type-dependent: an array of virtual or physical ranges, a uio,
 *           a upl_t, or an IOMDPersistentInitData, selected by the
 *           kIOMemoryTypeMask bits of 'options'.
 * count   - number of ranges; for the UPL type, the byte length of the I/O.
 * offset  - starting byte offset into the UPL (UPL type only).
 * task    - owning task for virtual ranges; must be NULL for physical/UPL.
 * options - kIOMemoryType* plus direction and behavior flags.
 * mapper  - DMA mapper, kIOMapperSystem to select the system mapper, or NULL.
 * Returns false on any invalid argument, overflow, or allocation failure.
 */
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// Reject 32-bit virtual ranges for 64-bit tasks; the address would be
	// truncated. (An address of 0 is allowed through for legacy reasons.)
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		// Force a copied (non-referenced) range array since the original
		// owns its range storage.
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Validate the task argument against the memory type.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized  instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		// NOTE: this 'type' intentionally shadows the outer one — it is the
		// OLD descriptor's type, used only for teardown.
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			// Undo any outstanding prepare() calls (physical MDs are
			// permanently wired and never completed).
			while (_wireCount) {
				complete();
			}
		}
		// Free the old range storage unless we merely referenced the
		// caller's array; the representation depends on the old type.
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// Preserve the redirected state across re-initialization; only a
		// non-redirected reinit drops the memory reference and mappings.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options               &= ~(kIOMemoryPreparedReadOnly);
	_flags                 = options;
	_task                  = task;

#ifndef __LP64__
	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-instance state (relevant when re-initializing).
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Allocate or free the prepare lock to match the thread-safety option.
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// Wrap an externally supplied UPL: 'buffers' is the upl_t,
		// 'count' is the byte length and 'offset' the starting offset.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length    = count;
		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		// The UPL must cover the whole requested transfer.
		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice conveniently equal to 1
		iopl.fFlags  = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				// uio is copied so the caller's uio may be freed.
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// A single 64-bit range that fits in 32 bits can be
				// downgraded to the 32-bit representation and stored inline.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (type == kIOMemoryTypeVirtual64) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				// Single ranges use the inline storage; multiples allocate.
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
		_rangesCount = count;

		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		// Sum total length and page count over all ranges, with overflow
		// checks; an early break leaves ind < count, failing below.
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind, _task);
			if (_task) {
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				// Local (non-remote) physical pages must fit a 32-bit ppnum.
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				// Track the highest physical page touched, for DMA bounce
				// decisions later.
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false;                                   /* overflow */
		}
		_length      = totalLength;
		_pages       = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed

		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// Sanity: cannot describe more pages than exist in the system.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Assign VM tags for accounting when user memory (or pageable
			// kernel buffers) are involved and no tag was set yet.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2348 
2349 /*
2350  * free
2351  *
2352  * Free resources.
2353  */
void
free()2355 IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach from the device pager's back-reference first, under the lock,
	// so a concurrent pager callback cannot see a dying descriptor.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical MDs are never complete()d; just tear down any DMA mapping.
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance any outstanding prepare() calls (implied completion).
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free the range storage unless it merely references a caller array;
	// the representation to free depends on the memory type.
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2417 
2418 #ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	// Deprecated legacy (pre-LP64) entry point; deliberately fatal.
	panic("IOGMD::unmapFromKernel deprecated");
}
2424 
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	// Deprecated legacy (pre-LP64) entry point; deliberately fatal.
	panic("IOGMD::mapIntoKernel deprecated");
}
2430 #endif /* !__LP64__ */
2431 
2432 /*
2433  * getDirection:
2434  *
2435  * Get the direction of the transfer.
2436  */
IODirection
IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
	// Legacy 32-bit builds keep an explicit _direction field; honor it when
	// it was set (non-zero).
	if (_direction) {
		return _direction;
	}
#endif /* !__LP64__ */
	// Otherwise the direction is encoded in the low bits of _flags.
	return (IODirection) (_flags & kIOMemoryDirectionMask);
}
2447 
2448 /*
2449  * getLength:
2450  *
2451  * Get the length of the transfer (over all ranges).
2452  */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total byte length over all ranges, computed at init time.
	return _length;
}
2458 
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	// Store the client-defined tag; no interpretation is applied here.
	_tag = tag;
}
2464 
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	// Return the client-defined tag set via setTag().
	return _tag;
}
2470 
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	// Expose the raw option/type flags word (kIOMemory* bits).
	return _flags;
}
2476 
2477 OSObject *
copyContext(const OSSymbol * key) const2478 IOMemoryDescriptor::copyContext(const OSSymbol * key) const
2479 {
2480 	if (reserved && reserved->contextObjects) {
2481 		OSObject * context = reserved->contextObjects->getObject(key);
2482 		if (context) {
2483 			context->retain();
2484 		}
2485 		return context;
2486 	} else {
2487 		return NULL;
2488 	}
2489 }
2490 
OSObject *
IOMemoryDescriptor::copyContext(const char * key) const
{
	// Convenience overload: intern the C string as an OSSymbol and delegate.
	// Caller owns the returned (retained) reference.
	OSSharedPtr<const OSSymbol> sym = OSSymbol::withCString(key);
	return copyContext(sym.get());
}
2497 
2498 OSObject *
copySharingContext(const char * key) const2499 IOMemoryDescriptor::copySharingContext(const char * key) const
2500 {
2501 	OSObject * context = NULL;
2502 	OSObject * obj = copyContext(kIOMemoryDescriptorSharingContextKey);
2503 	OSDictionary * dict = OSDynamicCast(OSDictionary, obj);
2504 	if (dict) {
2505 		context = dict->getObject(key);
2506 		if (context) {
2507 			context->retain();
2508 		}
2509 	}
2510 	OSSafeReleaseNULL(obj);
2511 	return context;
2512 }
2513 
2514 void
setContext(const OSSymbol * key,OSObject * obj)2515 IOMemoryDescriptor::setContext(const OSSymbol * key, OSObject * obj)
2516 {
2517 	if (this->reserved == NULL && obj == NULL) {
2518 		// No existing object, and no object to set
2519 		return;
2520 	}
2521 
2522 	IOMemoryDescriptorReserved * reserved = getKernelReserved();
2523 	if (reserved) {
2524 		if (NULL == reserved->contextObjects) {
2525 			reserved->contextObjects = OSDictionary::withCapacity(2);
2526 		}
2527 		if (obj) {
2528 			reserved->contextObjects->setObject(key, obj);
2529 		} else {
2530 			reserved->contextObjects->removeObject(key);
2531 		}
2532 	}
2533 }
2534 
void
IOMemoryDescriptor::setContext(const char * key, OSObject * obj)
{
	// Convenience overload: intern the C string as an OSSymbol and delegate.
	OSSharedPtr<const OSSymbol> sym = OSSymbol::withCString(key);
	setContext(sym.get(), obj);
}
2541 
OSObject *
IOMemoryDescriptor::copyContext(void) const
{
	// The no-key variant uses kOSBooleanFalse as a sentinel dictionary key
	// (paired with setContext(OSObject *) below).
	return copyContext((const OSSymbol *) kOSBooleanFalse);
}
// Bits for _internalIOMDFlags.
enum {
	kIOMemoryDescriptorInternalFlagsSharing = 0x0001, // a sharing context has been set
};
2550 
void
IOMemoryDescriptor::setSharingContext(const char * key, OSObject * obj)
{
	// Wrap obj in a one-entry dictionary stored under the well-known
	// sharing-context key, then flag that a sharing context exists.
	// NOTE(review): withCString/withCapacity results are used unchecked —
	// presumably allocation failure is not expected here; verify.
	OSSharedPtr<const OSSymbol> sym = OSSymbol::withCString(key);
	OSSharedPtr<OSDictionary> dict = OSDictionary::withCapacity(1);

	dict->setObject(sym.get(), obj);
	setContext(kIOMemoryDescriptorSharingContextKey, dict.get());
	OSBitOrAtomic16(kIOMemoryDescriptorInternalFlagsSharing, &_internalIOMDFlags);
}
2561 
bool
IOMemoryDescriptor::hasSharingContext(void)
{
	// True once setSharingContext() has run on this descriptor.
	return 0 != (kIOMemoryDescriptorInternalFlagsSharing & _internalIOMDFlags);
}
2567 
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	// The no-key variant uses kOSBooleanFalse as a sentinel dictionary key
	// (paired with copyContext(void) above).
	setContext((const OSSymbol *) kOSBooleanFalse, obj);
}
2573 
2574 #ifndef __LP64__
2575 #pragma clang diagnostic push
2576 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2577 
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
{
	addr64_t physAddr = 0;

	// Wire the memory just long enough to resolve the physical address of
	// the segment at 'offset'; a failed prepare() yields 0.
	if (prepare() == kIOReturnSuccess) {
		physAddr = getPhysicalSegment64( offset, length );
		complete();
	}

	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}
2591 
2592 #pragma clang diagnostic pop
2593 
2594 #endif /* !__LP64__ */
2595 
2596 
/*
 * Copy up to 'length' bytes starting at 'offset' in the descriptor into the
 * kernel-virtual buffer 'bytes'. Returns the number of bytes copied (0 on
 * any range/flag violation).
 */
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-host) memory cannot be copied through copypv.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Walk physical segments, copying each into the destination buffer.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t        srcAddr64;
		IOByteCount     srcLen;
		int             options = cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap;

		// Ask for the raw (unmapped-by-IOMMU) physical address.
		IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
		srcAddr64 = getPhysicalSegment(offset, &srcLen, getPhysSegmentOptions);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv takes an unsigned int size; clamp to its maximum.
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}


		kern_return_t copy_ret = copypv(srcAddr64, dstAddr, (unsigned int) srcLen, options);
#pragma unused(copy_ret)

		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// All bytes should have been copied once the bounds checks passed.
	assert(!remaining);

	return length - remaining;
}
2663 
/*
 * Copy up to 'length' bytes from the kernel-virtual buffer 'bytes' into the
 * descriptor starting at 'inoffset'. A NULL 'bytes' zero-fills instead.
 * Returns the number of bytes written (0 on any range/flag violation).
 */
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	// Refuse writes to memory prepared read-only.
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-host) memory cannot be written through copypv.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	// Walk physical segments, filling each from the source buffer.
	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;
		int         options = cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap;

		// Ask for the raw (unmapped-by-IOMMU) physical address.
		IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
		dstAddr64 = getPhysicalSegment(offset, &dstLen, getPhysSegmentOptions);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv takes an unsigned int size; clamp to its maximum.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}


		if (!srcAddr) {
			// NULL source buffer means zero-fill the segment.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			kern_return_t copy_ret = copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen, options);
#pragma unused(copy_ret)
			srcAddr   += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// All bytes should have been written once the bounds checks passed.
	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	// bzero_phys bypassed copypv's flush; flush caches explicitly.
	if (!srcAddr) {
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2747 
2748 #ifndef __LP64__
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	// Deprecated legacy (pre-LP64) entry point; deliberately fatal.
	panic("IOGMD::setPosition deprecated");
}
2754 #endif /* !__LP64__ */
2755 
// Monotonic ID sources, bumped with OSIncrementAtomic64. Preparation IDs
// start at 1<<32 — presumably to stay clear of the small reserved
// kIOPreparationID* sentinel values; confirm against IOMemoryDescriptor.h.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2758 
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	ioGMDData *dataP;

	// IDs are only meaningful while the memory is wired.
	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	// Physical descriptors have no per-wire data; fall back to the base
	// class's per-descriptor ID stored in 'reserved'.
	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	// Lazily assign a fresh ID; the CAS keeps concurrent callers from
	// both installing one (the loser's increment is simply wasted).
	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
	}
	return dataP->fPreparationID;
}
2784 
void
IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
{
	// Drop the task reference recorded for the creating task, if any.
	if (reserved->creator) {
		task_deallocate(reserved->creator);
		reserved->creator = NULL;
	}

	// NOTE(review): assumes contextObjects is a smart pointer (OSSharedPtr)
	// so this assignment releases the dictionary; if it were a raw pointer
	// this would leak — confirm against IOMemoryDescriptorReserved.
	reserved->contextObjects = NULL;
}
2795 
IOMemoryDescriptorReserved *
IOMemoryDescriptor::getKernelReserved( void )
{
	// Lazily allocate the expansion (reserved) area on first use;
	// IOMallocType returns zeroed storage. May return NULL on failure.
	if (!reserved) {
		reserved = IOMallocType(IOMemoryDescriptorReserved);
	}
	return reserved;
}
2804 
void
IOMemoryDescriptor::setPreparationID( void )
{
	// Lazily assign a fresh preparation ID; the CAS keeps concurrent
	// callers from both installing one.
	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
	}
}
2813 
2814 uint64_t
getPreparationID(void)2815 IOMemoryDescriptor::getPreparationID( void )
2816 {
2817 	if (reserved) {
2818 		return reserved->preparationID;
2819 	} else {
2820 		return kIOPreparationIDUnsupported;
2821 	}
2822 }
2823 
void
IOMemoryDescriptor::setDescriptorID( void )
{
	// Lazily assign a unique descriptor ID; the CAS keeps concurrent
	// callers from both installing one.
	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
	}
}
2832 
2833 uint64_t
getDescriptorID(void)2834 IOMemoryDescriptor::getDescriptorID( void )
2835 {
2836 	setDescriptorID();
2837 
2838 	if (reserved) {
2839 		return reserved->descriptorID;
2840 	} else {
2841 		return kIODescriptorIDInvalid;
2842 	}
2843 }
2844 
2845 IOReturn
ktraceEmitPhysicalSegments(void)2846 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2847 {
2848 	if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2849 		return kIOReturnSuccess;
2850 	}
2851 
2852 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2853 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2854 		return kIOReturnBadArgument;
2855 	}
2856 
2857 	uint64_t descriptorID = getDescriptorID();
2858 	assert(descriptorID != kIODescriptorIDInvalid);
2859 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2860 		return kIOReturnBadArgument;
2861 	}
2862 
2863 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2864 
2865 #if __LP64__
2866 	static const uint8_t num_segments_page = 8;
2867 #else
2868 	static const uint8_t num_segments_page = 4;
2869 #endif
2870 	static const uint8_t num_segments_long = 2;
2871 
2872 	IOPhysicalAddress segments_page[num_segments_page];
2873 	IOPhysicalRange   segments_long[num_segments_long];
2874 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2875 	memset(segments_long, 0, sizeof(segments_long));
2876 
2877 	uint8_t segment_page_idx = 0;
2878 	uint8_t segment_long_idx = 0;
2879 
2880 	IOPhysicalRange physical_segment;
2881 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2882 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2883 
2884 		if (physical_segment.length == 0) {
2885 			break;
2886 		}
2887 
2888 		/**
2889 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
2890 		 * buffer memory, pack segment events according to the following.
2891 		 *
2892 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
2893 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
2894 		 *
2895 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2896 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2897 		 * - unmapped pages will have a ppn of MAX_INT_32
2898 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
2899 		 * - address_0, length_0, address_0, length_1
2900 		 * - unmapped pages will have an address of 0
2901 		 *
2902 		 * During each iteration do the following depending on the length of the mapping:
2903 		 * 1. add the current segment to the appropriate queue of pending segments
2904 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2905 		 * 1a. if FALSE emit and reset all events in the previous queue
2906 		 * 2. check if we have filled up the current queue of pending events
2907 		 * 2a. if TRUE emit and reset all events in the pending queue
2908 		 * 3. after completing all iterations emit events in the current queue
2909 		 */
2910 
2911 		bool emit_page = false;
2912 		bool emit_long = false;
2913 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2914 			segments_page[segment_page_idx] = physical_segment.address;
2915 			segment_page_idx++;
2916 
2917 			emit_long = segment_long_idx != 0;
2918 			emit_page = segment_page_idx == num_segments_page;
2919 
2920 			if (os_unlikely(emit_long)) {
2921 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2922 				    segments_long[0].address, segments_long[0].length,
2923 				    segments_long[1].address, segments_long[1].length);
2924 			}
2925 
2926 			if (os_unlikely(emit_page)) {
2927 #if __LP64__
2928 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2929 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2930 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2931 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2932 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2933 #else
2934 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2935 				    (ppnum_t) atop_32(segments_page[1]),
2936 				    (ppnum_t) atop_32(segments_page[2]),
2937 				    (ppnum_t) atop_32(segments_page[3]),
2938 				    (ppnum_t) atop_32(segments_page[4]));
2939 #endif
2940 			}
2941 		} else {
2942 			segments_long[segment_long_idx] = physical_segment;
2943 			segment_long_idx++;
2944 
2945 			emit_page = segment_page_idx != 0;
2946 			emit_long = segment_long_idx == num_segments_long;
2947 
2948 			if (os_unlikely(emit_page)) {
2949 #if __LP64__
2950 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2951 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2952 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2953 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2954 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2955 #else
2956 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2957 				    (ppnum_t) atop_32(segments_page[1]),
2958 				    (ppnum_t) atop_32(segments_page[2]),
2959 				    (ppnum_t) atop_32(segments_page[3]),
2960 				    (ppnum_t) atop_32(segments_page[4]));
2961 #endif
2962 			}
2963 
2964 			if (emit_long) {
2965 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2966 				    segments_long[0].address, segments_long[0].length,
2967 				    segments_long[1].address, segments_long[1].length);
2968 			}
2969 		}
2970 
2971 		if (os_unlikely(emit_page)) {
2972 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
2973 			segment_page_idx = 0;
2974 		}
2975 
2976 		if (os_unlikely(emit_long)) {
2977 			memset(segments_long, 0, sizeof(segments_long));
2978 			segment_long_idx = 0;
2979 		}
2980 	}
2981 
2982 	if (segment_page_idx != 0) {
2983 		assert(segment_long_idx == 0);
2984 #if __LP64__
2985 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2986 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2987 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2988 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2989 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2990 #else
2991 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2992 		    (ppnum_t) atop_32(segments_page[1]),
2993 		    (ppnum_t) atop_32(segments_page[2]),
2994 		    (ppnum_t) atop_32(segments_page[3]),
2995 		    (ppnum_t) atop_32(segments_page[4]));
2996 #endif
2997 	} else if (segment_long_idx != 0) {
2998 		assert(segment_page_idx == 0);
2999 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
3000 		    segments_long[0].address, segments_long[0].length,
3001 		    segments_long[1].address, segments_long[1].length);
3002 	}
3003 
3004 	return kIOReturnSuccess;
3005 }
3006 
3007 void
setVMTags(uint32_t kernelTag,uint32_t userTag)3008 IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
3009 {
3010 	_kernelTag = (vm_tag_t) kernelTag;
3011 	_userTag   = (vm_tag_t) userTag;
3012 }
3013 
3014 uint32_t
getVMTag(vm_map_t map)3015 IOMemoryDescriptor::getVMTag(vm_map_t map)
3016 {
3017 	if (vm_kernel_map_is_kernel(map)) {
3018 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
3019 			return (uint32_t) _kernelTag;
3020 		}
3021 	} else {
3022 		if (VM_KERN_MEMORY_NONE != _userTag) {
3023 			return (uint32_t) _userTag;
3024 		}
3025 	}
3026 	return IOMemoryTag(map);
3027 }
3028 
/*
 * Back end for IODMACommand operations on a general memory descriptor.
 *
 * The low bits of 'op' (under kIOMDDMACommandOperationMask) select the
 * operation: DMA map/unmap, map-spec accumulation, characteristics query,
 * DMA-active refcounting, or segment walking.  Any bits outside the mask are
 * passed through as per-operation parameters ('params').
 *
 * 'vData' points to an operation-specific argument structure of at least
 * 'dataSize' bytes; each branch validates the size before touching it.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOReturn err = kIOReturnSuccess;
	DMACommandOps params;
	// Method is const, but several ops mutate cached mapping state.
	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
	ioGMDData *dataP;

	// Split 'op' into parameter bits and the operation selector.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	// kIOMDDMAMap: establish (or hand out a cached) DMA mapping.
	if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries && data->fMapper) {
			bool remap, keepMap;
			dataP = getDataP(_memoryEntries);

			// Tighten the cached mapping constraints to the strictest
			// requirements seen so far.
			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
			}
			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
			}

			// The IOMD-owned mapping is only kept/reused for whole-length
			// system-mapper mappings.
			keepMap = (data->fMapper == gIOSystemMapper);
			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockLock(_prepareLock);
			}

			// Remap when the cached mapping cannot be reused or no longer
			// satisfies the address-width/alignment constraints.
			remap = (!keepMap);
			remap |= (dataP->fDMAMapNumAddressBits < 64)
			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
			remap |= (dataP->fDMAMapAlignment > page_size);

			if (remap || !dataP->fMappedBaseValid) {
				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
					dataP->fMappedBase      = data->fAlloc;
					dataP->fMappedBaseValid = true;
					dataP->fMappedLength    = data->fAllocLength;
					data->fAllocLength      = 0;    // IOMD owns the alloc now
				}
			} else {
				// Reuse the cached mapping; record the use for accounting.
				data->fAlloc = dataP->fMappedBase;
				data->fAllocLength = 0;         // give out IOMD map
				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
			}

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockUnlock(_prepareLock);
			}
		}
		return err;
	}
	// kIOMDDMAUnmap: tear down a mapping previously handed out above.
	if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		if (_pages) {
			err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
		}

		// NOTE(review): err from dmaUnmap is discarded and success is always
		// reported (same pattern as IOMemoryDescriptor::dmaCommandOperation);
		// presumably intentional -- confirm.
		return kIOReturnSuccess;
	}

	// kIOMDAddDMAMapSpec: fold additional mapping constraints into the
	// cached DMA map spec without creating a mapping.
	if (kIOMDAddDMAMapSpec == op) {
		if (dataSize < sizeof(IODMAMapSpecification)) {
			return kIOReturnUnderrun;
		}

		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries) {
			dataP = getDataP(_memoryEntries);
			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->numAddressBits;
			}
			if (data->alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->alignment;
			}
		}
		return kIOReturnSuccess;
	}

	// kIOMDGetCharacteristics: report length, range count, page count,
	// direction and preparation state.
	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = _length;
		data->fSGCount = _rangesCount;
		data->fPages = _pages;
		data->fDirection = getDirection();
		if (!_wireCount) {
			data->fIsPrepared = false;
		} else {
			data->fIsPrepared = true;
			data->fHighestPage = _highestPage;
			if (_memoryEntries) {
				dataP = getDataP(_memoryEntries);
				ioPLBlock *ioplList = getIOPLList(dataP);
				UInt count = getNumIOPL(_memoryEntries, dataP);
				// A single IOPL lets us report the first page's alignment.
				if (count == 1) {
					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
				}
			}
		}

		return kIOReturnSuccess;
	} else if (kIOMDDMAActive == op) {
		// kIOMDDMAActive: params != 0 takes a DMA reference, params == 0
		// drops one; underflow is fatal.
		if (params) {
			int16_t prior;
			prior = OSAddAtomic16(1, &md->_dmaReferences);
			if (!prior) {
				md->_mapName = NULL;
			}
		} else {
			if (md->_dmaReferences) {
				OSAddAtomic16(-1, &md->_dmaReferences);
			} else {
				panic("_dmaReferences underflow");
			}
		}
	} else if (kIOMDWalkSegments != op) {
		return kIOReturnBadArgument;
	}

	// Get the next segment
	// State shared across successive kIOMDWalkSegments calls; fOffset2Index /
	// fNextOffset / fIndex cache the previous walk position so a sequential
	// walk does not rescan from the start.
	struct InternalState {
		IOMDDMAWalkSegmentArgs fIO;
		mach_vm_size_t fOffset2Index;
		mach_vm_size_t fNextOffset;
		UInt fIndex;
	} *isP;

	// Find the next segment
	if (dataSize < sizeof(*isP)) {
		return kIOReturnUnderrun;
	}

	isP = (InternalState *) vData;
	uint64_t offset = isP->fIO.fOffset;
	uint8_t mapped = isP->fIO.fMapped;
	uint64_t mappedBase;

	if (mapped && (kIOMemoryRemote & _flags)) {
		return kIOReturnNotAttached;
	}

	// Lazily create the system-mapper mapping if a mapped walk is requested
	// and none is cached yet.
	if (IOMapper::gSystem && mapped
	    && (!(kIOMemoryHostOnly & _flags))
	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		dataP = getDataP(_memoryEntries);
		if (dataP->fMapper) {
			IODMAMapSpecification mapSpec;
			bzero(&mapSpec, sizeof(mapSpec));
			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
			mapSpec.alignment = dataP->fDMAMapAlignment;
			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
			if (kIOReturnSuccess != err) {
				return err;
			}
			dataP->fMappedBaseValid = true;
		}
	}

	// Fall back to an unmapped walk if no usable mapping exists.
	if (mapped) {
		if (IOMapper::gSystem
		    && (!(kIOMemoryHostOnly & _flags))
		    && _memoryEntries
		    && (dataP = getDataP(_memoryEntries))
		    && dataP->fMappedBaseValid) {
			mappedBase = dataP->fMappedBase;
		} else {
			mapped = 0;
		}
	}

	if (offset >= _length) {
		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
	}

	// Validate the previous offset
	UInt ind;
	mach_vm_size_t off2Ind = isP->fOffset2Index;
	if (!params
	    && offset
	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
		ind = isP->fIndex;
	} else {
		ind = off2Ind = 0; // Start from beginning
	}
	mach_vm_size_t length;
	UInt64 address;

	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
		// Physical address based memory descriptor
		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}

		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#ifndef __LP64__
	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
		// Physical address based memory descriptor
		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}
		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#endif /* !__LP64__ */
	else {
		// Virtual/UPL-backed descriptor: walk the IOPL list. The do/while(false)
		// exists so 'continue' can be used as an early exit.
		do {
			if (!_wireCount) {
				panic("IOGMD: not wired for the IODMACommand");
			}

			assert(_memoryEntries);

			dataP = getDataP(_memoryEntries);
			const ioPLBlock *ioplList = getIOPLList(dataP);
			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
			upl_page_info_t *pageList = getPageList(dataP);

			assert(numIOPLs > 0);

			// Scan through iopl info blocks looking for block containing offset
			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
				ind++;
			}

			// Go back to actual range as search goes past it
			ioPLBlock ioplInfo = ioplList[ind - 1];
			off2Ind = ioplInfo.fIOMDOffset;

			if (ind < numIOPLs) {
				length = ioplList[ind].fIOMDOffset;
			} else {
				length = _length;
			}
			length -= offset;       // Remainder within iopl

			// Subtract offset till this iopl in total list
			offset -= off2Ind;

			// If a mapped address is requested and this is a pre-mapped IOPL
			// then just need to compute an offset relative to the mapped base.
			if (mapped) {
				offset += (ioplInfo.fPageOffset & PAGE_MASK);
				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
				continue; // Done leave do/while(false) now
			}

			// The offset is rebased into the current iopl.
			// Now add the iopl 1st page offset.
			offset += ioplInfo.fPageOffset;

			// For external UPLs the fPageInfo field points directly to
			// the upl's upl_page_info_t array.
			if (ioplInfo.fFlags & kIOPLExternUPL) {
				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
			} else {
				pageList = &pageList[ioplInfo.fPageInfo];
			}

			// Check for direct device non-paged memory
			if (ioplInfo.fFlags & kIOPLOnDevice) {
				address = ptoa_64(pageList->phys_addr) + offset;
				continue; // Done leave do/while(false) now
			}

			// Now we need compute the index into the pageList
			UInt pageInd = atop_32(offset);
			offset &= PAGE_MASK;

			// Compute the starting address of this segment
			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
			if (!pageAddr) {
				panic("!pageList phys_addr");
			}

			address = ptoa_64(pageAddr) + offset;

			// length is currently set to the length of the remainider of the iopl.
			// We need to check that the remainder of the iopl is contiguous.
			// This is indicated by pageList[ind].phys_addr being sequential.
			IOByteCount contigLength = PAGE_SIZE - offset;
			while (contigLength < length
			    && ++pageAddr == pageList[++pageInd].phys_addr) {
				contigLength += PAGE_SIZE;
			}

			if (contigLength < length) {
				length = contigLength;
			}

			assert(address);
			assert(length);
		} while (false);
	}

	// Update return values and state
	isP->fIO.fIOVMAddr = address;
	isP->fIO.fLength   = length;
	isP->fIndex        = ind;
	isP->fOffset2Index = off2Ind;
	isP->fNextOffset   = isP->fIO.fOffset + length;

	return kIOReturnSuccess;
}
3412 
/*
 * Return the physical (or DMA-mapped) address of the segment containing
 * 'offset', and in *lengthOfSegment the number of contiguous bytes starting
 * there.  Returns 0 (with *lengthOfSegment = 0) when offset is out of range
 * or the segment cannot be resolved.
 *
 * 'options' selects the address space: _kIOMemorySourceSegment walks the raw
 * source ranges; kIOMemoryMapperNone requests untranslated physical
 * addresses; otherwise the segment walk may go through the system mapper.
 */
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Normal path: resolve via the DMA segment-walk machinery.
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				// Translate the mapped address back to physical, and extend
				// the length across physically contiguous pages (capped at
				// the original segment length).
				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	// A zero address means "no segment"; report a zero length with it.
	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3519 
3520 IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)3521 IOGeneralMemoryDescriptor::readBytes
3522 (IOByteCount offset, void *bytes, IOByteCount length)
3523 {
3524 	IOByteCount count = super::readBytes(offset, bytes, length);
3525 	return count;
3526 }
3527 
3528 IOByteCount
writeBytes(IOByteCount offset,const void * bytes,IOByteCount withLength)3529 IOGeneralMemoryDescriptor::writeBytes
3530 (IOByteCount offset, const void* bytes, IOByteCount withLength)
3531 {
3532 	IOByteCount count = super::writeBytes(offset, bytes, withLength);
3533 	return count;
3534 }
3535 
3536 #ifndef __LP64__
3537 #pragma clang diagnostic push
3538 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3539 
3540 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3541 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3542 {
3543 	addr64_t address = 0;
3544 
3545 	if (options & _kIOMemorySourceSegment) {
3546 		address = getSourceSegment(offset, lengthOfSegment);
3547 	} else if (options & kIOMemoryMapperNone) {
3548 		address = getPhysicalSegment64(offset, lengthOfSegment);
3549 	} else {
3550 		address = getPhysicalSegment(offset, lengthOfSegment);
3551 	}
3552 
3553 	return address;
3554 }
3555 #pragma clang diagnostic pop
3556 
3557 addr64_t
getPhysicalSegment64(IOByteCount offset,IOByteCount * lengthOfSegment)3558 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3559 {
3560 	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3561 }
3562 
3563 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3564 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3565 {
3566 	addr64_t    address = 0;
3567 	IOByteCount length  = 0;
3568 
3569 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3570 
3571 	if (lengthOfSegment) {
3572 		length = *lengthOfSegment;
3573 	}
3574 
3575 	if ((address + length) > 0x100000000ULL) {
3576 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3577 		    address, (long) length, (getMetaClass())->getClassName());
3578 	}
3579 
3580 	return (IOPhysicalAddress) address;
3581 }
3582 
/*
 * Legacy (!__LP64__) base-class 64-bit lookup: obtain the 32-bit segment,
 * then, if a system mapper exists, translate it to a true physical address
 * and re-derive the contiguous length by probing successive pages through
 * the mapper (capped at the originally reported length).
 */
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	IOPhysicalAddress phys32;
	IOByteCount       length;
	addr64_t          phys64;
	IOMapper *        mapper = NULL;

	phys32 = getPhysicalSegment(offset, lengthOfSegment);
	if (!phys32) {
		return 0;
	}

	if (gIOSystemMapper) {
		mapper = gIOSystemMapper;
	}

	if (mapper) {
		IOByteCount origLen;

		// Translate, then extend across physically contiguous pages.
		phys64 = mapper->mapToPhysicalAddress(phys32);
		origLen = *lengthOfSegment;
		length = page_size - (phys64 & (page_size - 1));
		while ((length < origLen)
		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
			length += page_size;
		}
		if (length > origLen) {
			length = origLen;
		}

		*lengthOfSegment = length;
	} else {
		// No mapper: the 32-bit address is already physical.
		phys64 = (addr64_t) phys32;
	}

	return phys64;
}
3621 
3622 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3623 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3624 {
3625 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3626 }
3627 
3628 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3629 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3630 {
3631 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3632 }
3633 
3634 #pragma clang diagnostic push
3635 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3636 
3637 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3638 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3639     IOByteCount * lengthOfSegment)
3640 {
3641 	if (_task == kernel_task) {
3642 		return (void *) getSourceSegment(offset, lengthOfSegment);
3643 	} else {
3644 		panic("IOGMD::getVirtualSegment deprecated");
3645 	}
3646 
3647 	return NULL;
3648 }
3649 #pragma clang diagnostic pop
3650 #endif /* !__LP64__ */
3651 
/*
 * Generic (base-class) back end for IODMACommand operations.  The low bits
 * of 'op' (under kIOMDDMACommandOperationMask) select the operation; bits
 * outside the mask are split off into 'params' (unused here).  Unlike the
 * IOGeneralMemoryDescriptor variant, segment walking is implemented directly
 * on top of getPhysicalSegment().
 */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// Method is const, but map/unmap ops mutate descriptor state.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// This base implementation only supports unmapped walks.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Resolve the first segment, then coalesce any physically
		// contiguous segments that follow it.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): err from dmaUnmap is discarded and success is always
		// reported (same pattern as the IOGeneralMemoryDescriptor variant);
		// presumably intentional -- confirm.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3723 
/*
 * Change the purgeable state of the memory backing this descriptor,
 * optionally returning the previous state in *oldState.
 *
 * Descriptors with a memory reference delegate to the superclass; otherwise
 * the state is applied directly via vm_map_purgable_control() on the first
 * (and only supported) source range in the owning task's map.  Remote
 * descriptors are rejected.
 */
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		// do/while(false): 'break' exits early while still reaching the
		// unlock below.
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate the IOKit purgeable request into VM control/state
			// values, then apply it.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Convert the VM state back into IOKit option bits.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3787 
3788 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3789 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3790     IOOptionBits * oldState )
3791 {
3792 	IOReturn err = kIOReturnNotReady;
3793 
3794 	if (kIOMemoryThreadSafe & _flags) {
3795 		LOCK;
3796 	}
3797 	if (_memRef) {
3798 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3799 	}
3800 	if (kIOMemoryThreadSafe & _flags) {
3801 		UNLOCK;
3802 	}
3803 
3804 	return err;
3805 }
3806 
3807 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3808 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3809     int newLedgerTag,
3810     IOOptionBits newLedgerOptions )
3811 {
3812 	IOReturn      err = kIOReturnSuccess;
3813 
3814 	assert(!(kIOMemoryRemote & _flags));
3815 	if (kIOMemoryRemote & _flags) {
3816 		return kIOReturnNotAttached;
3817 	}
3818 
3819 	if (iokit_iomd_setownership_enabled == FALSE) {
3820 		return kIOReturnUnsupported;
3821 	}
3822 
3823 	if (_memRef) {
3824 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3825 	} else {
3826 		err = kIOReturnUnsupported;
3827 	}
3828 
3829 	return err;
3830 }
3831 
3832 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3833 IOMemoryDescriptor::setOwnership( task_t newOwner,
3834     int newLedgerTag,
3835     IOOptionBits newLedgerOptions )
3836 {
3837 	IOReturn err = kIOReturnNotReady;
3838 
3839 	assert(!(kIOMemoryRemote & _flags));
3840 	if (kIOMemoryRemote & _flags) {
3841 		return kIOReturnNotAttached;
3842 	}
3843 
3844 	if (iokit_iomd_setownership_enabled == FALSE) {
3845 		return kIOReturnUnsupported;
3846 	}
3847 
3848 	if (kIOMemoryThreadSafe & _flags) {
3849 		LOCK;
3850 	}
3851 	if (_memRef) {
3852 		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3853 	} else {
3854 		IOMultiMemoryDescriptor * mmd;
3855 		IOSubMemoryDescriptor   * smd;
3856 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3857 			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3858 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3859 			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3860 		}
3861 	}
3862 	if (kIOMemoryThreadSafe & _flags) {
3863 		UNLOCK;
3864 	}
3865 
3866 	return err;
3867 }
3868 
3869 
3870 uint64_t
getDMAMapLength(uint64_t * offset)3871 IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3872 {
3873 	uint64_t length;
3874 
3875 	if (_memRef) {
3876 		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3877 	} else {
3878 		IOByteCount       iterate, segLen;
3879 		IOPhysicalAddress sourceAddr, sourceAlign;
3880 
3881 		if (kIOMemoryThreadSafe & _flags) {
3882 			LOCK;
3883 		}
3884 		length = 0;
3885 		iterate = 0;
3886 		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3887 			sourceAlign = (sourceAddr & page_mask);
3888 			if (offset && !iterate) {
3889 				*offset = sourceAlign;
3890 			}
3891 			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3892 			iterate += segLen;
3893 		}
3894 		if (!iterate) {
3895 			length = getLength();
3896 			if (offset) {
3897 				*offset = 0;
3898 			}
3899 		}
3900 		if (kIOMemoryThreadSafe & _flags) {
3901 			UNLOCK;
3902 		}
3903 	}
3904 
3905 	return length;
3906 }
3907 
3908 IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount,IOByteCount * swappedPageCount)3909 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3910     IOByteCount * dirtyPageCount,
3911     IOByteCount * swappedPageCount )
3912 {
3913 	IOReturn err = kIOReturnNotReady;
3914 
3915 	assert(!(kIOMemoryRemote & _flags));
3916 	if (kIOMemoryRemote & _flags) {
3917 		return kIOReturnNotAttached;
3918 	}
3919 
3920 	if (kIOMemoryThreadSafe & _flags) {
3921 		LOCK;
3922 	}
3923 	if (_memRef) {
3924 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount, swappedPageCount);
3925 	} else {
3926 		IOMultiMemoryDescriptor * mmd;
3927 		IOSubMemoryDescriptor   * smd;
3928 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3929 			err = smd->getPageCounts(residentPageCount, dirtyPageCount, swappedPageCount);
3930 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3931 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount, swappedPageCount);
3932 		}
3933 	}
3934 	if (kIOMemoryThreadSafe & _flags) {
3935 		UNLOCK;
3936 	}
3937 
3938 	return err;
3939 }
3940 
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
    IOByteCount * dirtyPageCount )
{
	// Convenience overload for callers that do not need the swapped count.
	return getPageCounts(residentPageCount, dirtyPageCount, NULL);
}
3947 
3948 
3949 #if defined(__arm64__)
3950 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3951 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3952 #else /* defined(__arm64__) */
3953 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3954 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3955 #endif /* defined(__arm64__) */
3956 
3957 static void
SetEncryptOp(addr64_t pa,unsigned int count)3958 SetEncryptOp(addr64_t pa, unsigned int count)
3959 {
3960 	ppnum_t page, end;
3961 
3962 	page = (ppnum_t) atop_64(round_page_64(pa));
3963 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3964 	for (; page < end; page++) {
3965 		pmap_clear_noencrypt(page);
3966 	}
3967 }
3968 
3969 static void
ClearEncryptOp(addr64_t pa,unsigned int count)3970 ClearEncryptOp(addr64_t pa, unsigned int count)
3971 {
3972 	ppnum_t page, end;
3973 
3974 	page = (ppnum_t) atop_64(round_page_64(pa));
3975 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
3976 	for (; page < end; page++) {
3977 		pmap_set_noencrypt(page);
3978 	}
3979 }
3980 
// Apply a cache-maintenance or encryption-marking operation to the
// physical pages backing [offset, offset + length) of this descriptor.
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	// Legacy per-segment callback: physical address + 32-bit byte count.
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	// Extended callback also receives the remaining byte count and writes a
	// status through 'result'; a non-zero status ends the walk early.
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Select the per-segment operation; unknown options fall through with
	// both pointers NULL and are rejected below.
	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Coherent-I/O configurations: one call with zero length suffices.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the request to the descriptor's bounds.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// Callbacks take 32-bit counts: clamp to the largest
		// page-multiple that fits in unsigned int.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		// NOTE(review): this clamps 'remaining' itself rather than a copy
		// passed to func_ext, so for >4GB requests the loop's bookkeeping
		// is truncated — presumably acceptable here; confirm intent.
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			if (res != 0x0UL) {
				// Callback signalled it handled (or aborted) the rest.
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	// Underrun if a segment lookup failed before the request was satisfied.
	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4095 
4096 /*
4097  *
4098  */
4099 
4100 #if defined(__i386__) || defined(__x86_64__)
4101 
4102 extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4103 
4104 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4105  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4106  * kernel non-text data -- should we just add another range instead?
4107  */
4108 #define io_kernel_static_start  vm_kernel_stext
4109 #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4110 
4111 #elif defined(__arm64__)
4112 
4113 extern vm_offset_t              static_memory_end;
4114 
4115 #if defined(__arm64__)
4116 #define io_kernel_static_start vm_kext_base
4117 #else /* defined(__arm64__) */
4118 #define io_kernel_static_start vm_kernel_stext
4119 #endif /* defined(__arm64__) */
4120 
4121 #define io_kernel_static_end    static_memory_end
4122 
4123 #else
4124 #error io_kernel_static_end is undefined for this architecture
4125 #endif
4126 
4127 static kern_return_t
io_get_kernel_static_upl(vm_map_t,uintptr_t offset,upl_size_t * upl_size,unsigned int * page_offset,upl_t * upl,upl_page_info_array_t page_list,unsigned int * count,ppnum_t * highest_page)4128 io_get_kernel_static_upl(
4129 	vm_map_t                /* map */,
4130 	uintptr_t               offset,
4131 	upl_size_t              *upl_size,
4132 	unsigned int            *page_offset,
4133 	upl_t                   *upl,
4134 	upl_page_info_array_t   page_list,
4135 	unsigned int            *count,
4136 	ppnum_t                 *highest_page)
4137 {
4138 	unsigned int pageCount, page;
4139 	ppnum_t phys;
4140 	ppnum_t highestPage = 0;
4141 
4142 	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4143 	if (pageCount > *count) {
4144 		pageCount = *count;
4145 	}
4146 	*upl_size = (upl_size_t) ptoa_64(pageCount);
4147 
4148 	*upl = NULL;
4149 	*page_offset = ((unsigned int) page_mask & offset);
4150 
4151 	for (page = 0; page < pageCount; page++) {
4152 		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4153 		if (!phys) {
4154 			break;
4155 		}
4156 		page_list[page].phys_addr = phys;
4157 		page_list[page].free_when_done = 0;
4158 		page_list[page].absent    = 0;
4159 		page_list[page].dirty     = 0;
4160 		page_list[page].precious  = 0;
4161 		page_list[page].device    = 0;
4162 		if (phys > highestPage) {
4163 			highestPage = phys;
4164 		}
4165 	}
4166 
4167 	*highest_page = highestPage;
4168 
4169 	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4170 }
4171 
// Wire down the (virtual) memory backing this descriptor by creating one or
// more UPLs covering each source range, recording them as ioPLBlocks in
// _memoryEntries. On failure every UPL created so far is aborted and the
// entry data is reset. Called under the prepare lock from prepare().
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOReturn error = kIOReturnSuccess;
	ioGMDData *dataP;
	upl_page_info_array_t pageInfo;
	ppnum_t mapBase;
	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	mach_vm_size_t numBytesWired = 0;

	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

	// No explicit direction requested: fall back to the descriptor's own.
	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
		forDirection = (IODirection) (forDirection | getDirection());
	}

	dataP = getDataP(_memoryEntries);
	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
	switch (kIODirectionOutIn & forDirection) {
	case kIODirectionOut:
		// Pages do not need to be marked as dirty on commit
		uplFlags = UPL_COPYOUT_FROM;
		dataP->fDMAAccess = kIODMAMapReadAccess;
		break;

	case kIODirectionIn:
		dataP->fDMAAccess = kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;

	default:
		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;
	}

	if (_wireCount) {
		// Already wired: only validate that a read-only preparation is not
		// being re-used for a write.
		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
			    (size_t)VM_KERNEL_ADDRPERM(this));
			error = kIOReturnNotWritable;
		}
	} else {
		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
		IOMapper *mapper;

		mapper = dataP->fMapper;
		dataP->fMappedBaseValid = dataP->fMappedBase = 0;

		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
		tag = _kernelTag;
		if (VM_KERN_MEMORY_NONE == tag) {
			tag = IOMemoryTag(kernel_map);
		}

		if (kIODirectionPrepareToPhys32 & forDirection) {
			if (!mapper) {
				uplFlags |= UPL_NEED_32BIT_ADDR;
			}
			if (dataP->fDMAMapNumAddressBits > 32) {
				dataP->fDMAMapNumAddressBits = 32;
			}
		}
		if (kIODirectionPrepareNoFault    & forDirection) {
			uplFlags |= UPL_REQUEST_NO_FAULT;
		}
		if (kIODirectionPrepareNoZeroFill & forDirection) {
			uplFlags |= UPL_NOZEROFILLIO;
		}
		if (kIODirectionPrepareNonCoherent & forDirection) {
			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
		}

		mapBase = 0;

		// Note that appendBytes(NULL) zeros the data up to the desired length
		// (the size must also fit the 32-bit fields used downstream).
		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
		if (uplPageSize > ((unsigned int)uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		dataP = NULL;

		// Find the appropriate vm_map for the given task
		vm_map_t curMap;
		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
			curMap = NULL;
		} else {
			curMap = get_task_map(_task);
		}

		// Iterate over the vector of virtual ranges
		Ranges vec = _ranges;
		unsigned int pageIndex  = 0;
		IOByteCount mdOffset    = 0;
		ppnum_t highestPage     = 0;
		bool         byteAlignUPL;

		IOMemoryEntry * memRefEntry = NULL;
		if (_memRef) {
			memRefEntry = &_memRef->entries[0];
			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
		} else {
			byteAlignUPL = true;
		}

		for (UInt range = 0; mdOffset < _length; range++) {
			ioPLBlock iopl;
			mach_vm_address_t startPage, startPageOffset;
			mach_vm_size_t    numBytes;
			ppnum_t highPage = 0;

			if (_memRef) {
				if (range >= _memRef->count) {
					panic("memRefEntry");
				}
				memRefEntry = &_memRef->entries[range];
				numBytes    = memRefEntry->size;
				// startPage of -1 marks "no virtual address" for this entry.
				startPage   = -1ULL;
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = (memRefEntry->start & PAGE_MASK);
				}
			} else {
				// Get the startPage address and length of vec[range]
				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = startPage & PAGE_MASK;
					startPage = trunc_page_64(startPage);
				}
			}
			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
			numBytes += startPageOffset;

			if (mapper) {
				iopl.fMappedPage = mapBase + pageIndex;
			} else {
				iopl.fMappedPage = 0;
			}

			// Iterate over the current range, creating UPLs
			while (numBytes) {
				vm_address_t kernelStart = (vm_address_t) startPage;
				vm_map_t theMap;
				if (curMap) {
					theMap = curMap;
				} else if (_memRef) {
					theMap = NULL;
				} else {
					assert(_task == kernel_task);
					theMap = IOPageableMapForAddress(kernelStart);
				}

				// ioplFlags is an in/out parameter
				upl_control_flags_t ioplFlags = uplFlags;
				dataP = getDataP(_memoryEntries);
				pageInfo = getPageList(dataP);
				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

				mach_vm_size_t ioplPhysSize;
				upl_size_t     ioplSize;
				unsigned int   numPageInfo;

				// Determine the physical size covered so the UPL request
				// can be sized correctly.
				if (_memRef) {
					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
				} else {
					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
				}
				if (error != KERN_SUCCESS) {
					if (_memRef) {
						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
					} else {
						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
					}
					printf("entry size error %d\n", error);
					goto abortExit;
				}
				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
				numPageInfo = atop_32(ioplPhysSize);
				if (byteAlignUPL) {
					if (numBytes > ioplPhysSize) {
						ioplSize = ((typeof(ioplSize))ioplPhysSize);
					} else {
						ioplSize = ((typeof(ioplSize))numBytes);
					}
				} else {
					ioplSize = ((typeof(ioplSize))ioplPhysSize);
				}

				// Create the UPL via one of three paths: a named memory
				// entry, statically-mapped kernel memory, or a vm_map.
				if (_memRef) {
					memory_object_offset_t entryOffset;

					entryOffset = mdOffset;
					if (byteAlignUPL) {
						entryOffset = (entryOffset - memRefEntry->offset);
					} else {
						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
					}
					if (ioplSize > (memRefEntry->size - entryOffset)) {
						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
					}
					error = memory_object_iopl_request(memRefEntry->entry,
					    entryOffset,
					    &ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				} else if ((theMap == kernel_map)
				    && (kernelStart >= io_kernel_static_start)
				    && (kernelStart < io_kernel_static_end)) {
					error = io_get_kernel_static_upl(theMap,
					    kernelStart,
					    &ioplSize,
					    &iopl.fPageOffset,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &highPage);
				} else {
					assert(theMap);
					error = vm_map_create_upl(theMap,
					    startPage,
					    (upl_size_t*)&ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				}

				if (error != KERN_SUCCESS) {
					traceInterval.setEndArg2(error);
					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
					goto abortExit;
				}

				assert(ioplSize);

				if (iopl.fIOPL) {
					highPage = upl_get_highest_page(iopl.fIOPL);
				}
				if (highPage > highestPage) {
					highestPage = highPage;
				}

				if (baseInfo->device) {
					numPageInfo = 1;
					iopl.fFlags = kIOPLOnDevice;
				} else {
					iopl.fFlags = 0;
				}

				if (byteAlignUPL) {
					if (iopl.fIOPL) {
						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
					}
					if (startPage != (mach_vm_address_t)-1) {
						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
						startPage -= iopl.fPageOffset;
					}
					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
					numBytes += iopl.fPageOffset;
				}

				iopl.fIOMDOffset = mdOffset;
				iopl.fPageInfo = pageIndex;

				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
					// Clean up partial created and unsaved iopl
					if (iopl.fIOPL) {
						upl_abort(iopl.fIOPL, 0);
						upl_deallocate(iopl.fIOPL);
					}
					error = kIOReturnNoMemory;
					traceInterval.setEndArg2(error);
					goto abortExit;
				}
				dataP = NULL;

				// Check for a multiple iopl's in one virtual range
				pageIndex += numPageInfo;
				mdOffset -= iopl.fPageOffset;
				numBytesWired += ioplSize;
				if (ioplSize < numBytes) {
					numBytes -= ioplSize;
					if (startPage != (mach_vm_address_t)-1) {
						startPage += ioplSize;
					}
					mdOffset += ioplSize;
					iopl.fPageOffset = 0;
					if (mapper) {
						iopl.fMappedPage = mapBase + pageIndex;
					}
				} else {
					mdOffset += numBytes;
					break;
				}
			}
		}

		_highestPage = highestPage;
		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);

		if (UPL_COPYOUT_FROM & uplFlags) {
			_flags |= kIOMemoryPreparedReadOnly;
		}
		traceInterval.setEndCodes(numBytesWired, error);
	}

#if IOTRACKING
	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
		dataP = getDataP(_memoryEntries);
		if (!dataP->fWireTracking.link.next) {
			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
		}
	}
#endif /* IOTRACKING */

	return error;

abortExit:
	{
		// Undo every UPL recorded so far and reset the entry data.
		dataP = getDataP(_memoryEntries);
		UInt done = getNumIOPL(_memoryEntries, dataP);
		ioPLBlock *ioplList = getIOPLList(dataP);

		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
			if (ioplList[ioplIdx].fIOPL) {
				upl_abort(ioplList[ioplIdx].fIOPL, 0);
				upl_deallocate(ioplList[ioplIdx].fIOPL);
			}
		}
		_memoryEntries->setLength(computeDataSize(0, 0));
	}

	// Translate Mach errors to IOKit equivalents for callers.
	if (error == KERN_FAILURE) {
		error = kIOReturnCannotWire;
	} else if (error == KERN_MEMORY_ERROR) {
		error = kIOReturnNoResources;
	}

	return error;
}
4530 
4531 bool
initMemoryEntries(size_t size,IOMapper * mapper)4532 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4533 {
4534 	ioGMDData * dataP;
4535 
4536 	if (size > UINT_MAX) {
4537 		return false;
4538 	}
4539 	if (!_memoryEntries) {
4540 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4541 		if (!_memoryEntries) {
4542 			return false;
4543 		}
4544 	} else if (!_memoryEntries->initWithCapacity(size)) {
4545 		return false;
4546 	}
4547 
4548 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4549 	dataP = getDataP(_memoryEntries);
4550 
4551 	if (mapper == kIOMapperWaitSystem) {
4552 		IOMapper::checkForSystemMapper();
4553 		mapper = IOMapper::gSystem;
4554 	}
4555 	dataP->fMapper               = mapper;
4556 	dataP->fPageCnt              = 0;
4557 	dataP->fMappedBase           = 0;
4558 	dataP->fDMAMapNumAddressBits = 64;
4559 	dataP->fDMAMapAlignment      = 0;
4560 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4561 	dataP->fCompletionError      = false;
4562 	dataP->fMappedBaseValid      = false;
4563 
4564 	return true;
4565 }
4566 
4567 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4568 IOMemoryDescriptor::dmaMap(
4569 	IOMapper                    * mapper,
4570 	IOMemoryDescriptor          * memory,
4571 	IODMACommand                * command,
4572 	const IODMAMapSpecification * mapSpec,
4573 	uint64_t                      offset,
4574 	uint64_t                      length,
4575 	uint64_t                    * mapAddress,
4576 	uint64_t                    * mapLength)
4577 {
4578 	IOReturn err;
4579 	uint32_t mapOptions;
4580 
4581 	mapOptions = 0;
4582 	mapOptions |= kIODMAMapReadAccess;
4583 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4584 		mapOptions |= kIODMAMapWriteAccess;
4585 	}
4586 
4587 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4588 	    mapSpec, command, NULL, mapAddress, mapLength);
4589 
4590 	if (kIOReturnSuccess == err) {
4591 		dmaMapRecord(mapper, command, *mapLength);
4592 	}
4593 
4594 	return err;
4595 }
4596 
4597 void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4598 IOMemoryDescriptor::dmaMapRecord(
4599 	IOMapper                    * mapper,
4600 	IODMACommand                * command,
4601 	uint64_t                      mapLength)
4602 {
4603 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4604 	kern_allocation_name_t alloc;
4605 	int16_t                prior;
4606 
4607 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4608 		kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4609 	}
4610 
4611 	if (!command) {
4612 		return;
4613 	}
4614 	prior = OSAddAtomic16(1, &_dmaReferences);
4615 	if (!prior) {
4616 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4617 			_mapName  = alloc;
4618 			mapLength = _length;
4619 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4620 		} else {
4621 			_mapName = NULL;
4622 		}
4623 	}
4624 }
4625 
4626 IOReturn
dmaUnmap(IOMapper * mapper,IODMACommand * command,uint64_t offset,uint64_t mapAddress,uint64_t mapLength)4627 IOMemoryDescriptor::dmaUnmap(
4628 	IOMapper                    * mapper,
4629 	IODMACommand                * command,
4630 	uint64_t                      offset,
4631 	uint64_t                      mapAddress,
4632 	uint64_t                      mapLength)
4633 {
4634 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4635 	IOReturn ret;
4636 	kern_allocation_name_t alloc;
4637 	kern_allocation_name_t mapName;
4638 	int16_t prior;
4639 
4640 	mapName = NULL;
4641 	prior = 0;
4642 	if (command) {
4643 		mapName = _mapName;
4644 		if (_dmaReferences) {
4645 			prior = OSAddAtomic16(-1, &_dmaReferences);
4646 		} else {
4647 			panic("_dmaReferences underflow");
4648 		}
4649 	}
4650 
4651 	if (!mapLength) {
4652 		traceInterval.setEndArg1(kIOReturnSuccess);
4653 		return kIOReturnSuccess;
4654 	}
4655 
4656 	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4657 
4658 	if ((alloc = mapper->fAllocName)) {
4659 		kern_allocation_update_size(alloc, -mapLength, NULL);
4660 		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4661 			mapLength = _length;
4662 			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4663 		}
4664 	}
4665 
4666 	traceInterval.setEndArg1(ret);
4667 	return ret;
4668 }
4669 
// DMA-map this descriptor. Full-length mappings of wired virtual memory use
// the prepared page list directly; physical descriptors and partial ranges
// fall back to the superclass implementation.
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn          err = kIOReturnSuccess;
	ioGMDData *       dataP;
	IOOptionBits      type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	// Host-only memory never gets an IOMMU mapping.
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		// Physical descriptors and sub-range requests: generic path.
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t          mapOptions = 0;

		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// Page list exactly covers the length and starts page-aligned.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList      = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		if (kIOReturnSuccess == err) {
			// Track the mapping for allocation accounting.
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4743 
4744 /*
4745  * prepare
4746  *
4747  * Prepare the memory for an I/O transfer.  This involves paging in
4748  * the memory, if necessary, and wiring it down for the duration of
4749  * the transfer.  The complete() method completes the processing of
4750  * the memory after the I/O transfer finishes.  This method needn't
4751  * called for non-pageable memory.
4752  */
4753 
// Wire the descriptor's memory for I/O. Physical descriptors are already
// wired and return immediately; virtual descriptors go through wireVirtual()
// under _prepareLock, and the wire count tracks nested prepare()/complete().
IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
	IOReturn     error    = kIOReturnSuccess;
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical memory needs no wiring.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}

	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
		// Optionally refuse to wire while the task is being hard-throttled.
		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
			error = kIOReturnNotReady;
			goto finish;
		}
		error = wireVirtual(forDirection);
	}

	if (kIOReturnSuccess == error) {
		// First successful prepare: apply one-time side effects.
		if (1 == ++_wireCount) {
			if (kIOMemoryClearEncrypt & _flags) {
				performOperation(kIOMemoryClearEncrypted, 0, _length);
			}

			ktraceEmitPhysicalSegments();
		}
	}

finish:

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}
	traceInterval.setEndArg1(error);

	return error;
}
4803 
4804 /*
4805  * complete
4806  *
4807  * Complete processing of the memory after an I/O transfer finishes.
4808  * This method should not be called unless a prepare was previously
4809  * issued; the prepare() and complete() must occur in pairs, before
4810  * before and after an I/O transfer involving pageable memory.
4811  */
4812 
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData  * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	// Physical-address descriptors were never wired; nothing to undo.
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// complete() without a matching prepare() is a caller bug.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		// Remember a failed transfer so the final completion aborts
		// (rather than commits) the wired pages.
		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		// Undo the clear-encrypt done on the first prepare() before the
		// wire count drops to zero.
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				// Still wired: just mark the pages' data valid.
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Final completion: tear down DMA mappings and UPLs.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Shrink the entries data back to its empty size.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
4917 
4918 IOOptionBits
memoryReferenceCreateOptions(IOOptionBits options,IOMemoryMap * mapping)4919 IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4920 {
4921 	IOOptionBits createOptions = 0;
4922 
4923 	if (!(kIOMap64Bit & options)) {
4924 		panic("IOMemoryDescriptor::makeMapping !64bit");
4925 	}
4926 	if (!(kIOMapReadOnly & options)) {
4927 		createOptions |= kIOMemoryReferenceWrite;
4928 #if DEVELOPMENT || DEBUG
4929 		if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4930 		    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4931 			OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4932 		}
4933 #endif
4934 	}
4935 	return createOptions;
4936 }
4937 
4938 /*
4939  * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
4940  * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
4941  * creation.
4942  */
4943 
IOMemoryMap *
IOGeneralMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOReturn err = kIOReturnSuccess;
	IOMemoryMap * mapping;

	// Create the copy-on-write named entry ahead of the global lock taken
	// by the superclass, since creation may allocate real pages.
	if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
		struct IOMemoryReference * newRef;
		// In the 64-bit path __address actually carries the IOMemoryMap *.
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
		if (kIOReturnSuccess == err) {
			// Another thread may have raced us in; free the losing ref.
			if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
				memoryReferenceFree(newRef);
			}
		}
	}
	if (kIOReturnSuccess != err) {
		return NULL;
	}
	mapping = IOMemoryDescriptor::makeMapping(
		owner, __intoTask, __address, options, __offset, __length);

#if IOTRACKING
	if ((mapping == (IOMemoryMap *) __address)
	    && (0 == (kIOMapStatic & mapping->fOptions))
	    && (NULL == mapping->fSuperMap)
	    && ((kIOTracking & gIOKitDebug) || _task)) {
		// only dram maps in the default on development case
		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
	}
#endif /* IOTRACKING */

	return mapping;
}
4983 
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t  err;

	// In the 64-bit path *__address carries the IOMemoryMap being filled in.
	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t    range0Len = 0;

	// Reject windows that fall outside the descriptor.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
	}

	// mapping source == dest? (could be much better)
	// Mapping the memory back into its own task at any address: reuse the
	// original range as a static mapping instead of creating a new one.
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the backing VM named entry.
	if (!_memRef) {
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}


	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		do{
			upl_t               redirUPL2;
			upl_size_t          size;
			upl_control_flags_t flags;
			unsigned int        lock_count;

			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// Fully release the recursive memory lock (however deep)
			// around the transpose, then re-take it to the same depth.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: map the named entry, then populate any device pager.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
		if ((err == KERN_SUCCESS) && pager) {
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				// No cache mode requested: inherit the buffer's mode.
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
5138 
5139 #if IOTRACKING
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
	// Manual offsetof so it works on the non-POD IOMemoryMap class.
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	// Recover the owning IOMemoryMap from the embedded tracking node.
	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	// Only report while the task's VM map is still the one we mapped into.
	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task    = map->fAddressTask;
	*address = map->fAddress;
	*size    = map->fLength;

	return kIOReturnSuccess;
}
5158 #endif /* IOTRACKING */
5159 
5160 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5161 IOGeneralMemoryDescriptor::doUnmap(
5162 	vm_map_t                addressMap,
5163 	IOVirtualAddress        __address,
5164 	IOByteCount             __length )
5165 {
5166 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5167 	IOReturn ret;
5168 	ret = super::doUnmap(addressMap, __address, __length);
5169 	traceInterval.setEndArg1(ret);
5170 	return ret;
5171 }
5172 
5173 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5174 
// The IOMemoryMap methods below use OSObject as 'super'.
#undef super
#define super OSObject
5177 
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Reserved vtable pad slots for future binary-compatible expansion.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5188 
5189 /* ex-inline function implementation */
IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
{
	// Physical address of the first segment of the mapping.
	return getPhysicalSegment( 0, NULL );
}
5195 
5196 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5197 
5198 bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5199 IOMemoryMap::init(
5200 	task_t                  intoTask,
5201 	mach_vm_address_t       toAddress,
5202 	IOOptionBits            _options,
5203 	mach_vm_size_t          _offset,
5204 	mach_vm_size_t          _length )
5205 {
5206 	if (!intoTask) {
5207 		return false;
5208 	}
5209 
5210 	if (!super::init()) {
5211 		return false;
5212 	}
5213 
5214 	fAddressMap  = get_task_map(intoTask);
5215 	if (!fAddressMap) {
5216 		return false;
5217 	}
5218 	vm_map_reference(fAddressMap);
5219 
5220 	fAddressTask = intoTask;
5221 	fOptions     = _options;
5222 	fLength      = _length;
5223 	fOffset      = _offset;
5224 	fAddress     = toAddress;
5225 
5226 	return true;
5227 }
5228 
5229 bool
setMemoryDescriptor(IOMemoryDescriptor * _memory,mach_vm_size_t _offset)5230 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5231 {
5232 	if (!_memory) {
5233 		return false;
5234 	}
5235 
5236 	if (!fSuperMap) {
5237 		if ((_offset + fLength) > _memory->getLength()) {
5238 			return false;
5239 		}
5240 		fOffset = _offset;
5241 	}
5242 
5243 
5244 	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5245 	if (fMemory) {
5246 		if (fMemory != _memory) {
5247 			fMemory->removeMapping(this);
5248 		}
5249 	}
5250 	fMemory = os::move(tempval);
5251 
5252 	return true;
5253 }
5254 
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// Base class cannot map; concrete subclasses override this.
	return kIOReturnUnsupported;
}
5265 
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	// While the descriptor is redirected, block the faulting thread until
	// redirect(false) clears the flag and issues the WAKEUP.
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5282 
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A physically contiguous pager is always populated from offset 0.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset  = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Split off the sub-page offset; the pager works page-granular.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Device pagers should not be backing managed (pageable) RAM.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Contiguous pagers take one whole-segment call; otherwise one
		// call per page.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next physical segment.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Residual bytes mean the descriptor ran out of segments early.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5385 
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	// 64-bit path only: __address carries the IOMemoryMap, __length is 0.
	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	if (kIOMapOverwrite & mapping->fOptions) {
		// Overwrite mappings occupy someone else's range; don't free it.
		err = KERN_SUCCESS;
	} else {
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			// Pageable kernel buffers live in a pageable submap.
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5430 
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn            err = kIOReturnSuccess;
	IOMemoryMap *       mapping = NULL;
	OSSharedPtr<OSIterator>        iter;

	LOCK;

	// Record the redirection state; handleFault() sleeps while it is set.
	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t   pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			// Redirect every mapping; when lifting a global redirect,
			// eagerly repopulate kernel device-pager mappings.
			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	// Wake any threads blocked in handleFault().
	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5485 
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			// Nothing to do for unmapped or map-less objects.
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Unless this mapping belongs to the protected task
			// (safeTask) or is static, pull its pages so later accesses
			// fault and block in handleFault().
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// Write-combined mappings are toggled to/from
				// inhibit-cache rather than being unmapped.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// Propagate to the descriptor for physical memory when redirecting
	// on behalf of a safeTask and the state actually changes.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5529 
5530 IOReturn
unmap(void)5531 IOMemoryMap::unmap( void )
5532 {
5533 	IOReturn    err;
5534 
5535 	LOCK;
5536 
5537 	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5538 	    && (0 == (kIOMapStatic & fOptions))) {
5539 		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5540 	} else {
5541 		err = kIOReturnSuccess;
5542 	}
5543 
5544 	if (fAddressMap) {
5545 		vm_map_deallocate(fAddressMap);
5546 		fAddressMap = NULL;
5547 	}
5548 
5549 	fAddress = 0;
5550 
5551 	UNLOCK;
5552 
5553 	return err;
5554 }
5555 
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	// A user client may have requested a full unmap on task exit.
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	// The task's VM map is going away; drop our reference to it.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress     = 0;
	UNLOCK;
}
5577 
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	// Request that taskDied() fully unmap when the owning task exits.
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5584 
5585 // Overload the release mechanism.  All mappings must be a member
5586 // of a memory descriptors _mappings set.  This means that we
5587 // always have 2 references on a mapping.  When either of these mappings
5588 // are released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// Free when only the descriptor's _mappings set reference remains
	// (freeWhen == 2), under the global memory lock.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5596 
void
IOMemoryMap::free()
{
	// Tear down the VM mapping first; it needs fAddressMap and fMemory.
	unmap();

	// Detach from the descriptor's mapping set before dropping our ref.
	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// Commit and release any outstanding redirection UPL.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5620 
IOByteCount
IOMemoryMap::getLength()
{
	// Length of the mapped range in bytes.
	return fLength;
}
5626 
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	// On 32-bit kernels, warn when a 64-bit address would be truncated.
	if (fSuperMap) {
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5642 
#ifndef __LP64__
// Legacy 32-bit accessors; LP64 builds expose these inline elsewhere.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	// Start address of the mapping in the target task.
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	// Length of the mapped range in bytes.
	return fLength;
}
#endif /* !__LP64__ */
5656 
5657 
5658 task_t
getAddressTask()5659 IOMemoryMap::getAddressTask()
5660 {
5661 	if (fSuperMap) {
5662 		return fSuperMap->getAddressTask();
5663 	} else {
5664 		return fAddressTask;
5665 	}
5666 }
5667 
IOOptionBits
IOMemoryMap::getMapOptions()
{
	// kIOMap* option bits this mapping was created with.
	return fOptions;
}
5673 
IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	// Borrowed pointer to the backing descriptor (no reference taken).
	return fMemory.get();
}
5679 
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t              task      = newMapping->getAddressTask();
	mach_vm_address_t   toAddress = newMapping->fAddress;
	IOOptionBits        _options  = newMapping->fOptions;
	mach_vm_size_t      _offset   = newMapping->fOffset;
	mach_vm_size_t      _length   = newMapping->fLength;

	// Must target the same task / VM map.
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// Protection and guard attributes must match exactly.
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	// A specific cache-mode request must match; default cache is a wildcard.
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must match this mapping's address.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// The requested window must lie entirely inside this mapping.
	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		// Exact match: share this mapping outright (retained for caller).
		retain();
		newMapping = this;
	} else {
		// Partial overlap: make the new mapping a submap of this one.
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset   = fOffset + _offset;
		newMapping->fAddress  = fAddress + _offset;
	}

	return newMapping;
}
5729 
5730 IOReturn
wireRange(uint32_t options,mach_vm_size_t offset,mach_vm_size_t length)5731 IOMemoryMap::wireRange(
5732 	uint32_t                options,
5733 	mach_vm_size_t          offset,
5734 	mach_vm_size_t          length)
5735 {
5736 	IOReturn kr;
5737 	mach_vm_address_t start = trunc_page_64(fAddress + offset);
5738 	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
5739 	vm_prot_t prot;
5740 
5741 	prot = (kIODirectionOutIn & options);
5742 	if (prot) {
5743 		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5744 	} else {
5745 		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5746 	}
5747 
5748 	return kr;
5749 }
5750 
5751 
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress   address;

	// Translate the map-relative offset to a descriptor offset and
	// delegate to the backing memory descriptor, under the global lock.
	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5771 
5772 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5773 
5774 #undef super
5775 #define super OSObject
5776 
5777 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5778 
5779 void
initialize(void)5780 IOMemoryDescriptor::initialize( void )
5781 {
5782 	if (NULL == gIOMemoryLock) {
5783 		gIOMemoryLock = IORecursiveLockAlloc();
5784 	}
5785 
5786 	gIOLastPage = IOGetLastPageNumber();
5787 }
5788 
5789 void
free(void)5790 IOMemoryDescriptor::free( void )
5791 {
5792 	if (_mappings) {
5793 		_mappings.reset();
5794 	}
5795 
5796 	if (reserved) {
5797 		cleanKernelReserved(reserved);
5798 		IOFreeType(reserved, IOMemoryDescriptorReserved);
5799 		reserved = NULL;
5800 	}
5801 	super::free();
5802 }
5803 
5804 OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5805 IOMemoryDescriptor::setMapping(
5806 	task_t                  intoTask,
5807 	IOVirtualAddress        mapAddress,
5808 	IOOptionBits            options )
5809 {
5810 	return createMappingInTask( intoTask, mapAddress,
5811 	           options | kIOMapStatic,
5812 	           0, getLength());
5813 }
5814 
5815 OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5816 IOMemoryDescriptor::map(
5817 	IOOptionBits            options )
5818 {
5819 	return createMappingInTask( kernel_task, 0,
5820 	           options | kIOMapAnywhere,
5821 	           0, getLength());
5822 }
5823 
#ifndef __LP64__
// Legacy 32-bit map entry point taking a fixed virtual address. Refuses a
// fixed address in a 64-bit task, where callers must use
// ::createMappingInTask() instead (logged with a backtrace to aid porting).
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
5842 
5843 OSSharedPtr<IOMemoryMap>
createMappingInTask(task_t intoTask,mach_vm_address_t atAddress,IOOptionBits options,mach_vm_size_t offset,mach_vm_size_t length)5844 IOMemoryDescriptor::createMappingInTask(
5845 	task_t                  intoTask,
5846 	mach_vm_address_t       atAddress,
5847 	IOOptionBits            options,
5848 	mach_vm_size_t          offset,
5849 	mach_vm_size_t          length)
5850 {
5851 	IOMemoryMap * result;
5852 	IOMemoryMap * mapping;
5853 
5854 	if (0 == length) {
5855 		length = getLength();
5856 	}
5857 
5858 	mapping = new IOMemoryMap;
5859 
5860 	if (mapping
5861 	    && !mapping->init( intoTask, atAddress,
5862 	    options, offset, length )) {
5863 		mapping->release();
5864 		mapping = NULL;
5865 	}
5866 
5867 	if (mapping) {
5868 		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5869 	} else {
5870 		result = nullptr;
5871 	}
5872 
5873 #if DEBUG
5874 	if (!result) {
5875 		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5876 		    this, atAddress, (uint32_t) options, offset, length);
5877 	}
5878 #endif
5879 
5880 	// already retained through makeMapping
5881 	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5882 
5883 	return retval;
5884 }
5885 
#ifndef __LP64__ // there is only a 64 bit version for LP64
// 32-bit compatibility shim: widen the IOByteCount offset and forward to
// the mach_vm_size_t variant below.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
5895 
// Redirect this mapping onto new backing memory. While no replacement is
// supplied, access to the old pages is blocked via a UPL taken with
// UPL_BLOCK_ACCESS; once newBackingMemory arrives, the mapping is remapped
// in place and the blocking UPL is committed and released.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	// Only a mapping that was actually created (has an address and a map)
	// can be redirected.
	if (fAddress && fAddressMap) {
		do{
			// Remember whether the current backing memory is a physical
			// descriptor; those get their pages unmapped explicitly below.
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// First redirect on a sole-owner memory entry: take an IOPL with
			// UPL_BLOCK_ACCESS so faults stall until the UPL is committed.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					// NOTE(review): deliberately dead branch retained from
					// older redirect logic.
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					fOffset = 0;
					// kIOMapUnique|kIOMapReference remaps this object in
					// place; any result other than 'this' is a failure.
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Commit and drop the blocking UPL so accesses resume.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
5956 
// Realize a mapping request. With kIOMap64Bit (mandatory on LP64), __address
// actually carries an IOMemoryMap request object built by
// createMappingInTask; __offset/__length are legacy parameters. The request
// object is consumed: on failure it is released and NULL is returned; on
// success the returned map carries a retain for the caller.
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result  = NULL;

	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Static mapping: adopt the request as-is, no VM work needed.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount       physLen;

//	    if (owner != this)		continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// For a physical descriptor, build a fresh descriptor over
				// the contiguous physical range so it can be mapped uniquely;
				// bail out (failure) if the range is not contiguous.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
					        addMapping(result);
					        result->setMemoryDescriptor(this, offset);
					        return true;
					}
					return false;
				});
			}
			// Either an existing mapping was reused, or the caller asked
			// only to reference one (kIOMapReference) and none matched;
			// either way the request object is no longer needed.
			if (result || (options & kIOMapReference)) {
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		// Perform the actual VM mapping; on success the request object
		// becomes the result, otherwise it is consumed (released).
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
6054 
6055 void
addMapping(IOMemoryMap * mapping)6056 IOMemoryDescriptor::addMapping(
6057 	IOMemoryMap * mapping )
6058 {
6059 	if (mapping) {
6060 		if (NULL == _mappings) {
6061 			_mappings = OSSet::withCapacity(1);
6062 		}
6063 		if (_mappings) {
6064 			_mappings->setObject( mapping );
6065 		}
6066 	}
6067 }
6068 
6069 void
removeMapping(IOMemoryMap * mapping)6070 IOMemoryDescriptor::removeMapping(
6071 	IOMemoryMap * mapping )
6072 {
6073 	if (_mappings) {
6074 		_mappings->removeObject( mapping);
6075 	}
6076 }
6077 
// Store the IOMapper option bits for this descriptor.
void
IOMemoryDescriptor::setMapperOptions( uint16_t options)
{
	_iomapperOptions = options;
}
6083 
// Return the IOMapper option bits previously set via setMapperOptions().
uint16_t
IOMemoryDescriptor::getMapperOptions( void )
{
	return _iomapperOptions;
}
6089 
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
// These legacy 32-bit entry points always fail; they exist only for binary
// compatibility with old clients.
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    length,
    IODirection  direction,
    task_t       task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            length,
	IODirection            direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    bool             asReference)
{
	return false;
}

// Obsolete accessor; always NULL.
void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
6146 
6147 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6148 
6149 bool
serialize(OSSerialize * s) const6150 IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
6151 {
6152 	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
6153 	OSSharedPtr<OSObject>           values[2] = {NULL};
6154 	OSSharedPtr<OSArray>            array;
6155 
6156 	struct SerData {
6157 		user_addr_t address;
6158 		user_size_t length;
6159 	};
6160 
6161 	unsigned int index;
6162 
6163 	IOOptionBits type = _flags & kIOMemoryTypeMask;
6164 
6165 	if (s == NULL) {
6166 		return false;
6167 	}
6168 
6169 	array = OSArray::withCapacity(4);
6170 	if (!array) {
6171 		return false;
6172 	}
6173 
6174 	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6175 	if (!vcopy) {
6176 		return false;
6177 	}
6178 
6179 	keys[0] = OSSymbol::withCString("address");
6180 	keys[1] = OSSymbol::withCString("length");
6181 
6182 	// Copy the volatile data so we don't have to allocate memory
6183 	// while the lock is held.
6184 	LOCK;
6185 	if (vcopy.size() == _rangesCount) {
6186 		Ranges vec = _ranges;
6187 		for (index = 0; index < vcopy.size(); index++) {
6188 			mach_vm_address_t addr; mach_vm_size_t len;
6189 			getAddrLenForInd(addr, len, type, vec, index, _task);
6190 			vcopy[index].address = addr;
6191 			vcopy[index].length  = len;
6192 		}
6193 	} else {
6194 		// The descriptor changed out from under us.  Give up.
6195 		UNLOCK;
6196 		return false;
6197 	}
6198 	UNLOCK;
6199 
6200 	for (index = 0; index < vcopy.size(); index++) {
6201 		user_addr_t addr = vcopy[index].address;
6202 		IOByteCount len = (IOByteCount) vcopy[index].length;
6203 		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6204 		if (values[0] == NULL) {
6205 			return false;
6206 		}
6207 		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6208 		if (values[1] == NULL) {
6209 			return false;
6210 		}
6211 		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6212 		if (dict == NULL) {
6213 			return false;
6214 		}
6215 		array->setObject(dict.get());
6216 		dict.reset();
6217 		values[0].reset();
6218 		values[1].reset();
6219 	}
6220 
6221 	return array->serialize(s);
6222 }
6223 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6224 
// Reserved vtable pad slots for binary compatibility; slots marked
// "UsedX86" back legacy exported methods on the corresponding ABI.
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
    struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6255 
/* ex-inline function implementation */
// Physical address of the descriptor's first segment (offset 0); the
// segment length is not needed, hence the NULL length pointer.
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
6262 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

// Factory: allocate and initialize a mixed-data container with the given
// initial capacity; returns NULL on allocation or init failure (the shared
// pointer releases any half-built object).
OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	OSSharedPtr<_IOMemoryDescriptorMixedData> instance = OSMakeShared<_IOMemoryDescriptorMixedData>();

	if (!instance) {
		return nullptr;
	}
	if (!instance->initWithCapacity(capacity)) {
		return nullptr;
	}
	return instance;
}
6274 
// Initialize (or re-initialize) with room for 'capacity' bytes of mixed
// data; length is reset to zero. Returns false on allocation failure.
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	// Re-init case: drop an existing buffer if the caller asks for zero
	// capacity or more than is currently allocated.
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	// Allocate zero-filled storage sized to the requested capacity.
	if (!_data && capacity) {
		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6299 
// Release the backing allocation, then tear down the OSObject.
void
_IOMemoryDescriptorMixedData::free()
{
	freeMemory();
	OSObject::free();
}
6306 
// Free the data buffer and reset all bookkeeping. kfree_type_var_impl
// tolerates a NULL pointer, so this is safe to call repeatedly.
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6314 
6315 bool
appendBytes(const void * bytes,size_t length)6316 _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6317 {
6318 	const auto oldLength = getLength();
6319 	size_t newLength;
6320 	if (os_add_overflow(oldLength, length, &newLength)) {
6321 		return false;
6322 	}
6323 
6324 	if (!setLength(newLength)) {
6325 		return false;
6326 	}
6327 
6328 	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6329 	if (bytes) {
6330 		bcopy(bytes, dest, length);
6331 	}
6332 
6333 	return true;
6334 }
6335 
// Set the logical length, reallocating (never shrinking) the backing
// buffer when the new length exceeds the current capacity. Returns false
// on allocation failure, leaving the existing data intact.
bool
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void *newData;

		// __krealloc_type preserves existing contents and zero-fills the
		// newly added tail (Z_WAITOK_ZERO).
		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
		    NULL);
		if (!newData) {
			return false;
		}

		_data = newData;
		_capacity = length;
	}

	_length = length;
	return true;
}
6356 
// Pointer to the stored bytes, or NULL while the container is empty.
const void *
_IOMemoryDescriptorMixedData::getBytes() const
{
	return _length ? _data : nullptr;
}
6362 
// Number of bytes stored; zero when no buffer has been allocated.
size_t
_IOMemoryDescriptorMixedData::getLength() const
{
	return _data ? _length : 0;
}
6368