xref: /xnu-12377.81.4/iokit/Kernel/IOMemoryDescriptor.cpp (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #define IOKIT_ENABLE_SHARED_PTR
29 
30 #include <sys/cdefs.h>
31 
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39 
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43 
44 #include <IOKit/IOKitDebug.h>
45 #include <IOKit/IOTimeStamp.h>
46 #include <libkern/OSDebug.h>
47 #include <libkern/OSKextLibPrivate.h>
48 
49 #include "IOKitKernelInternal.h"
50 
51 #include <libkern/c++/OSAllocation.h>
52 #include <libkern/c++/OSContainers.h>
53 #include <libkern/c++/OSDictionary.h>
54 #include <libkern/c++/OSArray.h>
55 #include <libkern/c++/OSSymbol.h>
56 #include <libkern/c++/OSNumber.h>
57 #include <os/overflow.h>
58 #include <os/cpp_util.h>
59 #include <os/base_private.h>
60 
61 #include <sys/uio.h>
62 
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout_xnu.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68 
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <mach/memory_entry.h>
72 #include <mach/mach_host.h>
73 #include <vm/vm_fault_xnu.h>
74 #include <vm/vm_protos.h>
75 #include <vm/vm_memory_entry.h>
76 #include <vm/vm_kern_xnu.h>
77 #include <vm/vm_iokit.h>
78 #include <vm/vm_map_xnu.h>
79 #include <kern/thread.h>
80 #if HAS_MTE
81 #include <vm/vm_memtag.h>
82 #endif /* HAS_MTE */
83 
84 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
85 extern void ipc_port_release_send(ipc_port_t port);
86 
87 __END_DECLS
88 
// Sentinel IOMapper pointer: request the system mapper, blocking until it
// has been published if necessary.
#define kIOMapperWaitSystem     ((IOMapper *) 1)

// Cached system-wide IOMapper; NULL until resolved (set elsewhere in this file).
static IOMapper * gIOSystemMapper = NULL;

// Highest physical page number — presumably the last page of real memory;
// initialized elsewhere. TODO(review): confirm against IOLib/pmap init.
ppnum_t           gIOLastPage;

enum {
	// Guard region size used for kIOMapGuardedLarge mappings (64 KB).
	kIOMapGuardSizeLarge = 65536
};
98 
99 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
100 
// IOMemoryDescriptor is abstract; IOGeneralMemoryDescriptor is the concrete
// default implementation, allocated from a zeroing zone.
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

// 'super' for the IOGeneralMemoryDescriptor methods defined in this file.
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Single recursive lock serializing memory-descriptor state in this file
// (see LOCK/UNLOCK users such as device_data_action below).
static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
// SLEEP/WAKEUP use 'this' as the event token, so they are only usable from
// member functions; WAKEUP wakes all waiters (one-thread == false).
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

// Debug logging, compiled out by default; flip the #if to enable kprintf.
#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
123 
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
125 
// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,    // iopl describes device memory
	kIOPLExternUPL = 0x00000002,    // UPL was supplied externally, not created here
};

// Initialization payload used when re-creating a descriptor from a
// persistent memory reference.
struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference               * fMemRef;
};

// One wired region (UPL) within a general memory descriptor.
struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo; // Pointer to page list or index into it
	uint64_t fIOMDOffset;       // The offset of this iopl in descriptor
	ppnum_t fMappedPage;        // Page number of first page in this iopl
	unsigned int fPageOffset;   // Offset within first page of iopl
	unsigned int fFlags;        // Flags (ioPLBlockFlags)
};

enum { kMaxWireTags = 6 };

// Backing store for IOGeneralMemoryDescriptor's _memoryEntries: fixed header
// followed by fPageCnt upl_page_info_t entries, then the ioPLBlock array.
// NOTE: the [1] trailing array plus computeDataSize()/getIOPLList() arithmetic
// implement the variable-length layout — do not change to a flexible array
// member without auditing those macros.
struct ioGMDData {
	IOMapper *  fMapper;
	uint64_t    fDMAMapAlignment;
	uint64_t    fMappedBase;
	uint64_t    fMappedLength;
	uint64_t    fPreparationID;
#if IOTRACKING
	IOTracking  fWireTracking;
#endif /* IOTRACKING */
	unsigned int      fPageCnt;
	uint8_t           fDMAMapNumAddressBits;
	unsigned char     fCompletionError:1;
	unsigned char     fMappedBaseValid:1;
	unsigned char     _resv:4;
	unsigned char     fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};
175 
#pragma GCC visibility push(hidden)

// Growable byte buffer used to hold the mixed-layout ioGMDData described
// above; not exported from the kext.
class _IOMemoryDescriptorMixedData : public OSObject
{
	OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);

public:
	static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
	bool initWithCapacity(size_t capacity);
	virtual void free() APPLE_KEXT_OVERRIDE;

	bool appendBytes(const void * bytes, size_t length);
	bool setLength(size_t length);

	const void * getBytes() const;
	size_t getLength() const;

private:
	void freeMemory();

	void *  _data = nullptr;     // backing allocation
	size_t  _length = 0;         // bytes in use
	size_t  _capacity = 0;       // bytes allocated
};

#pragma GCC visibility pop

// Accessors over the variable-length ioGMDData layout.
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytes())
// ioPLBlock array starts immediately after the fPageCnt page-info entries.
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
// Total bytes needed for p page-info entries and u ioPLBlocks.
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
212 
213 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
214 
extern "C" {
// Device pager upcall: resolve a fault against the memory descriptor stored
// in the pager's reserved data.  Takes the global memory lock to keep the
// descriptor alive while handleFault() runs; returns KERN_ABORTED if the
// descriptor has already been torn down.  'protection' is unused here.
kern_return_t
device_data_action(
	uintptr_t               device_handle,
	ipc_port_t              device_pager,
	vm_prot_t               protection,
	vm_object_offset_t      offset,
	vm_size_t               size)
{
	kern_return_t        kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	OSSharedPtr<IOMemoryDescriptor> memDesc;

	LOCK;
	if (ref->dp.memory) {
		// Retain under the lock so the descriptor cannot be freed mid-fault.
		memDesc.reset(ref->dp.memory, OSRetain);
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc.reset();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}

// Device pager teardown upcall: the reserved block was kept alive for the
// pager; free it now that the pager is done.
kern_return_t
device_close(
	uintptr_t     device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IOFreeType( ref, IOMemoryDescriptorReserved );

	return kIOReturnSuccess;
}
};      // end extern "C"
252 
253 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
254 
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
//
// Fetch range 'ind' of a descriptor's Ranges union as (addr, len), decoding
// according to the descriptor's type bits.  On LP64 the v64/v layouts are
// identical, so only the UIO case needs special handling there.
static inline void
getAddrLenForInd(
	mach_vm_address_t                &addr,
	mach_vm_size_t                   &len, // Output variables
	UInt32                            type,
	IOGeneralMemoryDescriptor::Ranges r,
	UInt32                            ind,
	task_t                            task __unused)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		// uio-backed ranges: widen user_addr_t/user_size_t to 64-bit outputs.
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		// 32-bit kernel: 64-bit range layout differs from IOVirtualRange.
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len  = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len  = cur.length;
	}
}
288 
289 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
290 
291 static IOReturn
purgeableControlBits(IOOptionBits newState,vm_purgable_t * control,int * state)292 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
293 {
294 	IOReturn err = kIOReturnSuccess;
295 
296 	*control = VM_PURGABLE_SET_STATE;
297 
298 	enum { kIOMemoryPurgeableControlMask = 15 };
299 
300 	switch (kIOMemoryPurgeableControlMask & newState) {
301 	case kIOMemoryPurgeableKeepCurrent:
302 		*control = VM_PURGABLE_GET_STATE;
303 		break;
304 
305 	case kIOMemoryPurgeableNonVolatile:
306 		*state = VM_PURGABLE_NONVOLATILE;
307 		break;
308 	case kIOMemoryPurgeableVolatile:
309 		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
310 		break;
311 	case kIOMemoryPurgeableEmpty:
312 		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
313 		break;
314 	default:
315 		err = kIOReturnBadArgument;
316 		break;
317 	}
318 
319 	if (*control == VM_PURGABLE_SET_STATE) {
320 		// let VM know this call is from the kernel and is allowed to alter
321 		// the volatility of the memory entry even if it was created with
322 		// MAP_MEM_PURGABLE_KERNEL_ONLY
323 		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
324 	}
325 
326 	return err;
327 }
328 
329 static IOReturn
purgeableStateBits(int * state)330 purgeableStateBits(int * state)
331 {
332 	IOReturn err = kIOReturnSuccess;
333 
334 	switch (VM_PURGABLE_STATE_MASK & *state) {
335 	case VM_PURGABLE_NONVOLATILE:
336 		*state = kIOMemoryPurgeableNonVolatile;
337 		break;
338 	case VM_PURGABLE_VOLATILE:
339 		*state = kIOMemoryPurgeableVolatile;
340 		break;
341 	case VM_PURGABLE_EMPTY:
342 		*state = kIOMemoryPurgeableEmpty;
343 		break;
344 	default:
345 		*state = kIOMemoryPurgeableNonVolatile;
346 		err = kIOReturnNotReady;
347 		break;
348 	}
349 	return err;
350 }
351 
// One row per IOKit cache mode: the pager WIMG bits and the corresponding
// memory-entry object type passed to the VM.
typedef struct {
	unsigned int wimg;          // VM_WIMG_* pager flag
	unsigned int object_type;   // MAP_MEM_* named-entry type
} iokit_memtype_entry;

// Indexed directly by the kIO*Cache constants — keep in sync with them.
static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
369 
370 static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)371 vmProtForCacheMode(IOOptionBits cacheMode)
372 {
373 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
374 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
375 		cacheMode = kIODefaultCache;
376 	}
377 	vm_prot_t prot = 0;
378 	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
379 	return prot;
380 }
381 
382 static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)383 pagerFlagsForCacheMode(IOOptionBits cacheMode)
384 {
385 	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
386 	if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
387 		cacheMode = kIODefaultCache;
388 	}
389 	if (cacheMode == kIODefaultCache) {
390 		return -1U;
391 	}
392 	return iomd_mem_types[cacheMode].wimg;
393 }
394 
395 static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)396 cacheModeForPagerFlags(unsigned int pagerFlags)
397 {
398 	pagerFlags &= VM_WIMG_MASK;
399 	IOOptionBits cacheMode = kIODefaultCache;
400 	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
401 		if (iomd_mem_types[i].wimg == pagerFlags) {
402 			cacheMode = i;
403 			break;
404 		}
405 	}
406 	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
407 }
408 
409 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
410 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
411 
// One VM named entry covering part of a descriptor's range.
struct IOMemoryEntry {
	ipc_port_t entry;       // send right to the named entry
	int64_t    offset;      // offset of this entry within the descriptor
	uint64_t   size;        // bytes covered by this entry
	uint64_t   start;       // source address the entry was made from
};

// Refcounted set of named entries describing a descriptor's memory; built
// by memoryReferenceCreate() below.  Variable-length: 'capacity' entries
// follow the header (allocated together via krealloc_type).
struct IOMemoryReference {
	volatile SInt32             refCount;
	vm_prot_t                   prot;       // prot/MAP_MEM_* bits used at creation
	uint32_t                    capacity;   // allocated entries
	uint32_t                    count;      // entries in use
	struct IOMemoryReference  * mapRef;     // chained COW reference, if any
	IOMemoryEntry               entries[0];
};

// Option bits for memoryReferenceCreate().
enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

// Global count of live IOMemoryReference objects (diagnostic).
SInt32 gIOMemoryReferenceCount;
435 
436 IOMemoryReference *
memoryReferenceAlloc(uint32_t capacity,IOMemoryReference * realloc)437 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
438 {
439 	IOMemoryReference * ref;
440 	size_t              oldCapacity;
441 
442 	if (realloc) {
443 		oldCapacity = realloc->capacity;
444 	} else {
445 		oldCapacity = 0;
446 	}
447 
448 	// Use the kalloc API instead of manually handling the reallocation
449 	ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
450 	    oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
451 	if (ref) {
452 		if (oldCapacity == 0) {
453 			ref->refCount = 1;
454 			OSIncrementAtomic(&gIOMemoryReferenceCount);
455 		}
456 		ref->capacity = capacity;
457 	}
458 	return ref;
459 }
460 
461 void
memoryReferenceFree(IOMemoryReference * ref)462 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
463 {
464 	IOMemoryEntry * entries;
465 
466 	if (ref->mapRef) {
467 		memoryReferenceFree(ref->mapRef);
468 		ref->mapRef = NULL;
469 	}
470 
471 	entries = ref->entries + ref->count;
472 	while (entries > &ref->entries[0]) {
473 		entries--;
474 		ipc_port_release_send(entries->entry);
475 	}
476 	kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
477 
478 	OSDecrementAtomic(&gIOMemoryReferenceCount);
479 }
480 
481 void
memoryReferenceRelease(IOMemoryReference * ref)482 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
483 {
484 	if (1 == OSDecrementAtomic(&ref->refCount)) {
485 		memoryReferenceFree(ref);
486 	}
487 }
488 
489 
/*
 * Build an IOMemoryReference describing this descriptor's memory as a set of
 * VM named entries.
 *
 * Task-backed descriptors: coalesce adjacent ranges and create one or more
 * entries via mach_make_memory_entry_internal() (creating fresh pageable
 * backing for IOBMD allocations, or referencing the task's map otherwise).
 * Physical / no-task descriptors: set up a device pager and wrap it in a
 * single memory-object entry.
 *
 * options:   kIOMemoryReference{Reuse,Write,COW} bits.
 * reference: out — new reference on success (refCount 1), or the existing
 *            _memRef when kIOMemoryReferenceReuse matched it exactly; NULL
 *            on failure.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	// Initial entry capacity and growth increment for the reference.
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t        err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;
	IOMemoryEntry *      cloneEntries = NULL;
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
	int                  misaligned_start = 0, misaligned_end = 0;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	remain = _length;
	if (_task) {
		// Prime (nextAddr, nextLen) with the first virtual range.
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);

		// account for IOBMD setLength(), use its capacity as length
		IOBufferMemoryDescriptor * bmd;
		if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
			nextLen = bmd->getCapacity();
			remain  = nextLen;
		}
	} else {
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW   & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		// Attempt to reuse the existing reference: compare entries as we go.
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges

		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//    + "network" ledger tag
					//    + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//    + no ledger tag
					//    + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			// NULL map => mach_make_memory_entry_internal creates new backing.
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		// Outer loop: one iteration per coalesced run of contiguous ranges.
		while (remain) {
			srcAddr  = nextAddr;
			srcLen   = nextLen;
			nextAddr = 0;
			nextLen  = 0;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr   = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr   = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			// Inner loop: the VM may split one run into multiple entries.
			do{
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						// Ran out of clone entries — reuse impossible.
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				mach_vm_offset_t entryAddrForVm = entryAddr;
#if HAS_MTE
				vmne_kflags.vmnekf_is_iokit = TRUE;
				/* If we're holding a specific address and map, canonicalize the
				 * address before passing it through to the VM.
				 */
				if (entryAddr != 0 && map != NULL) {
					entryAddrForVm = vm_memtag_canonicalize(map, entryAddr);
				}
#endif /* HAS_MTE */
				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddrForVm, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddrForVm, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				// Sub-page alignment adjustments made by the VM; middle
				// entries with misaligned edges are unsupported below.
				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}

				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (count) {
					/* Middle entries */
					if (misaligned_start || misaligned_end) {
						DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
						ipc_port_release_send(entry);
						err = KERN_NOT_SUPPORTED;
						break;
					}
				}

				if (count >= ref->capacity) {
					// Grow the entry array; 'entries' must be recomputed
					// since the allocation may have moved.
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry  = entry;
				entries->size   = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start = entryAddr;
				entryAddr += actualSize;
				if (MAP_MEM_NAMED_REUSE & prot) {
					// Reuse stays valid only while every new entry matches
					// the corresponding entry of the existing reference.
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			}while (true);
			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		// Faults against the resulting object arrive via device_data_action()
		// above, with 'reserved' as the handle.
		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr  = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry  = entry;
				entries->size   = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot  = prot;

	// Copy-on-write descriptors carry a second, COW reference in mapRef.
	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		if (MAP_MEM_NAMED_REUSE & prot) {
			// Every entry matched the existing reference: discard the new
			// one and hand back _memRef with an extra refcount.
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
820 
821 static mach_vm_size_t
IOMemoryDescriptorMapGuardSize(vm_map_t map,IOOptionBits options)822 IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
823 {
824 	switch (kIOMapGuardedMask & options) {
825 	default:
826 	case kIOMapGuardedSmall:
827 		return vm_map_page_size(map);
828 	case kIOMapGuardedLarge:
829 		assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
830 		return kIOMapGuardSizeLarge;
831 	}
832 	;
833 }
834 
835 static kern_return_t
IOMemoryDescriptorMapDealloc(IOOptionBits options,vm_map_t map,vm_map_offset_t addr,mach_vm_size_t size)836 IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
837     vm_map_offset_t addr, mach_vm_size_t size)
838 {
839 	kern_return_t   kr;
840 	vm_map_offset_t actualAddr;
841 	mach_vm_size_t  actualSize;
842 
843 	actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
844 	actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
845 
846 	if (kIOMapGuardedMask & options) {
847 		mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
848 		actualAddr -= guardSize;
849 		actualSize += 2 * guardSize;
850 	}
851 	kr = mach_vm_deallocate(map, actualAddr, actualSize);
852 
853 	return kr;
854 }
855 
/*
 * Allocate VM space for a descriptor mapping, per the options in
 * IOMemoryDescriptorMapAllocRef (_ref): anywhere vs fixed address, VM tag,
 * and optional guard regions.  Guards are carved out of an enlarged
 * allocation and made inaccessible with mach_vm_protect(); ref->mapped is
 * then advanced past the leading guard.  Guarded mappings require
 * kIOMapAnywhere.
 */
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn                        err;
	vm_map_offset_t                 addr;
	mach_vm_size_t                  size;
	mach_vm_size_t                  guardSize;
	vm_map_kernel_flags_t           vmk_flags;

	addr = ref->mapped;
	size = ref->size;
	guardSize = 0;

	if (kIOMapGuardedMask & ref->options) {
		if (!(kIOMapAnywhere & ref->options)) {
			// Fixed-address guarded mappings are not supported.
			return kIOReturnBadArgument;
		}
		guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
		// One guard region on each side of the payload.
		size += 2 * guardSize;
	}
	if (kIOMapAnywhere & ref->options) {
		vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
	} else {
		vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
	}
	vmk_flags.vm_tag = ref->tag;

	/*
	 * Mapping memory into the kernel_map using IOMDs use the data range.
	 * Memory being mapped should not contain kernel pointers.
	 */
	if (map == kernel_map) {
		vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
	}

	err = mach_vm_map_kernel(map, &addr, size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    vmk_flags,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,                       /* copy */
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
		if (kIOMapGuardedMask & ref->options) {
			// Make the first and last guardSize bytes inaccessible, then
			// report the payload address just past the leading guard.
			vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));

			err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
			assert(KERN_SUCCESS == err);
			ref->mapped += guardSize;
		}
	}

	return err;
}
922 
/*
 * Map (a portion of) a memory reference into a VM map.
 *
 * ref      - memory reference holding the list of IOMemoryEntry named entries
 * map      - target vm_map
 * inoffset - byte offset into the memory reference at which to start mapping
 * size     - number of bytes to map
 * options  - kIOMap* flags: placement (kIOMapAnywhere/kIOMapOverwrite),
 *            protection (kIOMapReadOnly), caching (kIOMapCacheMask bits),
 *            and kIOMapPrefault
 * inaddr   - in: requested address unless kIOMapAnywhere; out: mapped address
 *
 * On failure, any VM space allocated here is deallocated and *inaddr is 0.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn        err;
	int64_t         offset = inoffset;
	uint32_t        rangeIdx, entryIdx;
	vm_map_offset_t addr, mapAddr;
	vm_map_offset_t pageOffset, entryOffset, remain, chunk;

	mach_vm_address_t nextAddr;
	mach_vm_size_t    nextLen;
	IOByteCount       physLen;
	IOMemoryEntry   * entry;
	vm_prot_t         prot, memEntryCacheMode;
	IOOptionBits      type;
	IOOptionBits      cacheMode;
	vm_tag_t          tag;
	// for the kIOMapPrefault option.
	upl_page_info_t * pageList = NULL;
	UInt              currentPageIndex = 0;
	bool              didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	// A reference that wraps another reference delegates entirely to it.
	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

	// References created with MAP_MEM_USE_DATA_ADDR take the newer path.
	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
		return err;
	}

	type = _flags & kIOMemoryTypeMask;

	// Compute effective protection: read, plus write unless read-only was
	// requested, clipped to what the reference itself allows.
	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (typeof(tag))getVMTag(map);

	if (_task) {
		// Find first range for offset
		if (!_rangesCount) {
			return kIOReturnBadArgument;
		}
		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
			if (remain < nextLen) {
				break;
			}
			remain -= nextLen;
		}
	} else {
		// No owning task: treat as a single physical segment at the offset.
		rangeIdx = 0;
		remain   = 0;
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen  = size;
	}

	// The requested offset must land inside some range.
	assert(remain < nextLen);
	if (remain >= nextLen) {
		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
		return kIOReturnBadArgument;
	}

	nextAddr  += remain;
	nextLen   -= remain;
	// pageOffset: sub-page offset of the source address, relative to the
	// page size of the target map (which may differ on mixed-page systems).
#if __ARM_MIXED_PAGE_SIZE__
	pageOffset = (vm_map_page_mask(map) & nextAddr);
#else /* __ARM_MIXED_PAGE_SIZE__ */
	pageOffset = (page_mask & nextAddr);
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	addr       = 0;
	didAlloc   = false;

	if (!(options & kIOMapAnywhere)) {
		// Fixed-address mapping: caller's address must share the same
		// sub-page offset as the source.
		addr = *inaddr;
		if (pageOffset != (vm_map_page_mask(map) & addr)) {
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
		}
		addr -= pageOffset;
	}

	// find first entry for offset
	for (entryIdx = 0;
	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
	    entryIdx++) {
	}
	entryIdx--;
	entry = &ref->entries[entryIdx];

	// allocate VM
#if __ARM_MIXED_PAGE_SIZE__
	size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
#else
	size = round_page_64(size + pageOffset);
#endif
	if (kIOMapOverwrite & options) {
		// Caller owns the target range already; no allocation needed.
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		// Reserve the VM range (pageable kernel buffers go through the
		// pageable-map iterator).
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = size;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}
		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * operations.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData* dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	remain  = size;
	mapAddr = addr;
	// addr is page-aligned; the value returned to the caller re-adds the
	// sub-page offset of the source data.
	addr    += pageOffset;

	while (remain && (KERN_SUCCESS == err)) {
		entryOffset = offset - entry->offset;
		// Every chunk after the first must be page-aligned within its entry.
		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
			err = kIOReturnNotAligned;
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
			break;
		}

		if (kIODefaultCache != cacheMode) {
			// Push the requested cache mode into the named entry
			// (MAP_MEM_ONLY update; see memEntryCacheMode above).
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}

		entryOffset -= pageOffset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags = {
				.vmf_fixed = true,
				.vmf_overwrite = true,
				.vm_tag = tag,
				.vmkf_iokit_acct = true,
			};

			if (chunk > remain) {
				chunk = remain;
			}
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddr,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    prot,                        // cur
				    prot,                        // max
				    &pageList[currentPageIndex],
				    nb_pages);

				if (err || vm_map_page_mask(map) < PAGE_MASK) {
					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
				}
				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
				err = mach_vm_map_kernel(map,
				    &mapAddr,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    false,               // copy
				    prot,               // cur
				    prot,               // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
				break;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			mapAddr  += chunk;
			// pageOffset only applies to the very first chunk.
			offset   += chunk - pageOffset;
		}
		pageOffset = 0;
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
			break;
		}
	}

	// Unwind our own allocation on failure.
	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
	}
	return err;
}
1232 
#define LOGUNALIGN 0
/*
 * Newer mapping path, used for references created with MAP_MEM_USE_DATA_ADDR
 * (see memoryReferenceMap). Differs from the legacy path in that the required
 * VM size is pre-computed per entry via mach_memory_entry_map_size(), and
 * mappings are entered with vmf_return_data_addr so the VM returns the data
 * address rather than the page-truncated one.
 *
 * Parameters and failure behavior match memoryReferenceMap: on error any VM
 * allocated here is deallocated and *inaddr is set to 0.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMapNew(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn            err;
	int64_t             offset = inoffset;
	uint32_t            entryIdx, firstEntryIdx;
	vm_map_offset_t     addr, mapAddr, mapAddrOut;
	vm_map_offset_t     entryOffset, remain, chunk;

	IOMemoryEntry    * entry;
	vm_prot_t          prot, memEntryCacheMode;
	IOOptionBits       type;
	IOOptionBits       cacheMode;
	vm_tag_t           tag;
	// for the kIOMapPrefault option.
	upl_page_info_t  * pageList = NULL;
	UInt               currentPageIndex = 0;
	bool               didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	// A reference that wraps another reference delegates entirely to it.
	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

#if LOGUNALIGN
	printf("MAP offset %qx, %qx\n", inoffset, size);
#endif

	type = _flags & kIOMemoryTypeMask;

	// Effective protection: read, plus write unless read-only requested,
	// clipped to what the reference allows.
	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (vm_tag_t) getVMTag(map);

	addr       = 0;
	didAlloc   = false;

	if (!(options & kIOMapAnywhere)) {
		addr = *inaddr;
	}

	// find first entry for offset
	for (firstEntryIdx = 0;
	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
	    firstEntryIdx++) {
	}
	firstEntryIdx--;

	// calculate required VM space

	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	remain  = size;
	int64_t iteroffset = offset;
	uint64_t mapSize = 0;
	// First pass: sum the map size each entry contributes, as reported by
	// mach_memory_entry_map_size() for the target map's page size.
	while (remain) {
		entryOffset = iteroffset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}

#if LOGUNALIGN
		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
#endif

		chunk = entry->size - entryOffset;
		if (chunk) {
			if (chunk > remain) {
				chunk = remain;
			}
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapSize += entrySize;

			remain -= chunk;
			if (!remain) {
				break;
			}
			iteroffset   += chunk; // - pageOffset;
		}
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			panic("overrun");
			err = kIOReturnOverrun;
			break;
		}
	}

	if (kIOMapOverwrite & options) {
		// Caller owns the target range already; no allocation needed.
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		// Reserve mapSize bytes of VM (pageable kernel buffers go through
		// the pageable-map iterator).
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = mapSize;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}

		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
#if LOGUNALIGN
		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
#endif
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * operations.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData* dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	// Second pass: walk the same entries again and enter each chunk.
	remain   = size;
	mapAddr  = addr;
	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	while (remain && (KERN_SUCCESS == err)) {
#if LOGUNALIGN
		printf("offset %qx, %qx\n", offset, entry->offset);
#endif
		if (kIODefaultCache != cacheMode) {
			// Push the requested cache mode into the named entry
			// (MAP_MEM_ONLY update; see memEntryCacheMode above).
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}
		entryOffset = offset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
#if LOGUNALIGN
		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
#endif
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags = {
				.vmf_fixed = true,
				.vmf_overwrite = true,
				.vmf_return_data_addr = true,
				.vm_tag = tag,
				.vmkf_iokit_acct = true,
			};

			if (chunk > remain) {
				chunk = remain;
			}
			mapAddrOut = mapAddr;
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    prot,                        // cur
				    prot,                        // max
				    &pageList[currentPageIndex],
				    nb_pages);

				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
#if LOGUNALIGN
				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
#endif
#if HAS_MTE
				/* The memory that originated this IOMD might've been MTE-enabled,
				 * so we need to inform the VM that MTE policies apply.
				 */
				vmk_flags.vmf_mte = true;
				vmk_flags.vmkf_is_iokit = true;
#endif /* HAS_MTE */
				err = mach_vm_map_kernel(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    vmk_flags,
				    entry->entry,
				    entryOffset,
				    false,               // copy
				    prot,               // cur
				    prot,               // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				panic("map enter err %x", err);
				break;
			}
#if LOGUNALIGN
			printf("mapAddr o %qx\n", mapAddrOut);
#endif
			// The first chunk's data address is what we report back.
			if (entryIdx == firstEntryIdx) {
				addr = mapAddrOut;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			// Advance by the entry's map size (may exceed chunk on
			// mixed-page-size configurations).
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapAddr += entrySize;
			offset  += chunk;
		}

		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			break;
		}
	}

	if (KERN_SUCCESS != err) {
		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
	}

	// Unwind our own allocation on failure.
	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	return err;
}
1560 
1561 uint64_t
memoryReferenceGetDMAMapLength(IOMemoryReference * ref,uint64_t * offset)1562 IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1563 	IOMemoryReference * ref,
1564 	uint64_t          * offset)
1565 {
1566 	kern_return_t kr;
1567 	vm_object_offset_t data_offset = 0;
1568 	uint64_t total;
1569 	uint32_t idx;
1570 
1571 	assert(ref->count);
1572 	if (offset) {
1573 		*offset = (uint64_t) data_offset;
1574 	}
1575 	total = 0;
1576 	for (idx = 0; idx < ref->count; idx++) {
1577 		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1578 		    &data_offset);
1579 		if (KERN_SUCCESS != kr) {
1580 			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1581 		} else if (0 != data_offset) {
1582 			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1583 		}
1584 		if (offset && !idx) {
1585 			*offset = (uint64_t) data_offset;
1586 		}
1587 		total += round_page(data_offset + ref->entries[idx].size);
1588 	}
1589 
1590 	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1591 	    (offset ? *offset : (vm_object_offset_t)-1), total);
1592 
1593 	return total;
1594 }
1595 
1596 
1597 IOReturn
memoryReferenceGetPageCounts(IOMemoryReference * ref,IOByteCount * residentPageCount,IOByteCount * dirtyPageCount,IOByteCount * swappedPageCount)1598 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1599 	IOMemoryReference * ref,
1600 	IOByteCount       * residentPageCount,
1601 	IOByteCount       * dirtyPageCount,
1602 	IOByteCount       * swappedPageCount)
1603 {
1604 	IOReturn        err;
1605 	IOMemoryEntry * entries;
1606 	UInt64 resident, dirty, swapped;
1607 	UInt64 totalResident, totalDirty, totalSwapped;
1608 
1609 	totalResident = totalDirty = totalSwapped = 0;
1610 	err = kIOReturnSuccess;
1611 	entries = ref->entries + ref->count;
1612 	while (entries > &ref->entries[0]) {
1613 		entries--;
1614 		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty, &swapped);
1615 		if (KERN_SUCCESS != err) {
1616 			break;
1617 		}
1618 		totalResident += resident;
1619 		totalDirty    += dirty;
1620 		totalSwapped  += swapped;
1621 	}
1622 
1623 	if (residentPageCount) {
1624 		*residentPageCount = totalResident;
1625 	}
1626 	if (dirtyPageCount) {
1627 		*dirtyPageCount    = totalDirty;
1628 	}
1629 	if (swappedPageCount) {
1630 		*swappedPageCount  = totalSwapped;
1631 	}
1632 	return err;
1633 }
1634 
/*
 * Apply a purgeable-state change to every entry of a memory reference and
 * merge the per-entry resulting states into a single summary state.
 *
 * newState - requested kIOMemoryPurgeable* state
 * oldState - optional; receives the merged state of all entries, with
 *            precedence Empty > Volatile > NonVolatile.
 *
 * Stops at the first error; *oldState still reflects entries processed.
 */
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits        newState,
	IOOptionBits      * oldState)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	vm_purgable_t   control;
	int             totalState, state;

	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	// Walk the entry list back to front.
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;

		// Translate the requested IOKit state into VM control/state values.
		err = purgeableControlBits(newState, &control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// 'state' is updated in place with the entry's previous state.
		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		// Translate the VM state back into a kIOMemoryPurgeable* value.
		err = purgeableStateBits(&state);
		if (KERN_SUCCESS != err) {
			break;
		}

		// Merge into the summary: Empty dominates, then Volatile,
		// otherwise NonVolatile.
		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}

	if (oldState) {
		*oldState = totalState;
	}
	return err;
}
1683 
1684 IOReturn
memoryReferenceSetOwnership(IOMemoryReference * ref,task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)1685 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1686 	IOMemoryReference * ref,
1687 	task_t              newOwner,
1688 	int                 newLedgerTag,
1689 	IOOptionBits        newLedgerOptions)
1690 {
1691 	IOReturn        err, totalErr;
1692 	IOMemoryEntry * entries;
1693 
1694 	totalErr = kIOReturnSuccess;
1695 	entries = ref->entries + ref->count;
1696 	while (entries > &ref->entries[0]) {
1697 		entries--;
1698 
1699 		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1700 		if (KERN_SUCCESS != err) {
1701 			totalErr = err;
1702 		}
1703 	}
1704 
1705 	return totalErr;
1706 }
1707 
1708 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1709 
1710 OSSharedPtr<IOMemoryDescriptor>
withAddress(void * address,IOByteCount length,IODirection direction)1711 IOMemoryDescriptor::withAddress(void *      address,
1712     IOByteCount   length,
1713     IODirection direction)
1714 {
1715 	return IOMemoryDescriptor::
1716 	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1717 }
1718 
1719 #ifndef __LP64__
1720 OSSharedPtr<IOMemoryDescriptor>
withAddress(IOVirtualAddress address,IOByteCount length,IODirection direction,task_t task)1721 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1722     IOByteCount  length,
1723     IODirection  direction,
1724     task_t       task)
1725 {
1726 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1727 	if (that) {
1728 		if (that->initWithAddress(address, length, direction, task)) {
1729 			return os::move(that);
1730 		}
1731 	}
1732 	return nullptr;
1733 }
1734 #endif /* !__LP64__ */
1735 
1736 OSSharedPtr<IOMemoryDescriptor>
withPhysicalAddress(IOPhysicalAddress address,IOByteCount length,IODirection direction)1737 IOMemoryDescriptor::withPhysicalAddress(
1738 	IOPhysicalAddress       address,
1739 	IOByteCount             length,
1740 	IODirection             direction )
1741 {
1742 	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1743 }
1744 
1745 #ifndef __LP64__
1746 OSSharedPtr<IOMemoryDescriptor>
withRanges(IOVirtualRange * ranges,UInt32 withCount,IODirection direction,task_t task,bool asReference)1747 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1748     UInt32           withCount,
1749     IODirection      direction,
1750     task_t           task,
1751     bool             asReference)
1752 {
1753 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1754 	if (that) {
1755 		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1756 			return os::move(that);
1757 		}
1758 	}
1759 	return nullptr;
1760 }
1761 #endif /* !__LP64__ */
1762 
1763 OSSharedPtr<IOMemoryDescriptor>
withAddressRange(mach_vm_address_t address,mach_vm_size_t length,IOOptionBits options,task_t task)1764 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1765     mach_vm_size_t length,
1766     IOOptionBits   options,
1767     task_t         task)
1768 {
1769 	IOAddressRange range = { address, length };
1770 	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1771 }
1772 
1773 OSSharedPtr<IOMemoryDescriptor>
withAddressRanges(IOAddressRange * ranges,UInt32 rangeCount,IOOptionBits options,task_t task)1774 IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1775     UInt32           rangeCount,
1776     IOOptionBits     options,
1777     task_t           task)
1778 {
1779 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1780 	if (that) {
1781 		if (task) {
1782 			options |= kIOMemoryTypeVirtual64;
1783 		} else {
1784 			options |= kIOMemoryTypePhysical64;
1785 		}
1786 
1787 		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1788 			return os::move(that);
1789 		}
1790 	}
1791 
1792 	return nullptr;
1793 }
1794 
1795 
1796 /*
1797  * withOptions:
1798  *
1799  * Create a new IOMemoryDescriptor. The buffer is made up of several
1800  * virtual address ranges, from a given task.
1801  *
1802  * Passing the ranges as a reference will avoid an extra allocation.
1803  */
1804 OSSharedPtr<IOMemoryDescriptor>
withOptions(void * buffers,UInt32 count,UInt32 offset,task_t task,IOOptionBits opts,IOMapper * mapper)1805 IOMemoryDescriptor::withOptions(void *          buffers,
1806     UInt32          count,
1807     UInt32          offset,
1808     task_t          task,
1809     IOOptionBits    opts,
1810     IOMapper *      mapper)
1811 {
1812 	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1813 
1814 	if (self
1815 	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1816 		return nullptr;
1817 	}
1818 
1819 	return os::move(self);
1820 }
1821 
/*
 * Base-class stub: the abstract IOMemoryDescriptor cannot be initialized
 * from an options buffer and always fails. Subclasses (e.g.
 * IOGeneralMemoryDescriptor, invoked via the factory methods above)
 * provide the real implementation.
 */
bool
IOMemoryDescriptor::initWithOptions(void *         buffers,
    UInt32         count,
    UInt32         offset,
    task_t         task,
    IOOptionBits   options,
    IOMapper *     mapper)
{
	return false;
}
1832 
1833 #ifndef __LP64__
1834 OSSharedPtr<IOMemoryDescriptor>
withPhysicalRanges(IOPhysicalRange * ranges,UInt32 withCount,IODirection direction,bool asReference)1835 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1836     UInt32          withCount,
1837     IODirection     direction,
1838     bool            asReference)
1839 {
1840 	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1841 	if (that) {
1842 		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1843 			return os::move(that);
1844 		}
1845 	}
1846 	return nullptr;
1847 }
1848 
1849 OSSharedPtr<IOMemoryDescriptor>
withSubRange(IOMemoryDescriptor * of,IOByteCount offset,IOByteCount length,IODirection direction)1850 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
1851     IOByteCount             offset,
1852     IOByteCount             length,
1853     IODirection             direction)
1854 {
1855 	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1856 }
1857 #endif /* !__LP64__ */
1858 
1859 OSSharedPtr<IOMemoryDescriptor>
withPersistentMemoryDescriptor(IOMemoryDescriptor * originalMD)1860 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1861 {
1862 	IOGeneralMemoryDescriptor *origGenMD =
1863 	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1864 
1865 	if (origGenMD) {
1866 		return IOGeneralMemoryDescriptor::
1867 		       withPersistentMemoryDescriptor(origGenMD);
1868 	} else {
1869 		return nullptr;
1870 	}
1871 }
1872 
/*
 * Create a persistent descriptor sharing the original's memory reference.
 *
 * Creates (or reuses, via kIOMemoryReferenceReuse) a memory reference from
 * the original descriptor. If the reference returned is the original's own,
 * the original descriptor itself is returned (retained) and the extra
 * reference taken by memoryReferenceCreate is dropped. Otherwise a new
 * IOGeneralMemoryDescriptor is built around the reference via the
 * kIOMemoryTypePersistentMD init path, which takes ownership of memRef.
 */
OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	// Reuse hit: the original already owns this reference, so hand back the
	// original (retained) and release the duplicate reference we just took.
	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
1898 
1899 #ifndef __LP64__
1900 bool
initWithAddress(void * address,IOByteCount withLength,IODirection withDirection)1901 IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1902     IOByteCount   withLength,
1903     IODirection withDirection)
1904 {
1905 	_singleRange.v.address = (vm_offset_t) address;
1906 	_singleRange.v.length  = withLength;
1907 
1908 	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1909 }
1910 
1911 bool
initWithAddress(IOVirtualAddress address,IOByteCount withLength,IODirection withDirection,task_t withTask)1912 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1913     IOByteCount    withLength,
1914     IODirection  withDirection,
1915     task_t       withTask)
1916 {
1917 	_singleRange.v.address = address;
1918 	_singleRange.v.length  = withLength;
1919 
1920 	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1921 }
1922 
1923 bool
initWithPhysicalAddress(IOPhysicalAddress address,IOByteCount withLength,IODirection withDirection)1924 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1925 	IOPhysicalAddress      address,
1926 	IOByteCount            withLength,
1927 	IODirection            withDirection )
1928 {
1929 	_singleRange.p.address = address;
1930 	_singleRange.p.length  = withLength;
1931 
1932 	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1933 }
1934 
1935 bool
initWithPhysicalRanges(IOPhysicalRange * ranges,UInt32 count,IODirection direction,bool reference)1936 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1937 	IOPhysicalRange * ranges,
1938 	UInt32            count,
1939 	IODirection       direction,
1940 	bool              reference)
1941 {
1942 	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1943 
1944 	if (reference) {
1945 		mdOpts |= kIOMemoryAsReference;
1946 	}
1947 
1948 	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1949 }
1950 
1951 bool
initWithRanges(IOVirtualRange * ranges,UInt32 count,IODirection direction,task_t task,bool reference)1952 IOGeneralMemoryDescriptor::initWithRanges(
1953 	IOVirtualRange * ranges,
1954 	UInt32           count,
1955 	IODirection      direction,
1956 	task_t           task,
1957 	bool             reference)
1958 {
1959 	IOOptionBits mdOpts = direction;
1960 
1961 	if (reference) {
1962 		mdOpts |= kIOMemoryAsReference;
1963 	}
1964 
1965 	if (task) {
1966 		mdOpts |= kIOMemoryTypeVirtual;
1967 
1968 		// Auto-prepare if this is a kernel memory descriptor as very few
1969 		// clients bother to prepare() kernel memory.
1970 		// But it was not enforced so what are you going to do?
1971 		if (task == kernel_task) {
1972 			mdOpts |= kIOMemoryAutoPrepare;
1973 		}
1974 	} else {
1975 		mdOpts |= kIOMemoryTypePhysical;
1976 	}
1977 
1978 	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1979 }
1980 #endif /* !__LP64__ */
1981 
1982 /*
1983  * initWithOptions:
1984  *
1985  *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1986  * from a given task, several physical ranges, an UPL from the ubc
1987  * system or a uio (may be 64bit) from the BSD subsystem.
1988  *
1989  * Passing the ranges as a reference will avoid an extra allocation.
1990  *
1991  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1992  * existing instance -- note this behavior is not commonly supported in other
1993  * I/O Kit classes, although it is supported here.
1994  */
1995 
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	// 32-bit virtual ranges cannot describe memory within a 64-bit task.
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuation data to initialse the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		options = orig->_flags & ~kIOMemoryAsReference;
		type = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	// Validate the type/task pairing: virtual-style types require a task,
	// physical and UPL types must not have one.
	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized  instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		// NB: this local 'type' shadows the outer one and holds the
		// descriptor's *previous* type, which governs the teardown below.
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			while (_wireCount) {
				complete();
			}
		}
		// Free previously allocated (non-reference) range storage,
		// matching however it was allocated for the old type.
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		// Preserve a redirection in progress; only drop the memory
		// reference and mappings when not redirected.
		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}

	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options               &= ~(kIOMemoryPreparedReadOnly);
	_flags                 = options;
	_task                  = task;

#ifndef __LP64__
	_direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	// Reset per-instance bookkeeping (also needed on re-initialization).
	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	// Allocate or free the prepare lock to match the thread-safety option.
	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		// Wrap an externally supplied UPL: 'buffers' is the upl_t,
		// 'count' is the byte length and 'offset' the start within it.
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length    = count;
		_pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice convieniently equal to 1
		iopl.fFlags  = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				// Single 64-bit range that fits below 4GB: downgrade to the
				// 32-bit type and store the range inline in _singleRange.
				if (count == 1
#ifndef __arm__
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
				    ) {
					if (type == kIOMemoryTypeVirtual64) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				// A single range is stored inline; multiple ranges are copied
				// into a freshly allocated array.
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
		_rangesCount = count;

		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		// Walk every range, accumulating total byte length and page count
		// with explicit overflow checks; any failure breaks out early.
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind, _task);
			if (_task) {
				// Task-backed ranges: ask the VM for the physical footprint.
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		// (ind < count) means the loop above broke out on an error/overflow.
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false;                                   /* overflow */
		}
		_length      = totalLength;
		_pages       = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed

		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			// Reject descriptors larger than all of physical memory.
			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			// Assign a VM tag for accounting when the memory is user-task
			// backed or pageable kernel buffer memory.
			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
2367 
2368 /*
2369  * free
2370  *
2371  * Free resources.
2372  */
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	// Detach from the device pager (if any) under the lock so a concurrent
	// pager callback does not see a dying descriptor.
	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical descriptors are never complete()d; undo any DMA mapping
		// directly.
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		// Balance any outstanding prepare() calls before teardown.
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	// Free range storage we allocated (reference ranges are caller-owned),
	// matching the allocation made in initWithOptions() for this type.
	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}

		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
		} else {
			IOFreeType(reserved, IOMemoryDescriptorReserved);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
2436 
2437 #ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	// Deprecated legacy (non-LP64) entry point; intentionally fatal.
	panic("IOGMD::unmapFromKernel deprecated");
}
2443 
void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	// Deprecated legacy (non-LP64) entry point; intentionally fatal.
	panic("IOGMD::mapIntoKernel deprecated");
}
2449 #endif /* !__LP64__ */
2450 
2451 /*
2452  * getDirection:
2453  *
2454  * Get the direction of the transfer.
2455  */
IODirection
IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
	// Legacy 32-bit clients may have populated _direction directly;
	// honor it when set.
	if (_direction) {
		return _direction;
	}
#endif /* !__LP64__ */
	// Otherwise the direction lives in the option flags.
	return (IODirection) (_flags & kIOMemoryDirectionMask);
}
2466 
2467 /*
2468  * getLength:
2469  *
2470  * Get the length of the transfer (over all ranges).
2471  */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	// Total byte length over all ranges, computed at initialization.
	return _length;
}
2477 
void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	// Client-defined tag; stored verbatim, not interpreted here.
	_tag = tag;
}
2483 
IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	// Returns the tag previously stored via setTag().
	return _tag;
}
2489 
uint64_t
IOMemoryDescriptor::getFlags(void)
{
	// Raw option/state flags as set at initialization time.
	return _flags;
}
2495 
2496 OSObject *
copyContext(const OSSymbol * key) const2497 IOMemoryDescriptor::copyContext(const OSSymbol * key) const
2498 {
2499 	if (reserved && reserved->contextObjects) {
2500 		OSObject * context = reserved->contextObjects->getObject(key);
2501 		if (context) {
2502 			context->retain();
2503 		}
2504 		return context;
2505 	} else {
2506 		return NULL;
2507 	}
2508 }
2509 
2510 OSObject *
copyContext(const char * key) const2511 IOMemoryDescriptor::copyContext(const char * key) const
2512 {
2513 	OSSharedPtr<const OSSymbol> sym = OSSymbol::withCString(key);
2514 	return copyContext(sym.get());
2515 }
2516 
OSObject *
IOMemoryDescriptor::copySharingContext(const char * key) const
{
	OSObject * context = NULL;
	// The sharing context is a dictionary stored under a well-known key.
	OSObject * obj = copyContext(kIOMemoryDescriptorSharingContextKey);
	OSDictionary * dict = OSDynamicCast(OSDictionary, obj);
	if (dict) {
		context = dict->getObject(key);
		if (context) {
			// Retain before releasing the dictionary so the value stays valid.
			context->retain();
		}
	}
	OSSafeReleaseNULL(obj);
	// Caller owns the returned reference (or NULL if not found).
	return context;
}
2532 
2533 void
setContext(const OSSymbol * key,OSObject * obj)2534 IOMemoryDescriptor::setContext(const OSSymbol * key, OSObject * obj)
2535 {
2536 	if (this->reserved == NULL && obj == NULL) {
2537 		// No existing object, and no object to set
2538 		return;
2539 	}
2540 
2541 	IOMemoryDescriptorReserved * reserved = getKernelReserved();
2542 	if (reserved) {
2543 		if (NULL == reserved->contextObjects) {
2544 			reserved->contextObjects = OSDictionary::withCapacity(2);
2545 		}
2546 		if (obj) {
2547 			reserved->contextObjects->setObject(key, obj);
2548 		} else {
2549 			reserved->contextObjects->removeObject(key);
2550 		}
2551 	}
2552 }
2553 
2554 void
setContext(const char * key,OSObject * obj)2555 IOMemoryDescriptor::setContext(const char * key, OSObject * obj)
2556 {
2557 	OSSharedPtr<const OSSymbol> sym = OSSymbol::withCString(key);
2558 	setContext(sym.get(), obj);
2559 }
2560 
OSObject *
IOMemoryDescriptor::copyContext(void) const
{
	// The no-key variant uses kOSBooleanFalse as a reserved sentinel key
	// (paired with setContext(OSObject *) below).
	return copyContext((const OSSymbol *) kOSBooleanFalse);
}
// Bit values for _internalIOMDFlags.
enum {
	kIOMemoryDescriptorInternalFlagsSharing = 0x0001,
};
2569 
2570 void
setSharingContext(const char * key,OSObject * obj)2571 IOMemoryDescriptor::setSharingContext(const char * key, OSObject * obj)
2572 {
2573 	OSSharedPtr<const OSSymbol> sym = OSSymbol::withCString(key);
2574 	OSSharedPtr<OSDictionary> dict = OSDictionary::withCapacity(1);
2575 
2576 	dict->setObject(sym.get(), obj);
2577 	setContext(kIOMemoryDescriptorSharingContextKey, dict.get());
2578 	OSBitOrAtomic16(kIOMemoryDescriptorInternalFlagsSharing, &_internalIOMDFlags);
2579 }
2580 
bool
IOMemoryDescriptor::hasSharingContext(void)
{
	// Cheap bit test; the flag is set atomically by setSharingContext().
	return 0 != (kIOMemoryDescriptorInternalFlagsSharing & _internalIOMDFlags);
}
2586 
void
IOMemoryDescriptor::setContext(OSObject * obj)
{
	// Keyless variant: uses kOSBooleanFalse as a reserved sentinel key
	// (paired with copyContext(void) above).
	setContext((const OSSymbol *) kOSBooleanFalse, obj);
}
2592 
2593 #ifndef __LP64__
2594 #pragma clang diagnostic push
2595 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2596 
2597 // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
{
	addr64_t physAddr = 0;

	// Briefly wire the memory so the physical address is stable for the query.
	if (prepare() == kIOReturnSuccess) {
		physAddr = getPhysicalSegment64( offset, length );
		complete();
	}

	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}
2610 
2611 #pragma clang diagnostic pop
2612 
2613 #endif /* !__LP64__ */
2614 
2615 #if HAS_MTE
2616 /* Ideally this would be a method on IOMD that's overridden by IOGMD, but there's
2617  * ABI considerations with extending the vtable so just make it a free function for now.
2618  */
// Common recovery path for readBytes()/writeBytes() when copypv() reports
// KERN_ABORTED due to an MTE tag check fault.
static void
handleCopyAbortedTCF(void)
{
	/*
	 * Only calls passing through an IOGMD will have a faultable provider, so we check
	 * for one here, as we might have recovered from a tag check fault through e.g.
	 * an IOSubMD that cannot provide one.
	 */
	task_t task_providing_faultable_buffer = current_thread_get_iomd_faultable_access_buffer_provider();
	if (task_providing_faultable_buffer) {
		/*
		 * Register an AST over the victim task so that a proper MTE exception
		 * will be generated when it gets scheduled. The fault handler already
		 * recorded the necessary data that the exception-synthesizing code
		 * will require to create the exception.
		 */
		task_set_ast_mte_synthesize_mach_exception(task_providing_faultable_buffer);
	}
}
2638 #endif /* HAS_MTE */
2639 
/*
 * readBytes:
 *
 * Copy up to 'length' bytes starting at 'offset' within this descriptor into
 * the kernel buffer 'bytes'. Walks the descriptor one physical segment at a
 * time and copies through the physical aperture via copypv(). Returns the
 * number of bytes copied (0 on out-of-range arguments or remote memory).
 */
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount endoffset;
	IOByteCount remaining;

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-host) memory cannot be read through the host aperture.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t        srcAddr64;
		IOByteCount     srcLen;
		int             options = cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap;

		IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
		srcAddr64 = getPhysicalSegment(offset, &srcLen, getPhysSegmentOptions);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		// copypv takes an unsigned int length; clamp to its maximum.
		if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
			srcLen = (UINT_MAX - PAGE_SIZE + 1);
		}

#if HAS_MTE
		if (pmap_is_tagged_page((ppnum_t)atop(srcAddr64))) {
			if (current_thread_get_iomd_faultable_access_buffer_provider() != NULL) {
				/*
				 * We're going to wind up accessing the memory via peeking into the
				 * physical aperture. Our physical aperture access will naturally be
				 * canonically tagged, which will mismatch the correct tag. This option
				 * tells bcopy_phys to actually fixup via LDG the tag. We won't catch
				 * UaFs with this, but any OOB will fault therefore...
				 */
				options |= cppvFixupPhysmapTag;

				/*
				 * ...this flag sets up machinery such that fault on this access will be
				 * recoverable (i.e. this thread will continue execution). We can do that
				 * only when coming through an IOGMD and having a faultable task to blame.
				 */
				options |= cppvDenoteAccessMayFault;

				/*
				 * And if we do fault during the access, it also means we don't have
				 * recourse to read the memory contents.
				 * Unfortunately, consumers of this API expect it to always work, so
				 * in an attempt to minimize risk we'll zero the buffer upfront,
				 * so if we failed to read it'll look as though we just read zeroes.
				 */
				memset((void*)dstAddr, 0, srcLen);
			} else {
				/*
				 * We don't have a task to blame, resort to the unsafe TCO copy.
				 * We could just return EFAULT here, but that would require callers to
				 * actively check for it, which unfortunately may not be the case as
				 * these operations never failed before.
				 *
				 * Defer a proper support to buffered creation of IOMDs.
				 */
				options |= cppvDisableTagCheck;
			}
		}
#endif /* HAS_MTE */

		kern_return_t copy_ret = copypv(srcAddr64, dstAddr, (unsigned int) srcLen, options);
#if HAS_MTE
		/*
		 * copypv recovery handler will only fire in case of a tag check fault. Let's handle
		 * the special case here.
		 */
		if (copy_ret == KERN_ABORTED) {
			handleCopyAbortedTCF();
		}
#else /* HAS_MTE */
#pragma unused(copy_ret)
#endif /* HAS_MTE */

		// Advance both cursors by the amount copied this pass.
		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

	return length - remaining;
}
2756 
/*
 * writeBytes:
 *
 * Copy 'length' bytes from the kernel buffer 'bytes' into this descriptor at
 * 'inoffset', one physical segment at a time via copypv(). Passing a NULL
 * 'bytes' pointer zero-fills the destination instead (bzero_phys). Returns
 * the number of bytes written (0 on bad arguments, read-only prepared
 * memory, or remote memory).
 */
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount endoffset;
	IOByteCount offset = inoffset;

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	// Check that this entire I/O is within the available range
	if ((offset > _length)
	    || os_add_overflow(length, offset, &endoffset)
	    || (endoffset > _length)) {
		assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
		return 0;
	}
	if (kIOMemoryPreparedReadOnly & _flags) {
		return 0;
	}
	if (offset >= _length) {
		return 0;
	}

	// Remote (non-host) memory cannot be written through the host aperture.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) { // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;
		int         options = cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap;

		IOOptionBits getPhysSegmentOptions = kIOMemoryMapperNone;
		dstAddr64 = getPhysicalSegment(offset, &dstLen, getPhysSegmentOptions);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		// copypv takes an unsigned int length; clamp to its maximum.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}

#if HAS_MTE
		if (pmap_is_tagged_page((ppnum_t)atop(dstAddr64))) {
			/* Same drill as readBytes(), please check the comment there for details. */
			if (current_thread_get_iomd_faultable_access_buffer_provider() != NULL) {
				options |= cppvFixupPhysmapTag;
				options |= cppvDenoteAccessMayFault;
			} else {
				options |= cppvDisableTagCheck;
			}
		}
#endif /* HAS_MTE */

		if (!srcAddr) {
			// NULL source: zero-fill this segment instead of copying.
			bzero_phys(dstAddr64, (unsigned int) dstLen);
		} else {
			kern_return_t copy_ret = copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen, options);
#if HAS_MTE
			/*
			 * copypv recovery handler will only fire in case of a tag check fault. Let's handle
			 * the special case here.
			 */
			if (copy_ret == KERN_ABORTED) {
				handleCopyAbortedTCF();
			}
#else /* HAS_MTE */
#pragma unused(copy_ret)
#endif /* HAS_MTE */
			srcAddr   += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	// bzero_phys bypassed copypv's flush; flush caches for the zeroed span.
	if (!srcAddr) {
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
2861 
2862 #ifndef __LP64__
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	// Deprecated legacy (non-LP64) entry point; intentionally fatal.
	panic("IOGMD::setPosition deprecated");
}
2868 #endif /* !__LP64__ */
2869 
// Monotonic generators for preparation and descriptor IDs; 8-byte aligned so
// OSIncrementAtomic64/OSCompareAndSwap64 can operate on them safely.
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2872 
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	ioGMDData *dataP;

	// A preparation ID only exists while the memory is wired.
	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	// Physical descriptors keep their ID in the reserved area instead of dataP.
	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	// Lazily assign an ID; the CAS ensures only one thread's new ID wins
	// if several race here, and losers simply read the winner's value.
	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
	}
	return dataP->fPreparationID;
}
2898 
2899 void
cleanKernelReserved(IOMemoryDescriptorReserved * reserved)2900 IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2901 {
2902 	if (reserved->creator) {
2903 		task_deallocate(reserved->creator);
2904 		reserved->creator = NULL;
2905 	}
2906 
2907 	reserved->contextObjects = NULL;
2908 }
2909 
2910 IOMemoryDescriptorReserved *
getKernelReserved(void)2911 IOMemoryDescriptor::getKernelReserved( void )
2912 {
2913 	if (!reserved) {
2914 		reserved = IOMallocType(IOMemoryDescriptorReserved);
2915 	}
2916 	return reserved;
2917 }
2918 
2919 void
setPreparationID(void)2920 IOMemoryDescriptor::setPreparationID( void )
2921 {
2922 	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2923 		SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2924 		OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2925 	}
2926 }
2927 
2928 uint64_t
getPreparationID(void)2929 IOMemoryDescriptor::getPreparationID( void )
2930 {
2931 	if (reserved) {
2932 		return reserved->preparationID;
2933 	} else {
2934 		return kIOPreparationIDUnsupported;
2935 	}
2936 }
2937 
2938 void
setDescriptorID(void)2939 IOMemoryDescriptor::setDescriptorID( void )
2940 {
2941 	if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2942 		SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2943 		OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2944 	}
2945 }
2946 
2947 uint64_t
getDescriptorID(void)2948 IOMemoryDescriptor::getDescriptorID( void )
2949 {
2950 	setDescriptorID();
2951 
2952 	if (reserved) {
2953 		return reserved->descriptorID;
2954 	} else {
2955 		return kIODescriptorIDInvalid;
2956 	}
2957 }
2958 
2959 IOReturn
ktraceEmitPhysicalSegments(void)2960 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2961 {
2962 	if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2963 		return kIOReturnSuccess;
2964 	}
2965 
2966 	assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2967 	if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2968 		return kIOReturnBadArgument;
2969 	}
2970 
2971 	uint64_t descriptorID = getDescriptorID();
2972 	assert(descriptorID != kIODescriptorIDInvalid);
2973 	if (getDescriptorID() == kIODescriptorIDInvalid) {
2974 		return kIOReturnBadArgument;
2975 	}
2976 
2977 	IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2978 
2979 #if __LP64__
2980 	static const uint8_t num_segments_page = 8;
2981 #else
2982 	static const uint8_t num_segments_page = 4;
2983 #endif
2984 	static const uint8_t num_segments_long = 2;
2985 
2986 	IOPhysicalAddress segments_page[num_segments_page];
2987 	IOPhysicalRange   segments_long[num_segments_long];
2988 	memset(segments_page, UINT32_MAX, sizeof(segments_page));
2989 	memset(segments_long, 0, sizeof(segments_long));
2990 
2991 	uint8_t segment_page_idx = 0;
2992 	uint8_t segment_long_idx = 0;
2993 
2994 	IOPhysicalRange physical_segment;
2995 	for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2996 		physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2997 
2998 		if (physical_segment.length == 0) {
2999 			break;
3000 		}
3001 
3002 		/**
3003 		 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages.  To optimize for trace
3004 		 * buffer memory, pack segment events according to the following.
3005 		 *
3006 		 * Mappings must be emitted in ascending order starting from offset 0.  Mappings can be associated with the previous
3007 		 * IOMDPA_MAPPED event emitted on by the current thread_id.
3008 		 *
3009 		 * IOMDPA_SEGMENTS_PAGE        = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
3010 		 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
3011 		 * - unmapped pages will have a ppn of MAX_INT_32
3012 		 * IOMDPA_SEGMENTS_LONG	= up to 2 virtually contiguous mappings of variable length
3013 		 * - address_0, length_0, address_0, length_1
3014 		 * - unmapped pages will have an address of 0
3015 		 *
3016 		 * During each iteration do the following depending on the length of the mapping:
3017 		 * 1. add the current segment to the appropriate queue of pending segments
3018 		 * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
3019 		 * 1a. if FALSE emit and reset all events in the previous queue
3020 		 * 2. check if we have filled up the current queue of pending events
3021 		 * 2a. if TRUE emit and reset all events in the pending queue
3022 		 * 3. after completing all iterations emit events in the current queue
3023 		 */
3024 
3025 		bool emit_page = false;
3026 		bool emit_long = false;
3027 		if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
3028 			segments_page[segment_page_idx] = physical_segment.address;
3029 			segment_page_idx++;
3030 
3031 			emit_long = segment_long_idx != 0;
3032 			emit_page = segment_page_idx == num_segments_page;
3033 
3034 			if (os_unlikely(emit_long)) {
3035 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
3036 				    segments_long[0].address, segments_long[0].length,
3037 				    segments_long[1].address, segments_long[1].length);
3038 			}
3039 
3040 			if (os_unlikely(emit_page)) {
3041 #if __LP64__
3042 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
3043 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
3044 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
3045 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
3046 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
3047 #else
3048 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
3049 				    (ppnum_t) atop_32(segments_page[1]),
3050 				    (ppnum_t) atop_32(segments_page[2]),
3051 				    (ppnum_t) atop_32(segments_page[3]),
3052 				    (ppnum_t) atop_32(segments_page[4]));
3053 #endif
3054 			}
3055 		} else {
3056 			segments_long[segment_long_idx] = physical_segment;
3057 			segment_long_idx++;
3058 
3059 			emit_page = segment_page_idx != 0;
3060 			emit_long = segment_long_idx == num_segments_long;
3061 
3062 			if (os_unlikely(emit_page)) {
3063 #if __LP64__
3064 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
3065 				    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
3066 				    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
3067 				    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
3068 				    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
3069 #else
3070 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
3071 				    (ppnum_t) atop_32(segments_page[1]),
3072 				    (ppnum_t) atop_32(segments_page[2]),
3073 				    (ppnum_t) atop_32(segments_page[3]),
3074 				    (ppnum_t) atop_32(segments_page[4]));
3075 #endif
3076 			}
3077 
3078 			if (emit_long) {
3079 				IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
3080 				    segments_long[0].address, segments_long[0].length,
3081 				    segments_long[1].address, segments_long[1].length);
3082 			}
3083 		}
3084 
3085 		if (os_unlikely(emit_page)) {
3086 			memset(segments_page, UINT32_MAX, sizeof(segments_page));
3087 			segment_page_idx = 0;
3088 		}
3089 
3090 		if (os_unlikely(emit_long)) {
3091 			memset(segments_long, 0, sizeof(segments_long));
3092 			segment_long_idx = 0;
3093 		}
3094 	}
3095 
3096 	if (segment_page_idx != 0) {
3097 		assert(segment_long_idx == 0);
3098 #if __LP64__
3099 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
3100 		    ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
3101 		    ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
3102 		    ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
3103 		    ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
3104 #else
3105 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
3106 		    (ppnum_t) atop_32(segments_page[1]),
3107 		    (ppnum_t) atop_32(segments_page[2]),
3108 		    (ppnum_t) atop_32(segments_page[3]),
3109 		    (ppnum_t) atop_32(segments_page[4]));
3110 #endif
3111 	} else if (segment_long_idx != 0) {
3112 		assert(segment_page_idx == 0);
3113 		IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
3114 		    segments_long[0].address, segments_long[0].length,
3115 		    segments_long[1].address, segments_long[1].length);
3116 	}
3117 
3118 	return kIOReturnSuccess;
3119 }
3120 
// Record the VM allocation tags to account this descriptor's mappings
// against: kernelTag for kernel maps, userTag for user maps (consumed by
// getVMTag()).
void
IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
{
	_kernelTag = (vm_tag_t) kernelTag;
	_userTag   = (vm_tag_t) userTag;
}
3127 
3128 uint32_t
getVMTag(vm_map_t map)3129 IOMemoryDescriptor::getVMTag(vm_map_t map)
3130 {
3131 	if (vm_kernel_map_is_kernel(map)) {
3132 		if (VM_KERN_MEMORY_NONE != _kernelTag) {
3133 			return (uint32_t) _kernelTag;
3134 		}
3135 	} else {
3136 		if (VM_KERN_MEMORY_NONE != _userTag) {
3137 			return (uint32_t) _userTag;
3138 		}
3139 	}
3140 	return IOMemoryTag(map);
3141 }
3142 
/*
 * Back end for IODMACommand and the segment walkers: a single dispatcher
 * keyed by 'op' (DMA map/unmap, add map spec, get characteristics, DMA
 * active refcounting, walk segments). 'vData'/'dataSize' carry the per-op
 * argument structure; a too-small structure yields kIOReturnUnderrun.
 * Any op not handled above the walk section falls through to segment
 * walking, which resumes from cached state in the caller-provided
 * InternalState when possible.
 */
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOReturn err = kIOReturnSuccess;
	DMACommandOps params;
	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
	ioGMDData *dataP;

	// Split the per-op parameter bits from the operation selector.
	// NOTE(review): the trailing "& op" is redundant — (op & ~mask & op)
	// equals (op & ~mask); kept as-is.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries && data->fMapper) {
			bool remap, keepMap;
			dataP = getDataP(_memoryEntries);

			// Tighten the recorded DMA constraints to the most
			// restrictive ever requested for this descriptor.
			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
			}
			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
			}

			// Only a whole-length mapping made by the system mapper is
			// worth caching on the IOMD itself.
			keepMap = (data->fMapper == gIOSystemMapper);
			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockLock(_prepareLock);
			}

			// Remap if the cached mapping cannot be reused: partial/other
			// mapper, out of the allowed address range, or under-aligned.
			remap = (!keepMap);
			remap |= (dataP->fDMAMapNumAddressBits < 64)
			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
			remap |= (dataP->fDMAMapAlignment > page_size);

			if (remap || !dataP->fMappedBaseValid) {
				err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
					dataP->fMappedBase      = data->fAlloc;
					dataP->fMappedBaseValid = true;
					dataP->fMappedLength    = data->fAllocLength;
					data->fAllocLength      = 0;    // IOMD owns the alloc now
				}
			} else {
				data->fAlloc = dataP->fMappedBase;
				data->fAllocLength = 0;         // give out IOMD map
				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
			}

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockUnlock(_prepareLock);
			}
		}
		return err;
	}
	if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		if (_pages) {
			err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
		}

		// NOTE(review): the dmaUnmap() result in 'err' is discarded here
		// and success is always reported — verify this is intentional.
		return kIOReturnSuccess;
	}

	if (kIOMDAddDMAMapSpec == op) {
		if (dataSize < sizeof(IODMAMapSpecification)) {
			return kIOReturnUnderrun;
		}

		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries) {
			dataP = getDataP(_memoryEntries);
			// Merge: keep the most restrictive constraints seen so far.
			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->numAddressBits;
			}
			if (data->alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->alignment;
			}
		}
		return kIOReturnSuccess;
	}

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		// Report length, range count, page count, direction, and (when
		// prepared) highest page and single-IOPL page alignment.
		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = _length;
		data->fSGCount = _rangesCount;
		data->fPages = _pages;
		data->fDirection = getDirection();
		if (!_wireCount) {
			data->fIsPrepared = false;
		} else {
			data->fIsPrepared = true;
			data->fHighestPage = _highestPage;
			if (_memoryEntries) {
				dataP = getDataP(_memoryEntries);
				ioPLBlock *ioplList = getIOPLList(dataP);
				UInt count = getNumIOPL(_memoryEntries, dataP);
				if (count == 1) {
					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
				}
			}
		}

		return kIOReturnSuccess;
	} else if (kIOMDDMAActive == op) {
		// params != 0 => take a DMA reference; params == 0 => drop one.
		if (params) {
			int16_t prior;
			prior = OSAddAtomic16(1, &md->_dmaReferences);
			if (!prior) {
				md->_mapName = NULL;
			}
		} else {
			if (md->_dmaReferences) {
				OSAddAtomic16(-1, &md->_dmaReferences);
			} else {
				panic("_dmaReferences underflow");
			}
		}
	} else if (kIOMDWalkSegments != op) {
		return kIOReturnBadArgument;
	}

	// Get the next segment
	struct InternalState {
		IOMDDMAWalkSegmentArgs fIO;
		mach_vm_size_t fOffset2Index;
		mach_vm_size_t fNextOffset;
		UInt fIndex;
	} *isP;

	// Find the next segment
	if (dataSize < sizeof(*isP)) {
		return kIOReturnUnderrun;
	}

	isP = (InternalState *) vData;
	uint64_t offset = isP->fIO.fOffset;
	uint8_t mapped = isP->fIO.fMapped;
	uint64_t mappedBase;

	// Remote (non-host-resident) memory has no host mapping to walk.
	if (mapped && (kIOMemoryRemote & _flags)) {
		return kIOReturnNotAttached;
	}

	// A mapped walk with the system mapper may first need to establish
	// the IOMD-cached DMA mapping.
	if (IOMapper::gSystem && mapped
	    && (!(kIOMemoryHostOnly & _flags))
	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		dataP = getDataP(_memoryEntries);
		if (dataP->fMapper) {
			IODMAMapSpecification mapSpec;
			bzero(&mapSpec, sizeof(mapSpec));
			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
			mapSpec.alignment = dataP->fDMAMapAlignment;
			err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
			if (kIOReturnSuccess != err) {
				return err;
			}
			dataP->fMappedBaseValid = true;
		}
	}

	// Resolve the mapped base, or fall back to an unmapped walk when no
	// usable IOMD mapping exists.
	if (mapped) {
		if (IOMapper::gSystem
		    && (!(kIOMemoryHostOnly & _flags))
		    && _memoryEntries
		    && (dataP = getDataP(_memoryEntries))
		    && dataP->fMappedBaseValid) {
			mappedBase = dataP->fMappedBase;
		} else {
			mapped = 0;
		}
	}

	if (offset >= _length) {
		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
	}

	// Validate the previous offset
	UInt ind;
	mach_vm_size_t off2Ind = isP->fOffset2Index;
	if (!params
	    && offset
	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
		// Sequential walk: resume from the cached index.
		ind = isP->fIndex;
	} else {
		ind = off2Ind = 0; // Start from beginning
	}
	mach_vm_size_t length;
	UInt64 address;

	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
		// Physical address based memory descriptor
		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}

		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#ifndef __LP64__
	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
		// Physical address based memory descriptor
		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length   = off2Ind - offset;
		address  = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}
		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#endif /* !__LP64__ */
	else {
		// IOPL-backed (wired virtual) memory: locate the segment via the
		// iopl info blocks and their page lists.
		do {
			if (!_wireCount) {
				panic("IOGMD: not wired for the IODMACommand");
			}

			assert(_memoryEntries);

			dataP = getDataP(_memoryEntries);
			const ioPLBlock *ioplList = getIOPLList(dataP);
			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
			upl_page_info_t *pageList = getPageList(dataP);

			assert(numIOPLs > 0);

			// Scan through iopl info blocks looking for block containing offset
			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
				ind++;
			}

			// Go back to actual range as search goes past it
			ioPLBlock ioplInfo = ioplList[ind - 1];
			off2Ind = ioplInfo.fIOMDOffset;

			if (ind < numIOPLs) {
				length = ioplList[ind].fIOMDOffset;
			} else {
				length = _length;
			}
			length -= offset;       // Remainder within iopl

			// Subtract offset till this iopl in total list
			offset -= off2Ind;

			// If a mapped address is requested and this is a pre-mapped IOPL
			// then just need to compute an offset relative to the mapped base.
			if (mapped) {
				offset += (ioplInfo.fPageOffset & PAGE_MASK);
				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
				continue; // Done leave do/while(false) now
			}

			// The offset is rebased into the current iopl.
			// Now add the iopl 1st page offset.
			offset += ioplInfo.fPageOffset;

			// For external UPLs the fPageInfo field points directly to
			// the upl's upl_page_info_t array.
			if (ioplInfo.fFlags & kIOPLExternUPL) {
				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
			} else {
				pageList = &pageList[ioplInfo.fPageInfo];
			}

			// Check for direct device non-paged memory
			if (ioplInfo.fFlags & kIOPLOnDevice) {
				address = ptoa_64(pageList->phys_addr) + offset;
				continue; // Done leave do/while(false) now
			}

			// Now we need compute the index into the pageList
			UInt pageInd = atop_32(offset);
			offset &= PAGE_MASK;

			// Compute the starting address of this segment
			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
			if (!pageAddr) {
				panic("!pageList phys_addr");
			}

			address = ptoa_64(pageAddr) + offset;

			// length is currently set to the length of the remainider of the iopl.
			// We need to check that the remainder of the iopl is contiguous.
			// This is indicated by pageList[ind].phys_addr being sequential.
			IOByteCount contigLength = PAGE_SIZE - offset;
			while (contigLength < length
			    && ++pageAddr == pageList[++pageInd].phys_addr) {
				contigLength += PAGE_SIZE;
			}

			if (contigLength < length) {
				length = contigLength;
			}

			assert(address);
			assert(length);
		} while (false);
	}

	// Update return values and state
	isP->fIO.fIOVMAddr = address;
	isP->fIO.fLength   = length;
	isP->fIndex        = ind;
	isP->fOffset2Index = off2Ind;
	isP->fNextOffset   = isP->fIO.fOffset + length;

	return kIOReturnSuccess;
}
3526 
/*
 * Return the physical (or source-virtual, with _kIOMemorySourceSegment)
 * address of the segment containing 'offset', and via *lengthOfSegment the
 * length of the contiguous run starting there. Returns 0 (and zero length)
 * when offset is beyond the descriptor or no address can be produced.
 */
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		// Normal path: walk one segment via dmaCommandOperation().
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				// Translate back to a true physical address, then grow the
				// segment page by page while the translation stays contiguous.
				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
3633 
3634 IOByteCount
readBytes(IOByteCount offset,void * bytes,IOByteCount length)3635 IOGeneralMemoryDescriptor::readBytes
3636 (IOByteCount offset, void *bytes, IOByteCount length)
3637 {
3638 #if HAS_MTE
3639 	/* We might fault while accessing the wired memory if the underlying memory
3640 	 * is tagged and someone else has since changed the tag.
3641 	 * We need to set up context on the current thread so that we can keep track
3642 	 * of who caused us to fault within the fault handler.
3643 	 * Here, 'who caused us to fault' would be the other end of the true
3644 	 * share (i.e. the task which handed us the memory in the first place).
3645 	 * Ideally we'd only set up this context just surrounding the potentially
3646 	 * faulting access, but the access happens deeper within IOMD machinery
3647 	 * by which point we've lost this _task ivar. So, just hold onto it here.
3648 	 * (Note that it's possible for the culprit who changed the tag to be
3649 	 * another party holding a true share to the mapping, separate from the task
3650 	 * which handed us the memory that backs the IOMD, but we're not going to
3651 	 * worry too much about this case for now.)
3652 	 */
3653 	/* Note that we don't need any special handling for when _task is NULL. */
3654 	current_thread_enter_iomd_faultable_access_with_buffer_provider(_task);
3655 #endif /* HAS_MTE */
3656 	IOByteCount count = super::readBytes(offset, bytes, length);
3657 #if HAS_MTE
3658 	current_thread_exit_iomd_faultable_access();
3659 #endif /* HAS_MTE */
3660 	return count;
3661 }
3662 
3663 IOByteCount
writeBytes(IOByteCount offset,const void * bytes,IOByteCount withLength)3664 IOGeneralMemoryDescriptor::writeBytes
3665 (IOByteCount offset, const void* bytes, IOByteCount withLength)
3666 {
3667 #if HAS_MTE
3668 	/* We might fault while accessing the wired memory if the underlying memory
3669 	 * is tagged and someone else has since changed the tag.
3670 	 * We need to set up context on the current thread so that we can keep track
3671 	 * of who caused us to fault within the fault handler.
3672 	 * Here, 'who caused us to fault' would be the other end of the true
3673 	 * share (i.e. the task which handed us the memory in the first place).
3674 	 * Ideally we'd only set up this context just surrounding the potentially
3675 	 * faulting access, but the access happens deeper within IOMD machinery
3676 	 * by which point we've lost this _task ivar. So, just hold onto it here.
3677 	 * (Note that it's possible for the culprit who changed the tag to be
3678 	 * another party holding a true share to the mapping, separate from the task
3679 	 * which handed us the memory that backs the IOMD, but we're not going to
3680 	 * worry too much about this case for now.)
3681 	 */
3682 	/* Note that we don't need any special handling for when _task is NULL. */
3683 	current_thread_enter_iomd_faultable_access_with_buffer_provider(_task);
3684 #endif /* HAS_MTE */
3685 	IOByteCount count = super::writeBytes(offset, bytes, withLength);
3686 #if HAS_MTE
3687 	current_thread_exit_iomd_faultable_access();
3688 #endif /* HAS_MTE */
3689 	return count;
3690 }
3691 
3692 #ifndef __LP64__
3693 #pragma clang diagnostic push
3694 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3695 
3696 addr64_t
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment,IOOptionBits options)3697 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3698 {
3699 	addr64_t address = 0;
3700 
3701 	if (options & _kIOMemorySourceSegment) {
3702 		address = getSourceSegment(offset, lengthOfSegment);
3703 	} else if (options & kIOMemoryMapperNone) {
3704 		address = getPhysicalSegment64(offset, lengthOfSegment);
3705 	} else {
3706 		address = getPhysicalSegment(offset, lengthOfSegment);
3707 	}
3708 
3709 	return address;
3710 }
3711 #pragma clang diagnostic pop
3712 
// Deprecated 32-bit API: physical segment lookup bypassing any system
// mapper (raw physical address).
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
}
3718 
3719 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3720 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3721 {
3722 	addr64_t    address = 0;
3723 	IOByteCount length  = 0;
3724 
3725 	address = getPhysicalSegment(offset, lengthOfSegment, 0);
3726 
3727 	if (lengthOfSegment) {
3728 		length = *lengthOfSegment;
3729 	}
3730 
3731 	if ((address + length) > 0x100000000ULL) {
3732 		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3733 		    address, (long) length, (getMetaClass())->getClassName());
3734 	}
3735 
3736 	return (IOPhysicalAddress) address;
3737 }
3738 
/*
 * Deprecated 32-bit API: translate the (possibly mapper-virtual) 32-bit
 * segment to a true 64-bit physical address, shrinking/growing the reported
 * length to the contiguous physical run behind it.
 */
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	IOPhysicalAddress phys32;
	IOByteCount       length;
	addr64_t          phys64;
	IOMapper *        mapper = NULL;

	phys32 = getPhysicalSegment(offset, lengthOfSegment);
	if (!phys32) {
		return 0;
	}

	if (gIOSystemMapper) {
		mapper = gIOSystemMapper;
	}

	if (mapper) {
		IOByteCount origLen;

		// Translate the first page, then extend page by page while the
		// physical translation stays contiguous, capping at the original
		// segment length.
		phys64 = mapper->mapToPhysicalAddress(phys32);
		origLen = *lengthOfSegment;
		length = page_size - (phys64 & (page_size - 1));
		while ((length < origLen)
		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
			length += page_size;
		}
		if (length > origLen) {
			length = origLen;
		}

		*lengthOfSegment = length;
	} else {
		// No system mapper: the 32-bit address is already physical.
		phys64 = (addr64_t) phys32;
	}

	return phys64;
}
3777 
3778 IOPhysicalAddress
getPhysicalSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3779 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3780 {
3781 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3782 }
3783 
3784 IOPhysicalAddress
getSourceSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3785 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3786 {
3787 	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3788 }
3789 
3790 #pragma clang diagnostic push
3791 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3792 
3793 void *
getVirtualSegment(IOByteCount offset,IOByteCount * lengthOfSegment)3794 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3795     IOByteCount * lengthOfSegment)
3796 {
3797 	if (_task == kernel_task) {
3798 		return (void *) getSourceSegment(offset, lengthOfSegment);
3799 	} else {
3800 		panic("IOGMD::getVirtualSegment deprecated");
3801 	}
3802 
3803 	return NULL;
3804 }
3805 #pragma clang diagnostic pop
3806 #endif /* !__LP64__ */
3807 
/*
 * Generic backend for IODMACommand operations on descriptors that do not
 * override it. Decodes 'op' and services characteristic queries, segment
 * walks, and DMA map/unmap requests via vData (size-checked against the
 * expected argument struct for each op).
 */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	// Cast away const: the segment walk below uses non-const accessors
	// but does not logically modify the descriptor.
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps params;
	IOReturn err;

	// Separate the flag bits from the base operation code.
	// NOTE(review): 'params' is computed but never read in this method.
	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fSGCount = 0;
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset  = (IOByteCount) data->fOffset;
		IOPhysicalLength length, nextLength;
		addr64_t         addr, nextAddr;

		// This generic path only hands out raw physical addresses.
		if (data->fMapped) {
			panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
		}
		// Coalesce physically contiguous segments into one result.
		addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		offset += length;
		while (offset < getLength()) {
			nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
			if ((addr + length) != nextAddr) {
				break;
			}
			length += nextLength;
			offset += nextLength;
		}
		data->fIOVMAddr = addr;
		data->fLength   = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		// NOTE(review): 'err' from dmaUnmap is discarded and success is
		// returned unconditionally (unlike the kIOMDDMAMap branch) —
		// confirm this is intentional.
		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
3879 
/*
 * Change the purgeable state of the memory backing this descriptor.
 * With a memory reference (_memRef) the work is delegated to the base
 * class; otherwise the single owning task's vm_map is asked directly via
 * vm_map_purgable_control(). On success, *oldState (if supplied) receives
 * the previous state translated back to IOKit state bits.
 */
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	// Remote (other-endpoint) memory cannot be manipulated here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		// do/while(false) gives the error paths a single exit that still
		// runs the UNLOCK below.
		do{
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				// Pageable buffer memory lives in the IOKit pageable maps,
				// not the kernel map; it must go through a _memRef.
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges vec = _ranges;
			IOOptionBits type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0, _task);

			// Translate IOKit purgeable state to VM control/state values.
			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					// Translate the returned VM state back to IOKit bits.
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		}while (false);
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
3943 
3944 IOReturn
setPurgeable(IOOptionBits newState,IOOptionBits * oldState)3945 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3946     IOOptionBits * oldState )
3947 {
3948 	IOReturn err = kIOReturnNotReady;
3949 
3950 	if (kIOMemoryThreadSafe & _flags) {
3951 		LOCK;
3952 	}
3953 	if (_memRef) {
3954 		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3955 	}
3956 	if (kIOMemoryThreadSafe & _flags) {
3957 		UNLOCK;
3958 	}
3959 
3960 	return err;
3961 }
3962 
3963 IOReturn
setOwnership(task_t newOwner,int newLedgerTag,IOOptionBits newLedgerOptions)3964 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3965     int newLedgerTag,
3966     IOOptionBits newLedgerOptions )
3967 {
3968 	IOReturn      err = kIOReturnSuccess;
3969 
3970 	assert(!(kIOMemoryRemote & _flags));
3971 	if (kIOMemoryRemote & _flags) {
3972 		return kIOReturnNotAttached;
3973 	}
3974 
3975 	if (iokit_iomd_setownership_enabled == FALSE) {
3976 		return kIOReturnUnsupported;
3977 	}
3978 
3979 	if (_memRef) {
3980 		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3981 	} else {
3982 		err = kIOReturnUnsupported;
3983 	}
3984 
3985 	return err;
3986 }
3987 
/*
 * Base-class ownership transfer. Delegates to the memory reference when
 * present; otherwise forwards to the sub/multi memory descriptor
 * subclasses, which recurse into their constituent descriptors.
 */
IOReturn
IOMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
	IOReturn err = kIOReturnNotReady;

	// Remote (other-endpoint) memory cannot be manipulated here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Honor the global kill switch for ownership transfers.
	if (iokit_iomd_setownership_enabled == FALSE) {
		return kIOReturnUnsupported;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
	} else {
		// No memRef: only the aggregating subclasses know how to recurse.
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor   * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
4024 
4025 
/*
 * Compute the page-rounded length a DMA mapping of this descriptor would
 * occupy, and optionally (via *offset) the descriptor's offset within its
 * first page. With a memory reference the computation is delegated;
 * otherwise the source segments are walked and each segment's page-rounded
 * span is accumulated.
 */
uint64_t
IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
{
	uint64_t length;

	if (_memRef) {
		length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
	} else {
		IOByteCount       iterate, segLen;
		IOPhysicalAddress sourceAddr, sourceAlign;

		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		length = 0;
		iterate = 0;
		while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
			sourceAlign = (sourceAddr & page_mask);
			// The first segment's in-page alignment is the map offset.
			if (offset && !iterate) {
				*offset = sourceAlign;
			}
			// Round each segment out to whole pages.
			length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
			iterate += segLen;
		}
		// No segments at all (e.g. a zero-address range): fall back to the
		// descriptor's nominal length at offset 0.
		if (!iterate) {
			length = getLength();
			if (offset) {
				*offset = 0;
			}
		}
		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return length;
}
4063 
4064 IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount,IOByteCount * swappedPageCount)4065 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
4066     IOByteCount * dirtyPageCount,
4067     IOByteCount * swappedPageCount )
4068 {
4069 	IOReturn err = kIOReturnNotReady;
4070 
4071 	assert(!(kIOMemoryRemote & _flags));
4072 	if (kIOMemoryRemote & _flags) {
4073 		return kIOReturnNotAttached;
4074 	}
4075 
4076 	if (kIOMemoryThreadSafe & _flags) {
4077 		LOCK;
4078 	}
4079 	if (_memRef) {
4080 		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount, swappedPageCount);
4081 	} else {
4082 		IOMultiMemoryDescriptor * mmd;
4083 		IOSubMemoryDescriptor   * smd;
4084 		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
4085 			err = smd->getPageCounts(residentPageCount, dirtyPageCount, swappedPageCount);
4086 		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
4087 			err = mmd->getPageCounts(residentPageCount, dirtyPageCount, swappedPageCount);
4088 		}
4089 	}
4090 	if (kIOMemoryThreadSafe & _flags) {
4091 		UNLOCK;
4092 	}
4093 
4094 	return err;
4095 }
4096 
4097 IOReturn
getPageCounts(IOByteCount * residentPageCount,IOByteCount * dirtyPageCount)4098 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
4099     IOByteCount * dirtyPageCount )
4100 {
4101 	return getPageCounts(residentPageCount, dirtyPageCount, NULL);
4102 }
4103 
4104 
4105 #if defined(__arm64__)
4106 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
4107 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
4108 #else /* defined(__arm64__) */
4109 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
4110 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
4111 #endif /* defined(__arm64__) */
4112 
4113 static void
SetEncryptOp(addr64_t pa,unsigned int count)4114 SetEncryptOp(addr64_t pa, unsigned int count)
4115 {
4116 	ppnum_t page, end;
4117 
4118 	page = (ppnum_t) atop_64(round_page_64(pa));
4119 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
4120 	for (; page < end; page++) {
4121 		pmap_clear_noencrypt(page);
4122 	}
4123 }
4124 
4125 static void
ClearEncryptOp(addr64_t pa,unsigned int count)4126 ClearEncryptOp(addr64_t pa, unsigned int count)
4127 {
4128 	ppnum_t page, end;
4129 
4130 	page = (ppnum_t) atop_64(round_page_64(pa));
4131 	end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
4132 	for (; page < end; page++) {
4133 		pmap_set_noencrypt(page);
4134 	}
4135 }
4136 
/*
 * Perform a cache-maintenance or encryption-attribute operation over the
 * physical pages backing [offset, offset+length) of this descriptor.
 * The requested operation selects a per-segment worker function which is
 * then applied to each physical segment in turn.
 */
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
{
	IOByteCount remaining;
	unsigned int res;
	void (*func)(addr64_t pa, unsigned int count) = NULL;
#if defined(__arm64__)
	// arm64 workers take extra remaining/result arguments so they can
	// signal "whole job done" back through 'res'.
	void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
#endif

	// Remote (other-endpoint) memory cannot be operated on here.
	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	// Select the worker for the requested operation.
	switch (options) {
	case kIOMemoryIncoherentIOFlush:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
		// Coherent-I/O hardware: one degenerate call and we are done.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_flush64;
		break;
#endif /* defined(__arm64__) */
	case kIOMemoryIncoherentIOStore:
#if defined(__arm64__)
		func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
		// Coherent-I/O hardware: one degenerate call and we are done.
		func_ext(0, 0, 0, &res);
		return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
		break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm64__) */
		func = &dcache_incoherent_io_store64;
		break;
#endif /* defined(__arm64__) */

	case kIOMemorySetEncrypted:
		func = &SetEncryptOp;
		break;
	case kIOMemoryClearEncrypted:
		func = &ClearEncryptOp;
		break;
	}

	// Unknown operation: no worker was selected.
#if defined(__arm64__)
	if ((func == NULL) && (func_ext == NULL)) {
		return kIOReturnUnsupported;
	}
#else /* defined(__arm64__) */
	if (!func) {
		return kIOReturnUnsupported;
	}
#endif /* defined(__arm64__) */

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	res = 0x0UL;
	// Clip the request to the descriptor's length, then walk segments.
	remaining = length = min(length, getLength() - offset);
	while (remaining) {
		// (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}
		// Clamp both values so the casts to the workers' 32-bit
		// 'unsigned int' parameters below cannot truncate.
		if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
			dstLen = (UINT_MAX - PAGE_SIZE + 1);
		}
		if (remaining > UINT_MAX) {
			remaining = UINT_MAX;
		}

#if defined(__arm64__)
		if (func) {
			(*func)(dstAddr64, (unsigned int) dstLen);
		}
		if (func_ext) {
			(*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
			// Non-zero result: the worker reports the remaining work is
			// already complete.
			if (res != 0x0UL) {
				remaining = 0;
				break;
			}
		}
#else /* defined(__arm64__) */
		(*func)(dstAddr64, (unsigned int) dstLen);
#endif /* defined(__arm64__) */

		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
}
4251 
4252 /*
4253  *
4254  */
4255 
4256 #if defined(__i386__) || defined(__x86_64__)
4257 
4258 extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4259 
4260 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4261  * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4262  * kernel non-text data -- should we just add another range instead?
4263  */
4264 #define io_kernel_static_start  vm_kernel_stext
4265 #define io_kernel_static_end    (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4266 
4267 #elif defined(__arm64__)
4268 
4269 extern vm_offset_t              static_memory_end;
4270 
4271 #if defined(__arm64__)
4272 #define io_kernel_static_start vm_kext_base
4273 #else /* defined(__arm64__) */
4274 #define io_kernel_static_start vm_kernel_stext
4275 #endif /* defined(__arm64__) */
4276 
4277 #define io_kernel_static_end    static_memory_end
4278 
4279 #else
4280 #error io_kernel_static_end is undefined for this architecture
4281 #endif
4282 
/*
 * Build a UPL-style page list for statically mapped kernel memory (which
 * needs no real UPL — the pages are permanently wired). Fills page_list
 * with the physical page numbers found via pmap_find_phys(), reports the
 * highest physical page seen, and trims *upl_size to the page capacity of
 * the caller's list. *upl is always returned NULL. Fails with
 * kIOReturnVMError if any page in the range has no physical translation.
 */
static kern_return_t
io_get_kernel_static_upl(
	vm_map_t                /* map */,
	uintptr_t               offset,
	upl_size_t              *upl_size,
	unsigned int            *page_offset,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	ppnum_t                 *highest_page)
{
	unsigned int pageCount, page;
	ppnum_t phys;
	ppnum_t highestPage = 0;

	// Page count covering the request, clipped to the caller's list size.
	pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
	if (pageCount > *count) {
		pageCount = *count;
	}
	*upl_size = (upl_size_t) ptoa_64(pageCount);

	*upl = NULL;
	*page_offset = ((unsigned int) page_mask & offset);

	for (page = 0; page < pageCount; page++) {
		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
		if (!phys) {
			// No translation: static kernel memory should always be
			// mapped, so report failure below.
			break;
		}
		page_list[page].phys_addr = phys;
		page_list[page].free_when_done = 0;
		page_list[page].absent    = 0;
		page_list[page].dirty     = 0;
		page_list[page].precious  = 0;
		page_list[page].device    = 0;
		if (phys > highestPage) {
			highestPage = phys;
		}
	}

	*highest_page = highestPage;

	// Success only if every requested page had a physical translation.
	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
}
4327 
/*
 * Wire down the virtual memory backing this descriptor for the given
 * direction, creating one or more UPLs per range and recording them as
 * ioPLBlocks in _memoryEntries. On the first wire (_wireCount == 0) this
 * walks every range (or every _memRef entry), creates UPLs sized to the
 * physical span of each chunk, and tracks the highest physical page seen.
 * On failure, all UPLs created so far are aborted and the iopl list is
 * reset. Callers are expected to manage _wireCount.
 */
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOReturn error = kIOReturnSuccess;
	ioGMDData *dataP;
	upl_page_info_array_t pageInfo;
	ppnum_t mapBase;
	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	mach_vm_size_t numBytesWired = 0;

	assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

	// No explicit direction requested: fall back to the descriptor's own.
	if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
		forDirection = (IODirection) (forDirection | getDirection());
	}

	dataP = getDataP(_memoryEntries);
	upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
	switch (kIODirectionOutIn & forDirection) {
	case kIODirectionOut:
		// Pages do not need to be marked as dirty on commit
		uplFlags = UPL_COPYOUT_FROM;
		dataP->fDMAAccess = kIODMAMapReadAccess;
		break;

	case kIODirectionIn:
		dataP->fDMAAccess = kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;

	default:
		dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
		uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
		break;
	}

	if (_wireCount) {
		// Already wired: only validate that a read-only preparation is
		// not being re-used for a writable direction.
		if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
			OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
			    (size_t)VM_KERNEL_ADDRPERM(this));
			error = kIOReturnNotWritable;
		}
	} else {
		IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
		IOMapper *mapper;

		mapper = dataP->fMapper;
		dataP->fMappedBaseValid = dataP->fMappedBase = 0;

		uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
		// Pick the VM tag used to attribute the wired memory.
		tag = _kernelTag;
		if (VM_KERN_MEMORY_NONE == tag) {
			tag = IOMemoryTag(kernel_map);
		}

		if (kIODirectionPrepareToPhys32 & forDirection) {
			if (!mapper) {
				uplFlags |= UPL_NEED_32BIT_ADDR;
			}
			if (dataP->fDMAMapNumAddressBits > 32) {
				dataP->fDMAMapNumAddressBits = 32;
			}
		}
		if (kIODirectionPrepareNoFault    & forDirection) {
			uplFlags |= UPL_REQUEST_NO_FAULT;
		}
		if (kIODirectionPrepareNoZeroFill & forDirection) {
			uplFlags |= UPL_NOZEROFILLIO;
		}
		if (kIODirectionPrepareNonCoherent & forDirection) {
			uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
		}

		mapBase = 0;

		// Note that appendBytes(NULL) zeros the data up to the desired length
		size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
		// Reject sizes that would truncate when passed as unsigned int.
		if (uplPageSize > ((unsigned int)uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
			error = kIOReturnNoMemory;
			traceInterval.setEndArg2(error);
			return error;
		}
		// appendBytes may reallocate; dataP is stale from here on.
		dataP = NULL;

		// Find the appropriate vm_map for the given task
		vm_map_t curMap;
		if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
			curMap = NULL;
		} else {
			curMap = get_task_map(_task);
		}

		// Iterate over the vector of virtual ranges
		Ranges vec = _ranges;
		unsigned int pageIndex  = 0;
		IOByteCount mdOffset    = 0;
		ppnum_t highestPage     = 0;
		bool         byteAlignUPL;

		IOMemoryEntry * memRefEntry = NULL;
		if (_memRef) {
			memRefEntry = &_memRef->entries[0];
			byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
		} else {
			byteAlignUPL = true;
		}

		for (UInt range = 0; mdOffset < _length; range++) {
			ioPLBlock iopl;
			mach_vm_address_t startPage, startPageOffset;
			mach_vm_size_t    numBytes;
			ppnum_t highPage = 0;

			if (_memRef) {
				if (range >= _memRef->count) {
					panic("memRefEntry");
				}
				memRefEntry = &_memRef->entries[range];
				numBytes    = memRefEntry->size;
				startPage   = -1ULL;
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = (memRefEntry->start & PAGE_MASK);
				}
			} else {
				// Get the startPage address and length of vec[range]
				getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
				if (byteAlignUPL) {
					startPageOffset = 0;
				} else {
					startPageOffset = startPage & PAGE_MASK;
					startPage = trunc_page_64(startPage);
				}
			}
			iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
			numBytes += startPageOffset;

			if (mapper) {
				iopl.fMappedPage = mapBase + pageIndex;
			} else {
				iopl.fMappedPage = 0;
			}

			// Iterate over the current range, creating UPLs
			while (numBytes) {
				vm_address_t kernelStart = (vm_address_t) startPage;
				vm_map_t theMap;
				if (curMap) {
					theMap = curMap;
				} else if (_memRef) {
					theMap = NULL;
				} else {
					assert(_task == kernel_task);
					theMap = IOPageableMapForAddress(kernelStart);
				}

				// ioplFlags is an in/out parameter
				upl_control_flags_t ioplFlags = uplFlags;
				dataP = getDataP(_memoryEntries);
				pageInfo = getPageList(dataP);
				upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

				mach_vm_size_t ioplPhysSize;
				upl_size_t     ioplSize;
				unsigned int   numPageInfo;

				// Determine the physical span this chunk will need.
				if (_memRef) {
					error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
				} else {
					error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
					DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
				}
				if (error != KERN_SUCCESS) {
					if (_memRef) {
						DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
					} else {
						DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
					}
					printf("entry size error %d\n", error);
					goto abortExit;
				}
				// Cap each UPL at the maximum supported size.
				ioplPhysSize    = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
				numPageInfo = atop_32(ioplPhysSize);
				if (byteAlignUPL) {
					if (numBytes > ioplPhysSize) {
						ioplSize = ((typeof(ioplSize))ioplPhysSize);
					} else {
						ioplSize = ((typeof(ioplSize))numBytes);
					}
				} else {
					ioplSize = ((typeof(ioplSize))ioplPhysSize);
				}

				// Create the UPL for this chunk through whichever backend
				// applies: memory entry, static kernel memory, or vm_map.
				if (_memRef) {
					memory_object_offset_t entryOffset;

					entryOffset = mdOffset;
					if (byteAlignUPL) {
						entryOffset = (entryOffset - memRefEntry->offset);
					} else {
						entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
					}
					if (ioplSize > (memRefEntry->size - entryOffset)) {
						ioplSize =  ((typeof(ioplSize))(memRefEntry->size - entryOffset));
					}
					error = memory_object_iopl_request(memRefEntry->entry,
					    entryOffset,
					    &ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				} else if ((theMap == kernel_map)
				    && (kernelStart >= io_kernel_static_start)
				    && (kernelStart < io_kernel_static_end)) {
					error = io_get_kernel_static_upl(theMap,
					    kernelStart,
					    &ioplSize,
					    &iopl.fPageOffset,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &highPage);
				} else {
					assert(theMap);
					error = vm_map_create_upl(theMap,
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
					    vm_memtag_canonicalize(theMap, startPage),
#else /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
					    startPage,
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
					    (upl_size_t*)&ioplSize,
					    &iopl.fIOPL,
					    baseInfo,
					    &numPageInfo,
					    &ioplFlags,
					    tag);
				}

				if (error != KERN_SUCCESS) {
					traceInterval.setEndArg2(error);
					DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
					goto abortExit;
				}

				assert(ioplSize);

				// Track the highest physical page for later allocators.
				if (iopl.fIOPL) {
					highPage = upl_get_highest_page(iopl.fIOPL);
				}
				if (highPage > highestPage) {
					highestPage = highPage;
				}

				if (baseInfo->device) {
					numPageInfo = 1;
					iopl.fFlags = kIOPLOnDevice;
				} else {
					iopl.fFlags = 0;
				}

				if (byteAlignUPL) {
					if (iopl.fIOPL) {
						DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
						iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
					}
					if (startPage != (mach_vm_address_t)-1) {
						// assert(iopl.fPageOffset == (startPage & PAGE_MASK));
						startPage -= iopl.fPageOffset;
					}
					ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
					numBytes += iopl.fPageOffset;
				}

				iopl.fIOMDOffset = mdOffset;
				iopl.fPageInfo = pageIndex;

				if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
					// Clean up partial created and unsaved iopl
					if (iopl.fIOPL) {
						upl_abort(iopl.fIOPL, 0);
						upl_deallocate(iopl.fIOPL);
					}
					error = kIOReturnNoMemory;
					traceInterval.setEndArg2(error);
					goto abortExit;
				}
				// appendBytes may reallocate; dataP is stale again.
				dataP = NULL;

				// Check for a multiple iopl's in one virtual range
				pageIndex += numPageInfo;
				mdOffset -= iopl.fPageOffset;
				numBytesWired += ioplSize;
				if (ioplSize < numBytes) {
					numBytes -= ioplSize;
					if (startPage != (mach_vm_address_t)-1) {
						startPage += ioplSize;
					}
					mdOffset += ioplSize;
					iopl.fPageOffset = 0;
					if (mapper) {
						iopl.fMappedPage = mapBase + pageIndex;
					}
				} else {
					mdOffset += numBytes;
					break;
				}
			}
		}

		_highestPage = highestPage;
		DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);

		if (UPL_COPYOUT_FROM & uplFlags) {
			_flags |= kIOMemoryPreparedReadOnly;
		}
		traceInterval.setEndCodes(numBytesWired, error);
	}

#if IOTRACKING
	if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
		dataP = getDataP(_memoryEntries);
		if (!dataP->fWireTracking.link.next) {
			IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
		}
	}
#endif /* IOTRACKING */

	return error;

abortExit:
	// Failure: abort and release every UPL recorded so far, then reset
	// the iopl list back to its empty state.
	{
		dataP = getDataP(_memoryEntries);
		UInt done = getNumIOPL(_memoryEntries, dataP);
		ioPLBlock *ioplList = getIOPLList(dataP);

		for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
			if (ioplList[ioplIdx].fIOPL) {
				upl_abort(ioplList[ioplIdx].fIOPL, 0);
				upl_deallocate(ioplList[ioplIdx].fIOPL);
			}
		}
		_memoryEntries->setLength(computeDataSize(0, 0));
	}

	// Map raw Mach errors onto the closest IOKit return codes.
	if (error == KERN_FAILURE) {
		error = kIOReturnCannotWire;
	} else if (error == KERN_MEMORY_ERROR) {
		error = kIOReturnNoResources;
	}

	return error;
}
4690 
4691 bool
initMemoryEntries(size_t size,IOMapper * mapper)4692 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4693 {
4694 	ioGMDData * dataP;
4695 
4696 	if (size > UINT_MAX) {
4697 		return false;
4698 	}
4699 	if (!_memoryEntries) {
4700 		_memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4701 		if (!_memoryEntries) {
4702 			return false;
4703 		}
4704 	} else if (!_memoryEntries->initWithCapacity(size)) {
4705 		return false;
4706 	}
4707 
4708 	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4709 	dataP = getDataP(_memoryEntries);
4710 
4711 	if (mapper == kIOMapperWaitSystem) {
4712 		IOMapper::checkForSystemMapper();
4713 		mapper = IOMapper::gSystem;
4714 	}
4715 	dataP->fMapper               = mapper;
4716 	dataP->fPageCnt              = 0;
4717 	dataP->fMappedBase           = 0;
4718 	dataP->fDMAMapNumAddressBits = 64;
4719 	dataP->fDMAMapAlignment      = 0;
4720 	dataP->fPreparationID        = kIOPreparationIDUnprepared;
4721 	dataP->fCompletionError      = false;
4722 	dataP->fMappedBaseValid      = false;
4723 
4724 	return true;
4725 }
4726 
4727 IOReturn
dmaMap(IOMapper * mapper,IOMemoryDescriptor * memory,IODMACommand * command,const IODMAMapSpecification * mapSpec,uint64_t offset,uint64_t length,uint64_t * mapAddress,uint64_t * mapLength)4728 IOMemoryDescriptor::dmaMap(
4729 	IOMapper                    * mapper,
4730 	IOMemoryDescriptor          * memory,
4731 	IODMACommand                * command,
4732 	const IODMAMapSpecification * mapSpec,
4733 	uint64_t                      offset,
4734 	uint64_t                      length,
4735 	uint64_t                    * mapAddress,
4736 	uint64_t                    * mapLength)
4737 {
4738 	IOReturn err;
4739 	uint32_t mapOptions;
4740 
4741 	mapOptions = 0;
4742 	mapOptions |= kIODMAMapReadAccess;
4743 	if (!(kIOMemoryPreparedReadOnly & _flags)) {
4744 		mapOptions |= kIODMAMapWriteAccess;
4745 	}
4746 
4747 	err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4748 	    mapSpec, command, NULL, mapAddress, mapLength);
4749 
4750 	if (kIOReturnSuccess == err) {
4751 		dmaMapRecord(mapper, command, *mapLength);
4752 	}
4753 
4754 	return err;
4755 }
4756 
4757 void
dmaMapRecord(IOMapper * mapper,IODMACommand * command,uint64_t mapLength)4758 IOMemoryDescriptor::dmaMapRecord(
4759 	IOMapper                    * mapper,
4760 	IODMACommand                * command,
4761 	uint64_t                      mapLength)
4762 {
4763 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4764 	kern_allocation_name_t alloc;
4765 	int16_t                prior;
4766 
4767 	if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4768 		kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4769 	}
4770 
4771 	if (!command) {
4772 		return;
4773 	}
4774 	prior = OSAddAtomic16(1, &_dmaReferences);
4775 	if (!prior) {
4776 		if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4777 			_mapName  = alloc;
4778 			mapLength = _length;
4779 			kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4780 		} else {
4781 			_mapName = NULL;
4782 		}
4783 	}
4784 }
4785 
IOReturn
IOMemoryDescriptor::dmaUnmap(
	IOMapper                    * mapper,
	IODMACommand                * command,
	uint64_t                      offset,
	uint64_t                      mapAddress,
	uint64_t                      mapLength)
{
	// Tear down an IOMMU mapping made by dmaMap() and unwind the allocation
	// accounting recorded by dmaMapRecord().
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
	IOReturn ret;
	kern_allocation_name_t alloc;
	kern_allocation_name_t mapName;
	int16_t prior;

	mapName = NULL;
	prior = 0;
	if (command) {
		// Balance the reference taken in dmaMapRecord(); an unmap without a
		// matching map is a fatal accounting error.
		mapName = _mapName;
		if (_dmaReferences) {
			prior = OSAddAtomic16(-1, &_dmaReferences);
		} else {
			panic("_dmaReferences underflow");
		}
	}

	// Nothing was actually mapped; succeed trivially.
	if (!mapLength) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

	if ((alloc = mapper->fAllocName)) {
		// Reverse the per-mapper size accounting; when the last DMA
		// reference drops (prior == 1), also reverse the per-tag subtotal
		// recorded at map time (which used _length, not mapLength).
		kern_allocation_update_size(alloc, -mapLength, NULL);
		if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
			mapLength = _length;
			kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
		}
	}

	traceInterval.setEndArg1(ret);
	return ret;
}
4829 
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IOMemoryDescriptor          * memory,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	// Establish an IOMMU mapping for this descriptor. Whole-descriptor
	// requests on wired memory hand the mapper the cached page list
	// directly; physical descriptors and partial ranges use the generic
	// superclass path.
	IOReturn          err = kIOReturnSuccess;
	ioGMDData *       dataP;
	IOOptionBits      type = _flags & kIOMemoryTypeMask;

	*mapAddress = 0;
	// Host-only memory never gets a device mapping.
	if (kIOMemoryHostOnly & _flags) {
		return kIOReturnSuccess;
	}
	// Remote memory cannot be mapped from here.
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
	    || offset || (length != _length)) {
		// Physical descriptors or partial ranges: generic path.
		err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
	} else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
		// Wired descriptor with a page list: map the pages directly.
		const ioPLBlock * ioplList = getIOPLList(dataP);
		upl_page_info_t * pageList;
		uint32_t          mapOptions = 0;

		IODMAMapSpecification mapSpec;
		bzero(&mapSpec, sizeof(mapSpec));
		mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
		mapSpec.alignment = dataP->fDMAMapAlignment;

		// For external UPLs the fPageInfo field points directly to
		// the upl's upl_page_info_t array.
		if (ioplList->fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplList->fPageInfo;
			mapOptions |= kIODMAMapPagingPath;
		} else {
			pageList = getPageList(dataP);
		}

		// Page list covers exactly whole pages with no leading offset.
		if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
			mapOptions |= kIODMAMapPageListFullyOccupied;
		}

		assert(dataP->fDMAAccess);
		mapOptions |= dataP->fDMAAccess;

		// Check for direct device non-paged memory
		if (ioplList->fFlags & kIOPLOnDevice) {
			mapOptions |= kIODMAMapPhysicallyContiguous;
		}

		IODMAMapPageList dmaPageList =
		{
			.pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
			.pageListCount = _pages,
			.pageList      = &pageList[0]
		};
		err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
		    command, &dmaPageList, mapAddress, mapLength);

		// Account the new mapping against the mapper/tag statistics.
		if (kIOReturnSuccess == err) {
			dmaMapRecord(mapper, command, *mapLength);
		}
	}

	return err;
}
4903 
4904 /*
4905  * prepare
4906  *
4907  * Prepare the memory for an I/O transfer.  This involves paging in
4908  * the memory, if necessary, and wiring it down for the duration of
4909  * the transfer.  The complete() method completes the processing of
4910  * the memory after the I/O transfer finishes.  This method needn't
4911  * called for non-pageable memory.
4912  */
4913 
IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
	// Wire the descriptor's memory for I/O. Physical descriptors are
	// inherently "prepared"; pageable virtual memory is wired via
	// wireVirtual(). Prepare/complete calls nest via _wireCount.
	IOReturn     error    = kIOReturnSuccess;
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);

	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical memory needs no wiring.
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}

	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
		// Optionally decline to wire while the task is being hard-throttled.
		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
			error = kIOReturnNotReady;
			goto finish;
		}
		error = wireVirtual(forDirection);
	}

	if (kIOReturnSuccess == error) {
		// First successful prepare: apply one-time side effects.
		if (1 == ++_wireCount) {
			if (kIOMemoryClearEncrypt & _flags) {
				performOperation(kIOMemoryClearEncrypted, 0, _length);
			}

			ktraceEmitPhysicalSegments();
		}
	}

finish:

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}
	traceInterval.setEndArg1(error);

	return error;
}
4963 
4964 /*
4965  * complete
4966  *
4967  * Complete processing of the memory after an I/O transfer finishes.
4968  * This method should not be called unless a prepare was previously
4969  * issued; the prepare() and complete() must occur in pairs, before
4970  * before and after an I/O transfer involving pageable memory.
4971  */
4972 
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	// Undo one prepare(): drop a wire reference and, on the last one,
	// commit/abort the underlying UPLs and release any DMA mapping.
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData  * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		// Physical memory was never wired; nothing to complete.
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}
	do{
		// complete() without a matching prepare() is a caller bug.
		assert(_wireCount);
		if (!_wireCount) {
			break;
		}
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		// Remember that the I/O failed so the final commit aborts the pages.
		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		// Re-encrypt on the last wire reference if prepare() decrypted.
		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				// Last wire reference dropped: full teardown.
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */
				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				// Shrink the entry store back to just the header.
				_memoryEntries->setLength(computeDataSize(0, 0));

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	}while (false);

	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
5077 
5078 IOOptionBits
memoryReferenceCreateOptions(IOOptionBits options,IOMemoryMap * mapping)5079 IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
5080 {
5081 	IOOptionBits createOptions = 0;
5082 
5083 	if (!(kIOMap64Bit & options)) {
5084 		panic("IOMemoryDescriptor::makeMapping !64bit");
5085 	}
5086 	if (!(kIOMapReadOnly & options)) {
5087 		createOptions |= kIOMemoryReferenceWrite;
5088 #if DEVELOPMENT || DEBUG
5089 		if ((kIODirectionOut == (kIODirectionOutIn & _flags))
5090 		    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
5091 			OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
5092 		}
5093 #endif
5094 	}
5095 	return createOptions;
5096 }
5097 
5098 /*
5099  * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
5100  * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
5101  * creation.
5102  */
5103 
IOMemoryMap *
IOGeneralMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// NOTE: with kIOMap64Bit, __address carries the IOMemoryMap * being set
	// up, not a raw virtual address (see the cast below).
	IOReturn err = kIOReturnSuccess;
	IOMemoryMap * mapping;

	// Pre-create the copy-on-write named entry outside the global IOKit
	// lock taken by IOMemoryDescriptor::makeMapping(), since creation may
	// allocate real pages (see the comment above this function).
	if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
		struct IOMemoryReference * newRef;
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
		if (kIOReturnSuccess == err) {
			// Another thread may have raced us in; keep theirs, free ours.
			if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
				memoryReferenceFree(newRef);
			}
		}
	}
	if (kIOReturnSuccess != err) {
		return NULL;
	}
	mapping = IOMemoryDescriptor::makeMapping(
		owner, __intoTask, __address, options, __offset, __length);

#if IOTRACKING
	if ((mapping == (IOMemoryMap *) __address)
	    && (0 == (kIOMapStatic & mapping->fOptions))
	    && (NULL == mapping->fSuperMap)
	    && ((kIOTracking & gIOKitDebug) || _task)) {
		// only dram maps in the default on development case
		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
	}
#endif /* IOTRACKING */

	return mapping;
}
5143 
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// Map this descriptor into a task. With kIOMap64Bit, *__address carries
	// the IOMemoryMap being populated; its fOffset/fLength select the range.
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t  err;

	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t    range0Len = 0;

	// Reject ranges falling outside the descriptor.
	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (vec.v) {
		getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
	}

	// mapping source == dest? (could be much better)
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (!(kIOMapGuardedMask & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		// Memory is already mapped in the target task; just alias it.
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	// Lazily create the VM named entry backing this descriptor.
	if (!_memRef) {
		err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}


	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		// Remap-in-place: swap the pages backing an existing unique mapping
		// with this descriptor's pages via upl_transpose().
		do{
			upl_t               redirUPL2;
			upl_size_t          size;
			upl_control_flags_t flags;
			unsigned int        lock_count;

			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			// Fully drop the recursive IOKit lock across upl_transpose(),
			// then re-take it the same number of times afterwards.
			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (;
			    lock_count;
			    lock_count--) {
				LOCK;
			}

			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
				redirUPL2 = NULL;
			}
			{
				// swap the memEntries since they now refer to different vm_objects
				IOMemoryReference * me = _memRef;
				_memRef = mapping->fMemory->_memRef;
				mapping->fMemory->_memRef = me;
			}
			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		}while (false);
	}
	// upl_transpose> //
	else {
		// Normal path: enter the named entry into the target address map.
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
		if ((err == KERN_SUCCESS) && pager) {
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				// Inherit the buffer's cache mode when the caller did not
				// choose one explicitly.
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
5298 
5299 #if IOTRACKING
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
	// Recover the owning IOMemoryMap from its embedded tracking node and
	// report that map's task/address/size to the tracking subsystem.
	// NOTE(review): hand-rolled offsetof via a NULL-based member access —
	// formally UB in standard C++, though a long-standing kernel idiom here.
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	// Only report while the map's task VM map is still the current one.
	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task    = map->fAddressTask;
	*address = map->fAddress;
	*size    = map->fLength;

	return kIOReturnSuccess;
}
5318 #endif /* IOTRACKING */
5319 
5320 IOReturn
doUnmap(vm_map_t addressMap,IOVirtualAddress __address,IOByteCount __length)5321 IOGeneralMemoryDescriptor::doUnmap(
5322 	vm_map_t                addressMap,
5323 	IOVirtualAddress        __address,
5324 	IOByteCount             __length )
5325 {
5326 	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5327 	IOReturn ret;
5328 	ret = super::doUnmap(addressMap, __address, __length);
5329 	traceInterval.setEndArg1(ret);
5330 	return ret;
5331 }
5332 
5333 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5334 
#undef super
#define super OSObject

// IOMemoryMap runtime type information, allocated via the default zone.
OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

// Reserved vtable slots for future binary-compatible expansion.
OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5348 
5349 /* ex-inline function implementation */
IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
{
	// Physical address of the map's first segment (length not reported).
	return getPhysicalSegment( 0, NULL );
}
5355 
5356 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5357 
5358 bool
init(task_t intoTask,mach_vm_address_t toAddress,IOOptionBits _options,mach_vm_size_t _offset,mach_vm_size_t _length)5359 IOMemoryMap::init(
5360 	task_t                  intoTask,
5361 	mach_vm_address_t       toAddress,
5362 	IOOptionBits            _options,
5363 	mach_vm_size_t          _offset,
5364 	mach_vm_size_t          _length )
5365 {
5366 	if (!intoTask) {
5367 		return false;
5368 	}
5369 
5370 	if (!super::init()) {
5371 		return false;
5372 	}
5373 
5374 	fAddressMap  = get_task_map(intoTask);
5375 	if (!fAddressMap) {
5376 		return false;
5377 	}
5378 	vm_map_reference(fAddressMap);
5379 
5380 	fAddressTask = intoTask;
5381 	fOptions     = _options;
5382 	fLength      = _length;
5383 	fOffset      = _offset;
5384 	fAddress     = toAddress;
5385 
5386 	return true;
5387 }
5388 
bool
IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
	// Attach (or replace) the memory descriptor backing this map.
	if (!_memory) {
		return false;
	}

	if (!fSuperMap) {
		// Top-level map: validate and record the offset into the descriptor.
		if ((_offset + fLength) > _memory->getLength()) {
			return false;
		}
		fOffset = _offset;
	}


	// Retain the new descriptor before dropping any link to the old one.
	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
	if (fMemory) {
		if (fMemory != _memory) {
			fMemory->removeMapping(this);
		}
	}
	fMemory = os::move(tempval);

	return true;
}
5414 
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t                __addressMap,
	IOVirtualAddress *      __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
	// The base class cannot map; concrete subclasses override this
	// (see IOGeneralMemoryDescriptor::doMap).
	return kIOReturnUnsupported;
}
5425 
IOReturn
IOMemoryDescriptor::handleFault(
	void *                  _pager,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length)
{
	// While the descriptor is redirected, block the faulting thread until
	// redirection is lifted (redirect() issues the WAKEUP).
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
5442 
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *                  _pager,
	vm_map_t                addressMap,
	mach_vm_address_t       address,
	mach_vm_size_t          sourceOffset,
	mach_vm_size_t          length,
	IOOptionBits            options )
{
	// Push this descriptor's physical pages into the device pager so the
	// mapping at 'address' is backed, walking physical segments in order.
	IOReturn            err = kIOReturnSuccess;
	memory_object_t     pager = (memory_object_t) _pager;
	mach_vm_size_t      size;
	mach_vm_size_t      bytes;
	mach_vm_size_t      page;
	mach_vm_size_t      pageOffset;
	mach_vm_size_t      pagerOffset;
	IOPhysicalLength    segLen, chunk;
	addr64_t            physAddr;
	IOOptionBits        type;

	type = _flags & kIOMemoryTypeMask;

	// A contiguous pager is always populated from its start.
	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset  = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	// Work on page-aligned addresses; remember the sub-page lead-in.
	pageOffset = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do{
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		// Device pagers should only ever see unmanaged (device) pages.
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
			    physAddr, (uint64_t)segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		// Contiguous pagers can take whole segments at once.
		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		// Advance to the next physical segment.
		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	}while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	// Leftover bytes mean the descriptor ran out of segments early.
	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
5545 
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	// Tear down an IOMemoryMap's VM mapping. Under the 64-bit convention
	// __address carries the IOMemoryMap * and __length must be zero.
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	if (__length) {
		panic("doUnmap");
	}

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	if (kIOMapOverwrite & mapping->fOptions) {
		// Overwrite mappings are owned elsewhere; nothing to deallocate.
		err = KERN_SUCCESS;
	} else {
		// Pageable kernel buffers actually live in a pageable submap.
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
5590 
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	// Toggle redirection for every mapping of this descriptor. While
	// redirected, faulting threads block in handleFault(); clearing
	// redirection wakes them and may repopulate device pagers.
	IOReturn            err = kIOReturnSuccess;
	IOMemoryMap *       mapping = NULL;
	OSSharedPtr<OSIterator>        iter;

	LOCK;

	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	do {
		if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
			memory_object_t   pager;

			if (reserved) {
				pager = (memory_object_t) reserved->dp.devicePager;
			} else {
				pager = MACH_PORT_NULL;
			}

			while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
				mapping->redirect( safeTask, doRedirect );
				// On a global un-redirect, re-fault kernel device mappings.
				if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
					err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
				}
			}

			iter.reset();
		}
	} while (false);

	// Wake any threads sleeping in handleFault().
	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
5645 
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	// Redirect this single mapping: pull its page mappings so subsequent
	// accesses fault (and block in handleFault()), or adjust its cache
	// mode; then propagate to the descriptor for physical memory.
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		do{
			if (!fAddress) {
				break;
			}
			if (!fAddressMap) {
				break;
			}

			// Unmap dynamic mappings that don't belong to the "safe" task
			// so they refault on next touch.
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				// Write-combined mappings become uncached while redirected.
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}while (false);
		UNLOCK;
	}

	// Physical descriptors carry the redirect state themselves; forward the
	// toggle when it differs from the descriptor's current state.
	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
5689 
// Remove this mapping's range from its target vm_map and drop the map
// reference. Idempotent: once fAddress is cleared, later calls return
// success without doing work.
IOReturn
IOMemoryMap::unmap( void )
{
	IOReturn    err;

	LOCK;

	// Only tear down VM state this object owns: live address and map, not
	// a submap of another mapping, backing descriptor still present, and
	// not a caller-owned (kIOMapStatic) address.
	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
	    && (0 == (kIOMapStatic & fOptions))) {
		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
	} else {
		err = kIOReturnSuccess;
	}

	// Drop our reference on the target vm_map in all cases.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}

	fAddress = 0;

	UNLOCK;

	return err;
}
5715 
// Called when the task owning this mapping terminates. Unmaps if the user
// client requested teardown-on-death (userClientUnmap()); otherwise just
// forgets the address/map/task state, since the dying task's vm_map is
// reclaimed by the kernel anyway.
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		// The unmap() path removes the tracking entry itself (via doUnmap);
		// remove it here on the non-unmap path.
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	// Drop the vm_map reference and clear the stale address/task.
	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress     = 0;
	UNLOCK;
}
5737 
// Mark this mapping so taskDied() tears down the VM mapping when the
// owning task exits. No unmapping happens here.
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
5744 
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these references
// is released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	// Pass a "when" count of 2 so free() runs while the descriptor's set
	// reference still exists; LOCK serializes against code manipulating
	// the mapping set.
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}
5756 
void
IOMemoryMap::free()
{
	// Tear down VM state first (no-op if already unmapped).
	unmap();

	if (fMemory) {
		LOCK;
		// Remove ourselves from the descriptor's mapping set under the
		// global memory lock, then drop our descriptor reference.
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fSuperMap) {
		fSuperMap.reset();
	}

	// A leftover redirect UPL would keep access blocked; commit and free it.
	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
5780 
5781 IOByteCount
getLength()5782 IOMemoryMap::getLength()
5783 {
5784 	return fLength;
5785 }
5786 
// Legacy accessor for the mapped virtual address. On 32-bit kernels it
// warns (with a backtrace) when the mapping lives in a 64-bit map, since a
// 32-bit IOVirtualAddress cannot represent such an address; callers should
// use getAddress() instead.
IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		// NOTE(review): the submap's result is discarded here; this call
		// appears to be kept only for side effects — confirm intent.
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}
5802 
#ifndef __LP64__
// Out-of-line accessors only built on 32-bit kernels: full-width address
// and size of this mapping.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
5816 
5817 
5818 task_t
getAddressTask()5819 IOMemoryMap::getAddressTask()
5820 {
5821 	if (fSuperMap) {
5822 		return fSuperMap->getAddressTask();
5823 	} else {
5824 		return fAddressTask;
5825 	}
5826 }
5827 
5828 IOOptionBits
getMapOptions()5829 IOMemoryMap::getMapOptions()
5830 {
5831 	return fOptions;
5832 }
5833 
5834 IOMemoryDescriptor *
getMemoryDescriptor()5835 IOMemoryMap::getMemoryDescriptor()
5836 {
5837 	return fMemory.get();
5838 }
5839 
// Decide whether this existing mapping can satisfy the request described
// by newMapping. Returns:
//   - this (retained) when the request matches exactly,
//   - newMapping converted into a submap of this when it is a compatible
//     sub-range, or
//   - NULL when incompatible and a fresh mapping must be built.
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t              task      = newMapping->getAddressTask();
	mach_vm_address_t   toAddress = newMapping->fAddress;
	IOOptionBits        _options  = newMapping->fOptions;
	mach_vm_size_t      _offset   = newMapping->fOffset;
	mach_vm_size_t      _length   = newMapping->fLength;

	// Must target the same task's vm_map...
	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	// ...with matching read-only and guard attributes...
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	if ((fOptions ^ _options) & kIOMapGuardedMask) {
		return NULL;
	}
	// ...and, when the request names an explicit cache mode, that mode.
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	// A fixed-address request must coincide with our address.
	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	// The requested range must lie entirely within this mapping.
	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		// Exact match: reuse this mapping outright.
		retain();
		newMapping = this;
	} else {
		// Sub-range: make newMapping a submap referencing this mapping.
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset   = fOffset + _offset;
		newMapping->fAddress  = fAddress + _offset;
	}

	return newMapping;
}
5889 
// Wire or unwire the pages backing [offset, offset + length) of this
// mapping, expanded to page boundaries. The kIODirectionOutIn bits of
// options double as the VM protection for the wire; zero direction bits
// request an unwire.
IOReturn
IOMemoryMap::wireRange(
	uint32_t                options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	IOReturn kr;
	mach_vm_address_t start = trunc_page_64(fAddress + offset);
	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
	vm_prot_t prot;

	prot = (kIODirectionOutIn & options);
	if (prot) {
		// Tag the wired memory with the descriptor's VM tag for accounting.
		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
	} else {
		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
	}

	return kr;
}
5910 
5911 
// Return the physical address (and, via _length, the remaining contiguous
// segment length) at _offset within this mapping, by querying the backing
// descriptor at fOffset + _offset. The extra _options parameter exists
// only on LP64 builds.
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress   address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
5931 
5932 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5933 
5934 #undef super
5935 #define super OSObject
5936 
5937 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5938 
5939 void
initialize(void)5940 IOMemoryDescriptor::initialize( void )
5941 {
5942 	if (NULL == gIOMemoryLock) {
5943 		gIOMemoryLock = IORecursiveLockAlloc();
5944 	}
5945 
5946 	gIOLastPage = IOGetLastPageNumber();
5947 }
5948 
void
IOMemoryDescriptor::free( void )
{
	// Release the mapping set, if one was ever allocated.
	if (_mappings) {
		_mappings.reset();
	}

	// Tear down pager/device state hung off the descriptor, if any.
	if (reserved) {
		cleanKernelReserved(reserved);
		IOFreeType(reserved, IOMemoryDescriptorReserved);
		reserved = NULL;
	}
	super::free();
}
5963 
5964 OSSharedPtr<IOMemoryMap>
setMapping(task_t intoTask,IOVirtualAddress mapAddress,IOOptionBits options)5965 IOMemoryDescriptor::setMapping(
5966 	task_t                  intoTask,
5967 	IOVirtualAddress        mapAddress,
5968 	IOOptionBits            options )
5969 {
5970 	return createMappingInTask( intoTask, mapAddress,
5971 	           options | kIOMapStatic,
5972 	           0, getLength());
5973 }
5974 
5975 OSSharedPtr<IOMemoryMap>
map(IOOptionBits options)5976 IOMemoryDescriptor::map(
5977 	IOOptionBits            options )
5978 {
5979 	return createMappingInTask( kernel_task, 0,
5980 	           options | kIOMapAnywhere,
5981 	           0, getLength());
5982 }
5983 
#ifndef __LP64__
// Legacy 32-bit entry point. A fixed-address request into a 64-bit task
// cannot be expressed with a 32-bit IOVirtualAddress, so it is rejected
// with a backtrace directing the caller to createMappingInTask().
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
6002 
// Create (or reuse) a mapping of [offset, offset + length) in intoTask.
// length == 0 maps the whole descriptor. A raw IOMemoryMap is built and
// handed to makeMapping() (kIOMap64Bit protocol: the map object travels in
// the address argument), which adopts it, substitutes a compatible
// existing mapping, or releases it on failure.
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t                  intoTask,
	mach_vm_address_t       atAddress,
	IOOptionBits            options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = nullptr;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
6045 
#ifndef __LP64__ // there is only a 64 bit version for LP64
// Legacy 32-bit shim: widen the offset and forward to the 64-bit variant.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    IOByteCount          offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
6055 
// Retarget this mapping at a different backing memory descriptor.
// Called with newBackingMemory == NULL it blocks access to the current
// memory (access-blocking UPL; physical mappings also get unmapped).
// Called again with the new descriptor it remaps this same IOMemoryMap via
// makeMapping(kIOMapUnique | kIOMapReference) and then lifts the block.
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits         options,
    mach_vm_size_t       offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		do{
			if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				physMem = fMemory;
			}

			// Take an access-blocking IOPL on the current backing memory
			// (only while we are its sole memRef owner) so clients fault
			// until the switch completes.
			if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
				upl_size_t          size = (typeof(size))round_page(fLength);
				upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
				    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
				if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
				    NULL, NULL,
				    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
					fRedirUPL = NULL;
				}

				// Physical memory is not pager-backed; unmap directly.
				if (physMem) {
					IOUnmapPages( fAddressMap, fAddress, fLength );
					if ((false)) {
						physMem->redirect(NULL, true);
					}
				}
			}

			if (newBackingMemory) {
				if (newBackingMemory != fMemory) {
					// Rebuild this same mapping object over the new
					// descriptor at the supplied offset.
					fOffset = 0;
					if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
					    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
					    offset, fLength)) {
						err = kIOReturnError;
					}
				}
				// Lift the access block taken on the old memory.
				if (fRedirUPL) {
					upl_commit(fRedirUPL, NULL, 0);
					upl_deallocate(fRedirUPL);
					fRedirUPL = NULL;
				}
				if ((false) && physMem) {
					physMem->redirect(NULL, false);
				}
			}
		}while (false);
	}

	UNLOCK;

	return err;
}
6116 
// Core mapping construction. Under the kIOMap64Bit protocol __address
// actually carries an IOMemoryMap * describing the request. Depending on
// options this either adopts a caller-established static mapping, builds a
// unique mapping (for physical descriptors, via a temporary physical-range
// descriptor), reuses a compatible existing mapping, or performs a fresh
// doMap(). Returns the final mapping (retained for the caller) or NULL;
// on failure the passed-in mapping object is released.
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *    owner,
	task_t                  __intoTask,
	IOVirtualAddress        __address,
	IOOptionBits            options,
	IOByteCount             __offset,
	IOByteCount             __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result  = NULL;

	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do{
		if (kIOMapStatic & options) {
			// Caller already owns the VM range; just record the mapping.
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t phys;
			IOByteCount       physLen;

//	    if (owner != this)		continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				// Physical descriptors: the requested range must be one
				// contiguous segment; map through a fresh physical-range
				// descriptor covering exactly that segment.
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
					        addMapping(result);
					        result->setMemoryDescriptor(this, offset);
					        return true;
					}
					return false;
				});
			}
			// Either reuse succeeded, or a reference-only request found no
			// match; in both cases the caller's mapping object is surplus
			// unless it is the result itself.
			if (result || (options & kIOMapReference)) {
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		// Fresh mapping: map through ourselves unless a temporary physical
		// descriptor was substituted above.
		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn
		    kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	}while (false);

	UNLOCK;

	return result;
}
6214 
6215 void
addMapping(IOMemoryMap * mapping)6216 IOMemoryDescriptor::addMapping(
6217 	IOMemoryMap * mapping )
6218 {
6219 	if (mapping) {
6220 		if (NULL == _mappings) {
6221 			_mappings = OSSet::withCapacity(1);
6222 		}
6223 		if (_mappings) {
6224 			_mappings->setObject( mapping );
6225 		}
6226 	}
6227 }
6228 
6229 void
removeMapping(IOMemoryMap * mapping)6230 IOMemoryDescriptor::removeMapping(
6231 	IOMemoryMap * mapping )
6232 {
6233 	if (_mappings) {
6234 		_mappings->removeObject( mapping);
6235 	}
6236 }
6237 
6238 void
setMapperOptions(uint16_t options)6239 IOMemoryDescriptor::setMapperOptions( uint16_t options)
6240 {
6241 	_iomapperOptions = options;
6242 }
6243 
6244 uint16_t
getMapperOptions(void)6245 IOMemoryDescriptor::getMapperOptions( void )
6246 {
6247 	return _iomapperOptions;
6248 }
6249 
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
// These legacy 32-bit entry points are deliberate stubs: each reports
// failure (or NULL) so callers are forced onto the designated initializer.
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    length,
    IODirection  direction,
    task_t       task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            length,
	IODirection            direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    bool             asReference)
{
	return false;
}

void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
6306 
6307 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6308 
// Serialize the descriptor's ranges as an OSArray of {address, length}
// dictionaries. The range data is snapshotted under the lock first so no
// allocations happen while the lock is held.
bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	OSSharedPtr<OSSymbol const>     keys[2] = {NULL};
	OSSharedPtr<OSObject>           values[2] = {NULL};
	OSSharedPtr<OSArray>            array;

	// Snapshot of one range, widened to user-visible address/size types.
	struct SerData {
		user_addr_t address;
		user_size_t length;
	};

	unsigned int index;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
	if (!vcopy) {
		return false;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (vcopy.size() == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < vcopy.size(); index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index, _task);
			vcopy[index].address = addr;
			vcopy[index].length  = len;
		}
	} else {
		// The descriptor changed out from under us.  Give up.
		UNLOCK;
		return false;
	}
	UNLOCK;

	// Build the serializable array from the snapshot, outside the lock.
	for (index = 0; index < vcopy.size(); index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			return false;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			return false;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			return false;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	return array->serialize(s);
}
6383 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6384 
// Reserved vtable slot definitions for binary compatibility. On 32-bit
// builds slots 1-7 back the legacy virtual methods above; on LP64 they
// remain padding.
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* for real this is a ioGMDData + upl_page_info_t + ioPLBlock */
KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
    struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6415 
/* ex-inline function implementation */
// Physical address of the first segment; the NULL length argument means
// the caller does not need the segment length.
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
6422 
OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)

// Factory: allocate a mixed-data container with room for capacity bytes.
// Returns NULL if allocation or initialization fails.
OSPtr<_IOMemoryDescriptorMixedData>
_IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
{
	OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
	if (me && !me->initWithCapacity(capacity)) {
		return nullptr;
	}
	return me;
}
6434 
// (Re)initialize with room for capacity bytes. An existing buffer is kept
// only when it is already large enough and a nonzero capacity is
// requested; otherwise it is released and reallocated (zero-filled).
// The logical length always resets to 0.
bool
_IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
{
	if (_data && (!capacity || (_capacity < capacity))) {
		freeMemory();
	}

	if (!OSObject::init()) {
		return false;
	}

	if (!_data && capacity) {
		_data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
		    Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
		if (!_data) {
			return false;
		}
		_capacity = capacity;
	}

	_length = 0;

	return true;
}
6459 
void
_IOMemoryDescriptorMixedData::free()
{
	// Release the backing buffer before tearing down the OSObject.
	freeMemory();
	OSObject::free();
}
6466 
// Release the backing allocation and reset all bookkeeping.
void
_IOMemoryDescriptorMixedData::freeMemory()
{
	kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
	_data = nullptr;
	_capacity = _length = 0;
}
6474 
// Append length bytes to the container, growing storage as needed.
// bytes may be NULL to just extend the length (new space is
// zero-allocated; see setLength). Returns false on size_t overflow or
// allocation failure.
bool
_IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
{
	const auto oldLength = getLength();
	size_t newLength;
	if (os_add_overflow(oldLength, length, &newLength)) {
		return false;
	}

	if (!setLength(newLength)) {
		return false;
	}

	// Copy into the newly exposed tail of the buffer.
	unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
	if (bytes) {
		bcopy(bytes, dest, length);
	}

	return true;
}
6495 
// Set the logical length, reallocating (zero-filled growth) when length
// exceeds the current capacity. Shrinking never releases storage.
bool
_IOMemoryDescriptorMixedData::setLength(size_t length)
{
	if (!_data || (length > _capacity)) {
		void *newData;

		newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
		    length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
		    NULL);
		if (!newData) {
			// Old buffer and length remain valid on failure.
			return false;
		}

		_data = newData;
		_capacity = length;
	}

	_length = length;
	return true;
}
6516 
6517 const void *
getBytes() const6518 _IOMemoryDescriptorMixedData::getBytes() const
6519 {
6520 	return _length ? _data : nullptr;
6521 }
6522 
6523 size_t
getLength() const6524 _IOMemoryDescriptorMixedData::getLength() const
6525 {
6526 	return _data ? _length : 0;
6527 }
6528