xref: /xnu-8019.80.24/iokit/bsddev/skywalk/IOSkywalkSupport.cpp (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #if defined(__x86_64__)
29 #include <libkern/c++/OSKext.h> // IOSKCopyKextIdentifierWithAddress()
30 #endif
31 
32 #include <IOKit/IOBufferMemoryDescriptor.h>
33 #include <IOKit/IOMultiMemoryDescriptor.h>
34 #include <IOKit/IOCommand.h>
35 #include <IOKit/IOLib.h>
36 #include <IOKit/skywalk/IOSkywalkSupport.h>
37 #include <skywalk/os_skywalk_private.h>
38 #include <sys/errno.h>
39 #include <sys/queue.h>
40 
41 #include <mach/mach_vm.h>
42 #include <mach/vm_map.h>
43 #include <mach/vm_types.h>
44 
45 #define DLOG(fmt, args...)          SK_DF(SK_VERB_IOSK, fmt, ##args)
46 #define IOSK_SIZE_OK(x)         (((x) != 0) && (round_page(x) == (x)))
47 #define IOSK_OFFSET_OK(x)       (round_page(x) == (x))
48 
49 static vm_tag_t
getVMTagForMap(vm_map_t map)50 getVMTagForMap( vm_map_t map )
51 {
52 	return (map == kernel_map) ?
53 	       VM_KERN_MEMORY_SKYWALK : VM_MEMORY_SKYWALK;
54 }
55 
// IOSKMemoryArray:
// An IOMultiMemoryDescriptor aggregating Skywalk buffers so the whole
// collection can be mapped into a task as one range.
class IOSKMemoryArray : public IOMultiMemoryDescriptor
{
	OSDeclareFinalStructors( IOSKMemoryArray );

public:
	// Map this array at *startAddr in intoTask; name suggests
	// kIOMapOverwrite semantics — confirm against the implementation
	// (not in view here).
	bool overwriteMappingInTask(
		task_t              intoTask,
		mach_vm_address_t * startAddr,
		IOOptionBits        options );
};
66 
// IOSKMemoryBuffer:
// IOBufferMemoryDescriptor subclass backing one Skywalk memory segment.
// Carries the creation-time spec and cached kernel-side mapping state.
class IOSKMemoryBuffer : public IOBufferMemoryDescriptor
{
	OSDeclareFinalStructors( IOSKMemoryBuffer );

public:
	// Initializer taking a Skywalk buffer spec (implementation not in
	// view; presumably allocates per-spec backing memory for inTask).
	bool initWithSpec( task_t            inTask,
	    mach_vm_size_t    capacity,
	    mach_vm_address_t alignment,
	    const IOSKMemoryBufferSpec * spec );

	virtual void * getBytesNoCopy( void ) APPLE_KEXT_OVERRIDE;

	virtual void * getBytesNoCopy(vm_size_t start, vm_size_t withLength) APPLE_KEXT_OVERRIDE;

	// True while the descriptor has a non-zero wire count (pages wired).
	bool
	isWired( void ) const
	{
		return _wireCount != 0;
	}

	IOSKMemoryBufferSpec    fSpec;                   // options captured at init
	void                    *fKernelAddr;            // kernel VA (set elsewhere; not in view)
	IOMemoryMap             *fKernelReadOnlyMapping; // read-only alias (set elsewhere; not in view)

protected:
	virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
	virtual void free( void ) APPLE_KEXT_OVERRIDE;
};

// FIXME: rename IOSKMemoryBuffer -> IOSKBuffer
typedef IOSKMemoryBuffer    IOSKBuffer;
98 
// IOSKRegionMapper:
// Tracks all memory mappings of a single IOSKRegion, with an array of
// IOMemoryMaps to map the region's memory segments.
// Created and released by the parent IOSKMapper.

class IOSKRegionMapper : public OSObject
{
	OSDeclareFinalStructors( IOSKRegionMapper );

public:
	// One-time setup; false on bad args / allocation failure.
	bool initWithMapper( IOSKMapper * mapper, IOSKRegion * region,
	    IOSKOffset regionOffset );

	// Map/unmap one segment of the region within the parent mapper's
	// reserved VA range.
	IOReturn    map( IOSKIndex segIndex, IOSKBuffer * buffer );
	void        unmap( IOSKIndex segIndex, vm_prot_t prot );

	// Replace [addr, addr+size) in the parent task map with an
	// anonymous mapping at protection `prot`.
	kern_return_t mapOverwrite( vm_map_offset_t addr,
	    vm_map_size_t size, vm_prot_t prot );

private:
	virtual void free( void ) APPLE_KEXT_OVERRIDE;

	IOSKMapper *        fMapper;        // parent (unretained back-pointer)
	IOSKRegion *        fRegion;        // region covered (retained via arena)
	IOMemoryMap **      fMemoryMaps;    // one slot per segment
	IOSKCount           fMemoryMapCount; // == region segment count
	IOSKOffset          fRegionOffset;  // byte offset within the arena map
};
127 
// IOSKMapper:
// Manages all memory mappings of a single task, with an array of
// IOSKRegionMappers to map all memory regions of a memory arena.
// Retains the IOSKArena.

class IOSKMapper : public OSObject
{
	OSDeclareFinalStructors( IOSKMapper );
	friend class IOSKRegionMapper;

public:
	// Reserve the arena-sized VA range in `task` and build one
	// IOSKRegionMapper per arena region.
	bool     initWithTask( task_t task, IOSKArena * arena );

	// Route a segment map/unmap request to the region mapper at regIndex.
	IOReturn map( IOSKIndex regIndex, IOSKIndex segIndex, IOSKBuffer * buffer );
	void     unmap( IOSKIndex regIndex, IOSKIndex segIndex, vm_prot_t prot );

	// Base address (and optionally size) of the reserved task mapping.
	mach_vm_address_t
	getMapAddress( mach_vm_size_t * size ) const
	{
		if (size) {
			*size = fMapSize;
		}
		return fMapAddr;
	}

	IOSKArena *
	getArena( void ) const
	{
		return fArena;
	}
	// A redirected mapper is skipped by subsequent IOSKArena::map()
	// calls (see IOSKArena::map/unmap).
	bool
	isRedirected( void ) const
	{
		return fRedirected;
	}
	void
	redirectMap( void )
	{
		fRedirected = true;
	}

private:
	virtual void free( void ) APPLE_KEXT_OVERRIDE;

	task_t              fTask;      // target task
	vm_map_t            fTaskMap;   // target task's vm_map
	IOSKArena *         fArena;     // retained
	OSArray *           fSubMaps;   // IOSKRegionMapper per region
	mach_vm_address_t   fMapAddr;   // base of reserved VA range
	mach_vm_size_t      fMapSize;   // == arena size
	bool                fRedirected; // latched by redirectMap()
};
180 
// IOSKArena:
// An array of IOSKRegions is used to create an IOSKArena.
// One or more IOSKMapper can map the arena memory to tasks.
// Retains the IOSKRegions, also circularly retains the IOSKMapper(s)
// until the client calls IOSKMapperDestroy().

class IOSKArena : public OSObject
{
	OSDeclareFinalStructors( IOSKArena );

public:
	// Attach to every region and compute the arena's total size.
	bool     initWithRegions( IOSKRegion ** regions,
	    IOSKCount regionCount );

	// Create an IOSKMapper for `task` and populate its mappings.
	IOReturn createMapperForTask( task_t task,
	    LIBKERN_RETURNS_RETAINED IOSKMapper ** mapper );
	// Redirect one mapper (or all, when mapper == NULL).
	void     redirectMap( IOSKMapper * mapper );

	IOSKSize
	getArenaSize( void ) const
	{
		return fArenaSize;
	}
	IOSKCount
	getRegionCount( void ) const
	{
		return fRegions->getCount();
	}
	IOSKRegion * getRegion( IOSKIndex regIndex ) const;

	// Called by an IOSKRegion to propagate a segment-buffer map to
	// every (non-redirected) mapper of this arena.
	IOReturn map( const IOSKRegion * region,
	    IOSKOffset regionOffset,
	    IOSKIndex regionIndex,
	    IOSKIndex segmentIndex,
	    IOSKMemoryBuffer * buffer );

	// Counterpart of map(); `context` optionally limits redirection
	// to a single mapper.
	void     unmap( const IOSKRegion * region,
	    IOSKOffset regionOffset,
	    IOSKIndex regionIndex,
	    IOSKIndex segmentIndex,
	    vm_prot_t prot,
	    bool isRedirected,
	    const void * context );

	bool     addMapper( const IOSKMapper * mapper );
	void     removeMapper( const IOSKMapper * mapper );

private:
	virtual void free( void ) APPLE_KEXT_OVERRIDE;

	IOLock *        fArenaLock; // guards fMappers
	OSSet *         fMappers;   // retained mappers (lazily created)
	OSArray *       fRegions;   // retained regions, in arena order
	IOSKSize        fArenaSize; // sum of region sizes
};
236 
// IOSKRegion:
// An IOSKRegion manages a dynamic array of IOSKBuffers representing each
// memory segment in the region. Each IOSKRegion can be shared by multiple
// IOSKArenas, and the IOSKRegion keeps state specific to each arena - the
// offset and the index of the region within the arena. A lock is used to
// serialize updates to the IOSKBuffer array and the arenas.
// Retains the IOSKBuffers.

class IOSKRegion : public OSObject
{
	OSDeclareFinalStructors( IOSKRegion );

public:
	// segSize must be page-rounded and non-zero; segCount non-zero.
	bool     initWithSpec( const IOSKRegionSpec * spec,
	    IOSKSize segSize, IOSKCount segCount );

	// Install/remove the backing buffer of one segment, updating the
	// mappings of every attached arena.
	IOReturn setSegmentBuffer( IOSKIndex index, IOSKBuffer * buf );
	void     clearSegmentBuffer( IOSKIndex index, IOSKMemoryBufferRef * prevBuffer );

	// Record this region's offset/index within an arena (shared
	// regions track per-arena state in an SLIST).
	bool     attachArena( IOSKArena * arena,
	    IOSKOffset regionOffset, IOSKIndex regionIndex );
	void     detachArena( const IOSKArena * arena );

	IOReturn updateMappingsForArena( IOSKArena * arena, bool redirect,
	    const void * context = NULL );

	IOSKCount
	getSegmentCount( void ) const
	{
		return fSegmentCount;
	}
	IOSKSize
	getSegmentSize( void ) const
	{
		return fSegmentSize;
	}
	IOSKSize
	getRegionSize( void ) const
	{
		// NOTE(review): 32-bit multiply; assumes count * size cannot
		// overflow IOSKSize — confirm callers bound these values.
		return fSegmentCount * fSegmentSize;
	}

private:
	virtual void free( void ) APPLE_KEXT_OVERRIDE;

	struct Segment {
		IOSKBuffer *  fBuffer; // retained backing buffer, or NULL
	};

	// Per-arena placement of this region; fArena == NULL marks a
	// reusable (detached) entry.
	struct ArenaEntry {
		SLIST_ENTRY(ArenaEntry) link;
		IOSKArena *   fArena;
		IOSKOffset    fRegionOffset;
		IOSKIndex     fRegionIndex;
	};
	SLIST_HEAD(ArenaHead, ArenaEntry);

	// Unlocked workers; callers hold fRegionLock.
	IOReturn _setSegmentBuffer( const IOSKIndex index, IOSKMemoryBuffer * buf );
	void     _clearSegmentBuffer( const IOSKIndex index, IOSKMemoryBufferRef * prevBuffer );
	ArenaEntry * findArenaEntry( const IOSKArena * arena );

	IOSKRegionSpec fSpec;
	IOLock *    fRegionLock;  // guards fSegments and fArenaHead
	ArenaHead * fArenaHead;   // lazily allocated SLIST head
	Segment *   fSegments;    // fSegmentCount entries
	IOSKCount   fSegmentCount;
	IOSKSize    fSegmentSize;
};
305 
#undef  super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKRegionMapper, OSObject )

// Initialize a per-region mapper owned by `mapper`, covering `region`
// at byte offset `regionOffset` within the parent mapper's VA range.
// Returns false on bad arguments or allocation failure; cleanup of any
// partial state happens in free() when the caller releases.
bool
IOSKRegionMapper::initWithMapper(
	IOSKMapper * mapper, IOSKRegion * region, IOSKOffset regionOffset )
{
	if ((mapper == NULL) || (region == NULL) || !super::init()) {
		return false;
	}

	// parent mapper retains the arena, which retains the regions,
	// so these raw back-pointers stay valid for our lifetime.
	assert(IOSK_OFFSET_OK(regionOffset));
	fMapper = mapper;
	fRegion = region;
	fRegionOffset = regionOffset;

	// one IOMemoryMap slot per segment in the region
	fMemoryMapCount = region->getSegmentCount();
	assert(fMemoryMapCount != 0);
	fMemoryMaps = IONew(IOMemoryMap *, fMemoryMapCount);
	if (!fMemoryMaps) {
		return false;
	}

	bzero(fMemoryMaps, sizeof(IOMemoryMap *) * fMemoryMapCount);

	DLOG("SKRegionMapper %p mapper %p region %p offset 0x%x",
	    this, mapper, region, regionOffset);
	return true;
}
337 
// Release every live segment mapping, then the map array itself.
// Back-pointers are cleared but not released (never retained).
void
IOSKRegionMapper::free( void )
{
	DLOG("SKRegionMapper %p", this);

	if (fMemoryMaps) {
		assert(fMemoryMapCount != 0);
		for (IOSKIndex i = 0; i < fMemoryMapCount; i++) {
			if (fMemoryMaps[i]) {
				fMemoryMaps[i]->release();
				fMemoryMaps[i] = NULL;
			}
		}
		IODelete(fMemoryMaps, IOMemoryMap *, fMemoryMapCount);
		fMemoryMaps = NULL;
		fMemoryMapCount = 0;
	}

	fMapper = NULL;
	fRegion = NULL;
	super::free();
}
360 
// Map `buffer` as segment `segIndex` at its fixed offset inside the
// parent mapper's reserved VA range. Overwrites the PROT_NONE
// placeholder (kIOMapOverwrite); read-only unless the buffer spec
// allows user writes. Idempotent for an already-mapped segment.
IOReturn
IOSKRegionMapper::map( IOSKIndex segIndex, IOSKBuffer * buffer )
{
	mach_vm_address_t   addr;
	mach_vm_offset_t    offset;
	IOMemoryMap *       map;
	IOOptionBits        options = kIOMapOverwrite;
	IOReturn            ret = kIOReturnSuccess;

	assert(segIndex < fMemoryMapCount);
	assert(buffer != NULL);

	if ((segIndex >= fMemoryMapCount) || (buffer == NULL)) {
		return kIOReturnBadArgument;
	}

	// redundant map requests are expected when the arena is mapped
	// by more than one mapper.
	if ((map = fMemoryMaps[segIndex]) != NULL) {
		assert(map->getMemoryDescriptor() == buffer);
		return kIOReturnSuccess;
	}

	if (buffer->fSpec.user_writable == FALSE) {
		options |= kIOMapReadOnly;
	}

	// segment's fixed position: region offset + segment index * size
	offset = fRegionOffset + (segIndex * fRegion->getSegmentSize());
	assert((offset + fRegion->getSegmentSize()) <= fMapper->fMapSize);
	addr = fMapper->fMapAddr + offset;

	// fixed-address mapping into the parent mapper's task
	map = buffer->createMappingInTask(fMapper->fTask, addr, options);
	fMemoryMaps[segIndex] = map;
	assert((map == NULL) || (map->getLength() == fRegion->getSegmentSize()));
	if (map == NULL) {
		ret = kIOReturnVMError;
	}

	SK_DF(ret == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "%p buffer %p index %u map %p offset 0x%x size 0x%x",
	    this, buffer, segIndex, fMemoryMaps[segIndex],
	    (uint32_t)offset, fRegion->getSegmentSize());

	return ret;
}
406 
// Tear down the mapping of segment `segIndex`: replace the segment's
// VA range with an anonymous mapping at protection `prot` (keeping the
// arena's address range reserved), then drop the IOMemoryMap.
void
IOSKRegionMapper::unmap( IOSKIndex segIndex, vm_prot_t prot )
{
	mach_vm_address_t   addr;
	mach_vm_offset_t    offset;
	IOMemoryMap *       map;
	kern_return_t       kr;

	assert(segIndex < fMemoryMapCount);

	// redundant unmap requests are expected when the arena is mapped
	// by more than one mapper.
	if ((segIndex >= fMemoryMapCount) || ((map = fMemoryMaps[segIndex]) == NULL)) {
		return;
	}

	offset = fRegionOffset + (segIndex * fRegion->getSegmentSize());
	assert((offset + fRegion->getSegmentSize()) <= fMapper->fMapSize);
	addr = fMapper->fMapAddr + offset;

	// overwrite first so the range is never left unmapped/unreserved
	kr = mapOverwrite(addr, fRegion->getSegmentSize(), prot);
	assert(KERN_SUCCESS == kr);

	map->release();
	fMemoryMaps[segIndex] = map = NULL;

	DLOG("SKRegionMapper %p index %u offset 0x%x size 0x%x",
	    this, segIndex, (uint32_t)offset, fRegion->getSegmentSize());
}
436 
// Replace [addr, addr+size) in the parent task's map with an anonymous
// (memory-object-less) mapping at protection `prot`, using
// VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE so the existing entry is
// displaced in place and the address range stays reserved.
kern_return_t
IOSKRegionMapper::mapOverwrite(
	vm_map_offset_t addr, vm_map_size_t size, vm_prot_t prot )
{
	int flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	kern_return_t kr;

	kr = vm_map_enter_mem_object(
		fMapper->fTaskMap,
		&addr,
		size,
		(vm_map_offset_t)0,
		flags,
		VM_MAP_KERNEL_FLAGS_NONE,
		getVMTagForMap(fMapper->fTaskMap),
		IPC_PORT_NULL,             // no backing object: anonymous memory
		(vm_object_offset_t)0,
		FALSE,
		prot,
		VM_PROT_DEFAULT,
		VM_INHERIT_NONE);

	SK_DF(kr == KERN_SUCCESS ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "SKRegionMapper %p addr 0x%llx size 0x%llx prot 0x%x "
	    "kr 0x%x", this, (uint64_t)addr, (uint64_t)size, prot, kr);
	return kr;
}
464 
#undef  super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKMapper, OSObject )

// Initialize a mapper for `task`: retain the arena, create one
// IOSKRegionMapper per region, then reserve the arena-sized VA range
// in the task with VM_PROT_NONE (segments are later overwrite-mapped
// into it). Returns false on any failure; partially built state is
// torn down in free() when the caller releases.
bool
IOSKMapper::initWithTask(
	task_t task, IOSKArena * arena )
{
	IOSKRegionMapper *  subMap;
	IOSKRegion *        region;
	IOSKCount           regionCount;
	IOSKOffset          regionOffset = 0;
	vm_map_offset_t     addr;
	vm_map_size_t       size;
	kern_return_t       kr;
	int     flags;
	bool    ok = false;

	if ((task == TASK_NULL) || (arena == NULL) || !super::init()) {
		return false;
	}

	fTask = task;
	fTaskMap = get_task_map(task);
	if (fTaskMap == VM_MAP_NULL) {
		return false;
	}

	arena->retain();
	fArena = arena;

	regionCount = fArena->getRegionCount();
	assert(regionCount != 0);

	fSubMaps = OSArray::withCapacity(regionCount);
	if (!fSubMaps) {
		return false;
	}

	// build one region mapper per region, tracking each region's
	// running byte offset within the arena
	for (IOSKIndex i = 0; i < regionCount; i++) {
		region = fArena->getRegion(i);
		assert(region != NULL);

		subMap = new IOSKRegionMapper;
		if (subMap && !subMap->initWithMapper(this, region, regionOffset)) {
			subMap->release();
			subMap = NULL;
		}
		if (!subMap) {
			break;
		}

		// array retains the sub-mapper; drop our creation reference
		ok = fSubMaps->setObject(subMap);
		subMap->release();
		subMap = NULL;
		if (!ok) {
			break;
		}

		// offset of next region
		regionOffset += region->getRegionSize();
	}
	// a short array means the loop above failed part-way
	if (fSubMaps->getCount() != regionCount) {
		return false;
	}

	addr = 0;
	size = fArena->getArenaSize();
	assert(regionOffset == size);
	assert(IOSK_SIZE_OK(size));
	flags = VM_FLAGS_ANYWHERE;

	// reserve address space on given task with PROT_NONE
	kr = vm_map_enter_mem_object(
		fTaskMap,
		&addr,
		size,
		(vm_map_offset_t)0,
		flags,
		VM_MAP_KERNEL_FLAGS_NONE,
		getVMTagForMap(fTaskMap),
		IPC_PORT_NULL,
		(vm_object_offset_t)0,
		FALSE,
		VM_PROT_NONE,
		VM_PROT_DEFAULT,
		VM_INHERIT_NONE);

	ok = false;
	if (KERN_SUCCESS == kr) {
		fMapAddr = (mach_vm_address_t)addr;
		fMapSize = (mach_vm_size_t)size;
		ok = true;
	}

	SK_DF(kr == KERN_SUCCESS ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "SKMapper %p task 0x%llx map %p addr 0x%llx size 0x%llx subMaps %u "
	    "kr 0x%x", this, (uint64_t)task, fTaskMap, (uint64_t)addr,
	    (uint64_t)size, fSubMaps->getCount(), kr);


	return ok;
}
569 
// Release sub-mappers and the arena reference, and deallocate the
// reserved task VA range if it was created.
void
IOSKMapper::free( void )
{
	DLOG("SKMapper %p", this);

	if (fSubMaps != NULL) {
		fSubMaps->release();
		fSubMaps = NULL;
	}

	if (fArena != NULL) {
		fArena->release();
		fArena = NULL;
	}

	if (fMapSize != 0) {
		mach_vm_deallocate(fTaskMap, fMapAddr, fMapSize);
		fTaskMap = NULL;
		fMapAddr = 0;
		fMapSize = 0;
	}

	fTask = NULL;
	// fTaskMap also cleared above when a mapping existed; this covers
	// the never-mapped case.
	fTaskMap = NULL;

	super::free();
}
597 
598 IOReturn
map(IOSKIndex regionIndex,IOSKIndex segmentIndex,IOSKBuffer * buffer)599 IOSKMapper::map(
600 	IOSKIndex regionIndex, IOSKIndex segmentIndex, IOSKBuffer * buffer )
601 {
602 	IOSKRegionMapper * subMap;
603 	IOReturn ret = kIOReturnBadArgument;
604 
605 	// route to the region mapper at regionIndex
606 	assert(regionIndex < fSubMaps->getCount());
607 	subMap = (typeof(subMap))fSubMaps->getObject(regionIndex);
608 	if (subMap) {
609 		ret = subMap->map(segmentIndex, buffer);
610 	}
611 
612 	return ret;
613 }
614 
615 void
unmap(IOSKIndex regionIndex,IOSKIndex segmentIndex,vm_prot_t prot)616 IOSKMapper::unmap(
617 	IOSKIndex regionIndex, IOSKIndex segmentIndex, vm_prot_t prot )
618 {
619 	IOSKRegionMapper * subMap;
620 
621 	// route to the region mapper at regionIndex
622 	assert(regionIndex < fSubMaps->getCount());
623 	subMap = (typeof(subMap))fSubMaps->getObject(regionIndex);
624 	if (subMap) {
625 		subMap->unmap(segmentIndex, prot);
626 	}
627 }
628 
#undef  super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKArena, OSObject )

// Initialize the arena from an ordered list of regions: retain them,
// attach to each one with its byte offset and index within the arena,
// and accumulate the total arena size. Returns false on any failure;
// free() detaches/releases whatever was set up.
bool
IOSKArena::initWithRegions(
	IOSKRegion ** regions, IOSKCount regionCount )
{
	IOSKRegion * region;
	IOSKSize     regionSize;
	IOSKOffset   regionOffset = 0;
	bool         ok = false;

	assert(regions != NULL);
	assert(regionCount != 0);

	do {
		if ((regions == NULL) || (regionCount == 0) || !super::init()) {
			break;
		}

		fArenaLock = IOLockAlloc();
		if (fArenaLock == NULL) {
			break;
		}

		// the array retains every region
		fRegions = OSArray::withObjects((const OSObject **)regions, regionCount);
		if (!fRegions) {
			break;
		}

		ok = true;
		for (uint32_t i = 0; i < regionCount; i++) {
			region = OSDynamicCast(IOSKRegion, fRegions->getObject(i));
			ok = (region != NULL);
			if (!ok) {
				break;
			}

			regionSize = region->getRegionSize();
			assert(IOSK_SIZE_OK(regionSize));

			// attach to each region and assign region offset/index
			ok = region->attachArena(this, regionOffset, i);
			if (!ok) {
				break;
			}

			// offset of next region
			regionOffset += regionSize;
			assert(IOSK_OFFSET_OK(regionOffset));
		}
		// on partial failure this is the partial sum; free() cleans up
		fArenaSize = regionOffset;
	} while (false);

	DLOG("SKArena %p regions %u size 0x%x ok %d",
	    this, regionCount, fArenaSize, ok);
	return ok;
}
688 
// Detach from every region (stopping further mapping requests), then
// drop the region array, the (expected-empty) mapper set, and the lock.
void
IOSKArena::free( void )
{
	DLOG("IOSKArena %p", this);

	if (fRegions) {
		IOSKRegion * region;
		OSObject * object;

		// detach from regions to stop mapping requests
		for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
			region = OSDynamicCast(IOSKRegion, object);
			if (region) {
				region->detachArena(this);
			}
		}

		fRegions->release();
		fRegions = NULL;
	}

	if (fMappers) {
		// mappers hold a retain on the arena, so by the time free()
		// runs the set must already have been emptied
		assert(fMappers->getCount() == 0);
		fMappers->release();
		fMappers = NULL;
	}

	if (fArenaLock != NULL) {
		IOLockFree(fArenaLock);
		fArenaLock = NULL;
	}

	super::free();
}
723 
// Create an IOSKMapper for `task`, register it with the arena, then
// ask every region to (re)establish its segment mappings so the new
// mapper is populated. On success *outMapper holds a retained mapper.
IOReturn
IOSKArena::createMapperForTask( task_t task, IOSKMapper ** outMapper )
{
	IOSKRegion * region;
	OSObject *   object;
	IOSKMapper * mapper;
	IOReturn     result, ret = kIOReturnSuccess;

	assert(task != TASK_NULL);
	assert(outMapper != NULL);

	mapper = new IOSKMapper;
	if (mapper && !mapper->initWithTask(task, this)) {
		mapper->release();
		mapper = NULL;
	}
	if (!mapper || !addMapper(mapper)) {
		ret = kIOReturnNoMemory;
		goto done;
	}

	// request all regions to refresh the arena's mappings,
	// which now includes the newly added mapper.
	for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
		region = OSDynamicCast(IOSKRegion, object);
		assert(region != NULL);
		result = region->updateMappingsForArena(this, false);
		assert(kIOReturnSuccess == result);
		if (result != kIOReturnSuccess) {
			ret = result;
		}
	}

done:
	// NOTE(review): if updateMappingsForArena() failed after
	// addMapper() succeeded, this drops only the caller's reference;
	// fMappers still retains the half-initialized mapper — confirm
	// this path is unreachable (the asserts suggest it should be).
	if ((ret != kIOReturnSuccess) && mapper) {
		mapper->release();
		mapper = NULL;
	}
	*outMapper = mapper;
	return ret;
}
765 
// Propagate one segment's buffer mapping to every mapper of this
// arena (called by IOSKRegion with the region's per-arena placement).
// Redirected mappers are skipped. Returns the last failure, if any.
IOReturn
IOSKArena::map(
	const IOSKRegion * region __unused,
	IOSKOffset regionOffset __unused,
	IOSKIndex regionIndex, IOSKIndex segmentIndex,
	IOSKBuffer * buffer )
{
	IOSKMapper * mapper;
	OSIterator * iter;
	IOReturn result, ret = kIOReturnSuccess;

	IOLockLock(fArenaLock);

	if (fMappers && (iter = OSCollectionIterator::withCollection(fMappers))) {
		while ((mapper = (typeof(mapper))iter->getNextObject())) {
			// skip any redirected mapper
			if (mapper->isRedirected()) {
				continue;
			}
			result = mapper->map(regionIndex, segmentIndex, buffer);
			assert(kIOReturnSuccess == result);
			if (result != kIOReturnSuccess) {
				ret = result;
			}
		}
		iter->release();
	}

	IOLockUnlock(fArenaLock);
	return ret;
}
797 
// Remove one segment's mapping from the arena's mappers. When
// `redirecting`, `context` may name a single target mapper (NULL means
// all), and each affected mapper is latched as redirected so later
// map() calls skip it. Otherwise only non-redirected mappers unmap.
void
IOSKArena::unmap(
	const IOSKRegion * region __unused,
	IOSKOffset regionOffset __unused,
	IOSKIndex regionIndex, IOSKIndex segmentIndex,
	vm_prot_t prot, bool redirecting, const void * context )
{
	IOSKMapper * mapper;
	const IOSKMapper * redirectMapper = (typeof(redirectMapper))context;
	OSIterator * iter;

	IOLockLock(fArenaLock);

	if (fMappers && (iter = OSCollectionIterator::withCollection(fMappers))) {
		while ((mapper = (typeof(mapper))iter->getNextObject())) {
			if (redirecting) {
				if ((redirectMapper == NULL) || (redirectMapper == mapper)) {
					// redirecting can be specific to one mapper
					mapper->unmap(regionIndex, segmentIndex, prot);
					mapper->redirectMap();
				}
			} else if (!mapper->isRedirected()) {
				mapper->unmap(regionIndex, segmentIndex, prot);
			}
		}
		iter->release();
	}

	IOLockUnlock(fArenaLock);
}
828 
// Ask every region to redirect its mappings for this arena, limited to
// `mapper` when non-NULL (mapper == NULL redirects all mappers).
void
IOSKArena::redirectMap( IOSKMapper * mapper )
{
	OSObject *   object;
	IOSKRegion * region;
	IOReturn     ret;

	// request all (redirectable) regions to redirect the arena's mapper,
	// mapper=0 will redirect all mappers.

	for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
		region = OSDynamicCast(IOSKRegion, object);
		assert(region != NULL);
		ret = region->updateMappingsForArena(this, true, (const void *)mapper);
		assert(kIOReturnSuccess == ret);
	}
}
846 
847 IOSKRegion *
getRegion(IOSKIndex regionIndex) const848 IOSKArena::getRegion( IOSKIndex regionIndex ) const
849 {
850 	assert(regionIndex < getRegionCount());
851 	return OSDynamicCast(IOSKRegion, fRegions->getObject(regionIndex));
852 }
853 
854 bool
addMapper(const IOSKMapper * mapper)855 IOSKArena::addMapper( const IOSKMapper * mapper )
856 {
857 	bool ok = false;
858 
859 	assert(mapper != NULL);
860 	if (!mapper) {
861 		return false;
862 	}
863 
864 	IOLockLock(fArenaLock);
865 
866 	if (!fMappers) {
867 		fMappers = OSSet::withCapacity(2);
868 	}
869 	if (fMappers) {
870 		ok = fMappers->setObject(mapper);
871 	}
872 
873 	IOLockUnlock(fArenaLock);
874 
875 	DLOG("arena %p mapper %p ok %d", this, mapper, ok);
876 	return ok;
877 }
878 
879 void
removeMapper(const IOSKMapper * mapper)880 IOSKArena::removeMapper( const IOSKMapper * mapper )
881 {
882 	assert(mapper != NULL);
883 	if (!mapper) {
884 		return;
885 	}
886 
887 	IOLockLock(fArenaLock);
888 
889 	if (fMappers) {
890 		fMappers->removeObject(mapper);
891 	}
892 
893 	IOLockUnlock(fArenaLock);
894 	DLOG("arena %p mapper %p", this, mapper);
895 }
896 
#undef  super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKRegion, OSObject )

// Initialize the region: capture the optional spec, record segment
// geometry (page-rounded segmentSize, non-zero segmentCount), and
// allocate the zeroed segment table. Returns false on bad arguments
// or allocation failure; free() handles partial teardown.
bool
IOSKRegion::initWithSpec( const IOSKRegionSpec * spec,
    IOSKSize segmentSize, IOSKCount segmentCount )
{
	bool ok = false;

	do {
		if (!IOSK_SIZE_OK(segmentSize) || (segmentCount == 0) || !super::init()) {
			break;
		}

		// spec is optional; a missing spec leaves fSpec zeroed
		if (spec) {
			fSpec = *spec;
		}
		fSegmentCount = segmentCount;
		fSegmentSize = segmentSize;

		fRegionLock = IOLockAlloc();
		if (fRegionLock == NULL) {
			break;
		}

		fSegments = IONew(Segment, fSegmentCount);
		if (fSegments == NULL) {
			break;
		}
		bzero(fSegments, sizeof(IOSKRegion::Segment) * fSegmentCount);
		ok = true;
	} while (false);

	SK_DF(ok ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "SKRegion %p segment size 0x%x count %u ok %d",
	    this, segmentSize, segmentCount, ok);

	return ok;
}
937 
// Tear down the region: free the (expected already-detached) arena
// entry list, clear and free every segment buffer, then the lock.
void
IOSKRegion::free( void )
{
	DLOG("SKRegion %p", this);

	if (fArenaHead) {
		ArenaEntry *entry, *tentry;
		SLIST_FOREACH_SAFE(entry, fArenaHead, link, tentry) {
			// Arena didn't detach from the region before release()
			assert(entry->fArena == NULL);
			IOFreeType(entry, ArenaEntry);
		}
		IOFreeType(fArenaHead, ArenaHead);
	}

	if (fSegments != NULL) {
		assert(fSegmentCount != 0);
		// drop each segment's buffer (and its task mappings)
		for (uint32_t i = 0; i < fSegmentCount; i++) {
			_clearSegmentBuffer(i, NULL);
		}

		IODelete(fSegments, Segment, fSegmentCount);
		fSegments = NULL;
	}

	if (fRegionLock != NULL) {
		IOLockFree(fRegionLock);
		fRegionLock = NULL;
	}

	super::free();
}
970 
// Install `buffer` as segment `segmentIndex` (caller holds
// fRegionLock). The buffer must exactly match the segment size. After
// retaining the buffer, every attached arena is asked to map it; on
// any mapping failure the segment is rolled back via
// _clearSegmentBuffer().
IOReturn
IOSKRegion::_setSegmentBuffer(
	const IOSKIndex segmentIndex, IOSKBuffer * buffer )
{
	Segment *   seg;
	IOReturn    ret = kIOReturnSuccess;

	assert(buffer != NULL);
	assert(segmentIndex < fSegmentCount);

	if (!buffer || (buffer->getCapacity() != fSegmentSize) ||
	    (segmentIndex >= fSegmentCount)) {
		ret = kIOReturnBadArgument;
		goto done;
	}

	seg = &fSegments[segmentIndex];
	// an occupied slot is unexpected; if it happens anyway, the
	// request silently returns success without remapping
	assert(seg->fBuffer == NULL);

	if (seg->fBuffer == NULL) {
		buffer->retain();
		seg->fBuffer = buffer;

		// update mappings for all arenas containing this region,
		// or none if no arena is attached.
		if (fArenaHead != NULL) {
			ArenaEntry * entry;
			SLIST_FOREACH(entry, fArenaHead, link) {
				if (entry->fArena != NULL) {
					ret = entry->fArena->map(this,
					    entry->fRegionOffset, entry->fRegionIndex,
					    segmentIndex, buffer);
					assert(kIOReturnSuccess == ret);
					if (ret != kIOReturnSuccess) {
						break;
					}
				}
			}
		}
	}

	// roll back on failure so the segment is never half-mapped
	if (ret != kIOReturnSuccess) {
		_clearSegmentBuffer(segmentIndex, NULL);
	}

done:
	SK_DF(ret == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "SKRegion %p set segment[%u] buffer %p ret 0x%x",
	    this, segmentIndex, buffer, ret);

	return ret;
}
1023 
// Remove the buffer backing segment `segmentIndex` (caller holds
// fRegionLock): unmap it from every attached arena (leaving the
// segment range VM_PROT_NONE), release the buffer, and optionally
// report the previous buffer pointer through *prevBuffer (returned
// unretained — the release here may have been the last reference).
void
IOSKRegion::_clearSegmentBuffer(
	const IOSKIndex segmentIndex, IOSKMemoryBufferRef * prevBuffer  )
{
	Segment * seg;
	bool cleared = false;
	IOSKBuffer * foundBuffer = NULL;

	assert(segmentIndex < fSegmentCount);
	if (segmentIndex >= fSegmentCount) {
		goto done;
	}

	seg = &fSegments[segmentIndex];
	if (seg->fBuffer != NULL) {
		foundBuffer = seg->fBuffer;

		// update mappings for all arenas containing this region,
		// or none if no arena is attached.
		if (fArenaHead != NULL) {
			vm_prot_t prot = VM_PROT_NONE;
			ArenaEntry * entry;

			SLIST_FOREACH(entry, fArenaHead, link) {
				if (entry->fArena != NULL) {
					entry->fArena->unmap(this,
					    entry->fRegionOffset, entry->fRegionIndex,
					    segmentIndex, prot, false, NULL);
				}
			}
		}

		seg->fBuffer->release();
		seg->fBuffer = NULL;
		cleared = true;
	}

	if (prevBuffer) {
		*prevBuffer = foundBuffer;
	}

done:
	DLOG("SKRegion %p clear segment[%u] ok %d",
	    this, segmentIndex, cleared);
}
1069 
1070 IOReturn
setSegmentBuffer(IOSKIndex index,IOSKMemoryBuffer * buffer)1071 IOSKRegion::setSegmentBuffer(
1072 	IOSKIndex index, IOSKMemoryBuffer * buffer )
1073 {
1074 	IOReturn ret;
1075 
1076 	IOLockLock(fRegionLock);
1077 	ret = _setSegmentBuffer(index, buffer);
1078 	IOLockUnlock(fRegionLock);
1079 	return ret;
1080 }
1081 
1082 void
clearSegmentBuffer(IOSKIndex index,IOSKMemoryBufferRef * prevBuffer)1083 IOSKRegion::clearSegmentBuffer( IOSKIndex index, IOSKMemoryBufferRef * prevBuffer )
1084 {
1085 	IOLockLock(fRegionLock);
1086 	_clearSegmentBuffer(index, prevBuffer);
1087 	IOLockUnlock(fRegionLock);
1088 }
1089 
1090 IOSKRegion::ArenaEntry *
findArenaEntry(const IOSKArena * arena)1091 IOSKRegion::findArenaEntry( const IOSKArena * arena )
1092 {
1093 	ArenaEntry * found = NULL;
1094 
1095 	assert(arena != NULL);
1096 
1097 	if (fArenaHead) {
1098 		ArenaEntry * entry;
1099 		SLIST_FOREACH(entry, fArenaHead, link) {
1100 			if (entry->fArena == arena) {
1101 				found = entry;
1102 				break;
1103 			}
1104 		}
1105 	}
1106 	return found;
1107 }
1108 
/*
 * Record that `arena` maps this region, remembering the region's byte
 * offset and index within that arena. Entries live on the
 * singly-linked fArenaHead list, which is allocated lazily on first
 * attach. A slot vacated by detachArena() (fArena == NULL) is reused
 * before a new entry is allocated, so the list never shrinks.
 *
 * @return true if the arena was recorded; false if `arena` is NULL or
 *         already attached (duplicate).
 */
bool
IOSKRegion::attachArena(
	IOSKArena * arena, IOSKOffset regionOffset, IOSKIndex regionIndex )
{
	bool ok = false;

	assert(arena != NULL);
	if (!arena) {
		return false;
	}

	IOLockLock(fRegionLock);

	// Lazily create the list head on the first attach.
	if (!fArenaHead) {
		fArenaHead = IOMallocType(ArenaHead);
		SLIST_INIT(fArenaHead);
	}
	if (fArenaHead) {
		ArenaEntry * entry = NULL;
		ArenaEntry * empty = NULL;
		ArenaEntry * dup = NULL;

		// Single pass: detect a duplicate attach and remember the
		// first vacated slot for reuse.
		SLIST_FOREACH(entry, fArenaHead, link) {
			// duplicates not allowed
			assert(entry->fArena != arena);
			if (entry->fArena == arena) {
				dup = entry;
				break;
			}

			if ((empty == NULL) && (entry->fArena == NULL)) {
				empty = entry;
			}
		}

		if (dup != NULL) {
			// duplicate: do nothing, `ok` stays false
		} else if (empty != NULL) {
			// update the empty/available entry
			empty->fArena = arena;
			empty->fRegionOffset = regionOffset;
			empty->fRegionIndex = regionIndex;
			ok = true;
		} else {
			// append a new entry
			ArenaEntry * newEntry = IOMallocType(ArenaEntry);
			newEntry->fArena = arena;
			newEntry->fRegionOffset = regionOffset;
			newEntry->fRegionIndex = regionIndex;
			SLIST_INSERT_HEAD(fArenaHead, newEntry, link);
			ok = true;
		}
	}

	IOLockUnlock(fRegionLock);

	DLOG("SKRegion %p attach arena %p offset 0x%x index %u ok %d",
	    this, arena, regionOffset, regionIndex, ok);
	return ok;
}
1169 
1170 void
detachArena(const IOSKArena * arena)1171 IOSKRegion::detachArena( const IOSKArena * arena )
1172 {
1173 	ArenaEntry * entry;
1174 	bool detached = false;
1175 
1176 	assert(arena != NULL);
1177 	if (!arena) {
1178 		return;
1179 	}
1180 
1181 	IOLockLock(fRegionLock);
1182 
1183 	entry = findArenaEntry(arena);
1184 	if (entry != NULL) {
1185 		bzero(entry, sizeof(*entry));
1186 		detached = true;
1187 	}
1188 
1189 	IOLockUnlock(fRegionLock);
1190 	DLOG("SKRegion %p detach arena %p ok %d", this, arena, detached);
1191 }
1192 
/*
 * Refresh the given arena's VM mappings for every segment of this
 * region (e.g. after a segment buffer changed, or to redirect live
 * mappings).
 *
 * Per segment:
 *  - no buffer, or a redirect was requested: call arena->unmap() with
 *    a protection value (VM_PROT_NONE for an empty segment; on
 *    redirect, read-only plus write if the buffer's spec allows user
 *    writes). Exact redirect semantics live in IOSKArena::unmap().
 *  - buffer present (normal update): call arena->map() for the buffer.
 *
 * Regions created with fSpec.noRedirect ignore redirect requests.
 *
 * @return the most recent arena->map() result, or kIOReturnSuccess if
 *         no map call was made.
 */
IOReturn
IOSKRegion::updateMappingsForArena(
	IOSKArena * arena, bool redirect, const void * context )
{
	ArenaEntry * entry;
	Segment *   seg;
	vm_prot_t   prot;
	IOReturn    result = kIOReturnSuccess;

	assert(arena != NULL);
	if (redirect && fSpec.noRedirect) {
		DLOG("SKRegion %p no redirect", this);
		return kIOReturnSuccess;
	}

	IOLockLock(fRegionLock);

	entry = findArenaEntry(arena);
	if (entry != NULL) {
		assert(entry->fArena == arena);

		for (uint32_t index = 0; index < fSegmentCount; index++) {
			seg = &fSegments[index];
			if ((seg->fBuffer == NULL) || redirect) {
				prot = VM_PROT_NONE;
				if (redirect && (seg->fBuffer != NULL)) {
					// Redirected segment keeps the protections the
					// buffer was advertised with.
					prot = VM_PROT_READ;
					if (seg->fBuffer->fSpec.user_writable) {
						prot |= VM_PROT_WRITE;
					}
				}

				arena->unmap(this, entry->fRegionOffset, entry->fRegionIndex,
				    index, prot, redirect, context);
			} else {
				result = arena->map(this, entry->fRegionOffset,
				    entry->fRegionIndex,
				    index, seg->fBuffer);
			}
		}
	}

	IOLockUnlock(fRegionLock);
	SK_DF(result == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "%p update arena %p redirect %d ret 0x%x",
	    this, arena, redirect, result);
	return result;
}
1241 
OSDefineMetaClassAndFinalStructors(IOSKMemoryArray,IOMultiMemoryDescriptor)1242 OSDefineMetaClassAndFinalStructors( IOSKMemoryArray, IOMultiMemoryDescriptor )
1243 
/*
 * Walk the array's descriptors in order, upgrading an existing
 * read-only task mapping in place: each buffer whose spec is
 * user_writable is re-mapped read-write at *startAddr using
 * kIOMapOverwrite; read-only buffers are left untouched. Nested
 * IOSKMemoryArrays are handled recursively. *startAddr is advanced by
 * each buffer's length so it always tracks the current position
 * within the parent mapping.
 *
 * @return false as soon as one overwrite mapping fails, true otherwise.
 */
bool
IOSKMemoryArray::overwriteMappingInTask(
	task_t              intoTask,
	mach_vm_address_t * startAddr,
	IOOptionBits        options )
{
	bool ok = true;

	for (uint32_t i = 0; i < _descriptorsCount; i++) {
		IOMemoryDescriptor * iomd = _descriptors[i];
		IOSKMemoryBuffer * mb = OSDynamicCast(IOSKMemoryBuffer, iomd);
		IOSKMemoryArray *  ma = OSDynamicCast(IOSKMemoryArray, iomd);

		if (mb) {
			IOMemoryMap * rwMap;

			if (mb->fSpec.user_writable) {
				// overwrite read-only mapping to read-write
				rwMap = mb->createMappingInTask(intoTask,
				    *startAddr, options | kIOMapOverwrite);
				if (rwMap) {
					DLOG("map_rw %d: addr 0x%llx, size 0x%x",
					    i, *startAddr, (uint32_t)iomd->getLength());
					// NOTE(review): the transient map handle is dropped
					// immediately; presumably the range stays mapped via
					// the parent map — confirm against IOMemoryMap docs.
					rwMap->release();
				} else {
					DLOG("overwrite map failed");
					ok = false;
					break;
				}
			} else {
				DLOG("map_ro %d: addr 0x%llx, size 0x%x",
				    i, *startAddr, (uint32_t)iomd->getLength());
			}

			//DLOG("map increment 0x%x", (uint32_t)iomd->getLength());
			*startAddr += iomd->getLength();
		} else if (ma) {
			// Recurse; the callee advances *startAddr.
			ok = ma->overwriteMappingInTask(intoTask, startAddr, options);
			if (!ok) {
				break;
			}
		}
	}

	return ok;
}
1290 
1291 #undef  super
1292 #define super IOBufferMemoryDescriptor
OSDefineMetaClassAndFinalStructorsWithZone(IOSKMemoryBuffer,IOBufferMemoryDescriptor,ZC_NONE)1293 OSDefineMetaClassAndFinalStructorsWithZone( IOSKMemoryBuffer,
1294     IOBufferMemoryDescriptor, ZC_NONE )
1295 
/*
 * Initialize the buffer from an IOSKMemoryBufferSpec.
 *
 * The spec flags are translated into IOBufferMemoryDescriptor options
 * (direction, purgeable, cache-inhibit, physically contiguous), and
 * the allocation goes to the data-buffers heap (puredata) or the
 * default heap. For kernel_writable buffers, fKernelAddr comes from
 * the BMD's own mapping. For kernel read-only buffers the BMD is
 * created against TASK_NULL and a secondary read-only kernel mapping
 * supplies fKernelAddr; that mapping retains this object (see
 * taggedRelease()).
 *
 * @return true on success; false if allocation or mapping fails.
 */
bool
IOSKMemoryBuffer::initWithSpec(
	task_t            inTask,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	const IOSKMemoryBufferSpec * spec )
{
	bool ok = true;
	IOOptionBits options = kIOMemoryKernelUserShared;

	// A NULL spec leaves fSpec zero-initialized (all flags off).
	if (spec) {
		fSpec = *spec;
	}
	if (fSpec.iodir_in) {
		options |= kIODirectionIn;
	}
	if (fSpec.iodir_out) {
		options |= kIODirectionOut;
	}
	if (fSpec.purgeable) {
		options |= (kIOMemoryPageable | kIOMemoryPurgeable);
	}
	if (fSpec.inhibitCache) {
		options |= kIOMapInhibitCache;
	}
	if (fSpec.physcontig) {
		options |= kIOMemoryPhysicallyContiguous;
	}

	setVMTags(VM_KERN_MEMORY_SKYWALK, VM_MEMORY_SKYWALK);

	if (fSpec.kernel_writable) {
		if (fSpec.puredata) {
			/* purely data; use data buffers heap */
			ok = initWithPhysicalMask(
				inTask, options, capacity, alignment, 0);
		} else {
			/* may have pointers; use default heap */
			ok = initControlWithPhysicalMask(
				inTask, options, capacity, alignment, 0);
		}
		if (!ok) {
			return false;
		}
		fKernelAddr = super::getBytesNoCopy();
		return true;
	} else {
		/*
		 * To create kernel read-only BMD:
		 * 1. init with TASK_NULL (which isn't mapped anywhere);
		 * 2. then map read-only into kernel_task
		 * Note that kernel virtual address has to be obtained from
		 * the secondary kernel read-only mapping.
		 */
		options |= kIOMapReadOnly;
		if (fSpec.puredata) {
			/* purely data; use data buffers heap */
			ok = initWithPhysicalMask(
				TASK_NULL, options, capacity, alignment, 0);
		} else {
			/* may have pointers; use default heap */
			ok = initControlWithPhysicalMask(
				TASK_NULL, options, capacity, alignment, 0);
		}
		if (!ok) {
			return false;
		}
		/* RO mapping will retain this, see ::taggedRelease() */
		fKernelReadOnlyMapping = super::createMappingInTask(kernel_task, 0, options);
		if (fKernelReadOnlyMapping == NULL) {
			return false;
		}
		fKernelAddr = (void *)fKernelReadOnlyMapping->getVirtualAddress();
		assert(fKernelAddr != NULL);
		return true;
	}
}
1373 
/*
 * Release override for kernel read-only buffers: fKernelReadOnlyMapping
 * holds a retain on this buffer, which would otherwise keep it alive
 * forever. Treating refcount 2 (the caller + the RO mapping) as the
 * free threshold lets free() run and drop the mapping.
 */
void
IOSKMemoryBuffer::taggedRelease(const void *tag) const
{
	/*
	 * RO buffer has extra retain from fKernelReadOnlyMapping, needs to
	 * explicitly release when refcnt == 2 to free ourselves.
	 */
	if (!fSpec.kernel_writable && fKernelReadOnlyMapping != NULL) {
		super::taggedRelease(tag, 2);
	} else {
		super::taggedRelease(tag);
	}
}
1387 
/*
 * Teardown: for kernel read-only buffers, drop the secondary kernel
 * mapping (and the retain it holds on us). fKernelAddr pointed into
 * that mapping (see initWithSpec()), so it is cleared as well.
 */
void
IOSKMemoryBuffer::free( void )
{
	if (!fSpec.kernel_writable && fKernelReadOnlyMapping != NULL) {
		OSSafeReleaseNULL(fKernelReadOnlyMapping);
		fKernelAddr = NULL;
	}
	super::free();
}
1397 
/*
 * Kernel virtual address of the buffer. For kernel read-only buffers
 * this is the address of the secondary RO mapping established in
 * initWithSpec(), not the BMD's own buffer address.
 */
void *
IOSKMemoryBuffer::getBytesNoCopy( void )
{
	return fKernelAddr;
}
1403 
1404 void *
getBytesNoCopy(vm_size_t start,vm_size_t withLength)1405 IOSKMemoryBuffer::getBytesNoCopy( vm_size_t start, vm_size_t withLength )
1406 {
1407 	IOVirtualAddress address;
1408 
1409 	if ((start + withLength) < start) {
1410 		return NULL;
1411 	}
1412 
1413 	address = (IOVirtualAddress) fKernelAddr;
1414 
1415 	if (start < _length && (start + withLength) <= _length) {
1416 		return (void *)(address + start);
1417 	}
1418 	return NULL;
1419 }
1420 
1421 static IOSKMemoryBuffer *
RefToMemoryBuffer(IOSKMemoryRef inRef)1422 RefToMemoryBuffer( IOSKMemoryRef inRef )
1423 {
1424 	IOSKMemoryBuffer * mb = OSDynamicCast(IOSKMemoryBuffer, inRef);
1425 	return mb;
1426 }
1427 
1428 static IOSKMemoryArray *
RefToMemoryArray(IOSKMemoryRef inRef)1429 RefToMemoryArray( IOSKMemoryRef inRef )
1430 {
1431 	IOSKMemoryArray * ma = OSDynamicCast(IOSKMemoryArray, inRef);
1432 	return ma;
1433 }
1434 
1435 __BEGIN_DECLS
1436 
1437 void
IOSKMemoryDestroy(IOSKMemoryRef reference)1438 IOSKMemoryDestroy(
1439 	IOSKMemoryRef reference )
1440 {
1441 	assert(reference);
1442 	if (reference) {
1443 		reference->release();
1444 	}
1445 }
1446 
1447 void
IOSKMemoryMapDestroy(IOSKMemoryMapRef reference)1448 IOSKMemoryMapDestroy(
1449 	IOSKMemoryMapRef reference )
1450 {
1451 	assert(reference);
1452 	if (reference) {
1453 		reference->release();
1454 	}
1455 }
1456 
1457 IOSKMemoryBufferRef
IOSKMemoryBufferCreate(mach_vm_size_t capacity,const IOSKMemoryBufferSpec * spec,mach_vm_address_t * kvaddr)1458 IOSKMemoryBufferCreate(
1459 	mach_vm_size_t capacity,
1460 	const IOSKMemoryBufferSpec * spec,
1461 	mach_vm_address_t * kvaddr )
1462 {
1463 	IOSKMemoryBuffer * mb;
1464 	void * addr = NULL;
1465 
1466 	mach_vm_size_t rounded_capacity = round_page(capacity);
1467 	if (capacity != rounded_capacity) {
1468 		return NULL;
1469 	}
1470 
1471 	mb = new IOSKMemoryBuffer;
1472 	if (mb && !mb->initWithSpec(kernel_task, capacity, PAGE_SIZE, spec)) {
1473 		mb->release();
1474 		mb = NULL;
1475 	}
1476 	if (!mb) {
1477 		DLOG("create capacity=0x%llx failed", capacity);
1478 		goto fail;
1479 	}
1480 
1481 	addr = mb->fKernelAddr;
1482 	if (kvaddr) {
1483 		*kvaddr = (mach_vm_address_t)(uintptr_t)addr;
1484 	}
1485 	DLOG("buffer %p, vaddr %p, capacity 0x%llx", mb, addr, capacity);
1486 
1487 fail:
1488 	return mb;
1489 }
1490 
1491 IOSKMemoryArrayRef
IOSKMemoryArrayCreate(const IOSKMemoryRef refs[],uint32_t count)1492 IOSKMemoryArrayCreate(
1493 	const IOSKMemoryRef refs[],
1494 	uint32_t count )
1495 {
1496 	IOSKMemoryArray * ma;
1497 	IOSKMemoryRef ref;
1498 	bool ok = true;
1499 
1500 	if (!refs || (count < 1)) {
1501 		return NULL;
1502 	}
1503 
1504 	// Validate the references
1505 	for (uint32_t i = 0; i < count; i++) {
1506 		ref = refs[i];
1507 		assert(RefToMemoryBuffer(ref) || RefToMemoryArray(ref));
1508 		if (!RefToMemoryBuffer(ref) && !RefToMemoryArray(ref)) {
1509 			ok = false;
1510 			break;
1511 		}
1512 	}
1513 	if (!ok) {
1514 		return NULL;
1515 	}
1516 
1517 	ma = new IOSKMemoryArray;
1518 	if (ma && !ma->initWithDescriptors((IOMemoryDescriptor **)refs,
1519 	    count, kIODirectionInOut, false)) {
1520 		ma->release();
1521 		ma = NULL;
1522 	}
1523 	if (!ma) {
1524 		DLOG("create count=%u failed", count);
1525 	} else {
1526 		DLOG("array %p count=%u", ma, count);
1527 	}
1528 
1529 	return ma;
1530 }
1531 
/*
 * Map an IOSKMemoryArray into a user task. The whole array is first
 * mapped read-only at a kernel-chosen address; a second pass
 * (overwriteMappingInTask) then re-maps the writable sub-buffers
 * read-write in place. On success, *mapAddr/*mapSize describe the
 * mapping and the returned IOMemoryMap holds it; NULL on failure.
 */
IOSKMemoryMapRef
IOSKMemoryMapToTask(
	IOSKMemoryRef       reference,
	task_t              intoTask,
	mach_vm_address_t * mapAddr,
	mach_vm_size_t *    mapSize )
{
	IOOptionBits options = kIOMapAnywhere | kIOMapReadOnly;
	mach_vm_address_t startAddr;
	IOMemoryMap * map = NULL;

	IOSKMemoryArray * ma = RefToMemoryArray(reference);

	assert(ma);
	if (!ma) {
		return NULL;
	}

	// Kernel mappings go through IOSKMemoryMapToKernelTask() instead.
	assert(intoTask != kernel_task);
	map = ma->createMappingInTask(intoTask, 0, options);
	if (map) {
		bool ok;

		startAddr = map->getAddress();
		*mapAddr = startAddr;
		*mapSize = map->getSize();
		DLOG("map vaddr 0x%llx, size 0x%llx", *mapAddr, *mapSize);

		// Second pass maps at fixed addresses (no kIOMapAnywhere)
		// and with write access (no kIOMapReadOnly); the overwrite
		// flag is added inside overwriteMappingInTask().
		options &= ~(kIOMapReadOnly | kIOMapAnywhere);
		ok = ma->overwriteMappingInTask(intoTask, &startAddr, options);
		if (!ok) {
			map->release();
			map = NULL;
		}
	}
	return map;
}
1569 
1570 IOSKMemoryMapRef
IOSKMemoryMapToKernelTask(IOSKMemoryRef reference,mach_vm_address_t * mapAddr,mach_vm_size_t * mapSize)1571 IOSKMemoryMapToKernelTask(
1572 	IOSKMemoryRef       reference,
1573 	mach_vm_address_t * mapAddr,
1574 	mach_vm_size_t *    mapSize )
1575 {
1576 	IOOptionBits options = kIOMapAnywhere;
1577 	mach_vm_address_t startAddr;
1578 	IOMemoryMap * map = NULL;
1579 
1580 	IOSKMemoryArray * ma = RefToMemoryArray(reference);
1581 
1582 	assert(ma);
1583 	if (!ma) {
1584 		return NULL;
1585 	}
1586 
1587 	map = ma->createMappingInTask(kernel_task, 0, options);
1588 	if (map) {
1589 		startAddr = map->getAddress();
1590 		*mapAddr = startAddr;
1591 		*mapSize = map->getSize();
1592 		DLOG("map vaddr 0x%llx, size 0x%llx", *mapAddr, *mapSize);
1593 	}
1594 	return map;
1595 }
1596 
1597 IOReturn
IOSKMemoryDiscard(IOSKMemoryRef reference)1598 IOSKMemoryDiscard( IOSKMemoryRef reference )
1599 {
1600 	IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);
1601 
1602 	assert(mb);
1603 	assert(mb->fSpec.purgeable);
1604 	if (!mb || !mb->fSpec.purgeable) {
1605 		return kIOReturnBadArgument;
1606 	}
1607 
1608 	return mb->setPurgeable(kIOMemoryPurgeableEmpty |
1609 	           kIOMemoryPurgeableFaultOnAccess, NULL);
1610 }
1611 
1612 IOReturn
IOSKMemoryReclaim(IOSKMemoryRef reference)1613 IOSKMemoryReclaim( IOSKMemoryRef reference )
1614 {
1615 	IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);
1616 
1617 	assert(mb);
1618 	assert(mb->fSpec.purgeable);
1619 	if (!mb || !mb->fSpec.purgeable) {
1620 		return kIOReturnBadArgument;
1621 	}
1622 
1623 	return mb->setPurgeable(kIOMemoryPurgeableNonVolatile, NULL);
1624 }
1625 
1626 IOReturn
IOSKMemoryWire(IOSKMemoryRef reference)1627 IOSKMemoryWire( IOSKMemoryRef reference )
1628 {
1629 	IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);
1630 
1631 	assert(mb);
1632 	assert(mb->fSpec.purgeable);
1633 	if (!mb || !mb->fSpec.purgeable) {
1634 		return kIOReturnBadArgument;
1635 	}
1636 
1637 	return mb->prepare();
1638 }
1639 
1640 IOReturn
IOSKMemoryUnwire(IOSKMemoryRef reference)1641 IOSKMemoryUnwire( IOSKMemoryRef reference )
1642 {
1643 	IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);
1644 
1645 	assert(mb);
1646 	assert(mb->fSpec.purgeable);
1647 	if (!mb || !mb->fSpec.purgeable) {
1648 		return kIOReturnBadArgument;
1649 	}
1650 
1651 	return mb->complete();
1652 }
1653 
1654 static void
IOSKObjectDestroy(const OSObject * object)1655 IOSKObjectDestroy( const OSObject * object )
1656 {
1657 	assert(object != NULL);
1658 	if (object) {
1659 		object->release();
1660 	}
1661 }
1662 
1663 IOSKArenaRef
IOSKArenaCreate(IOSKRegionRef * regionList,IOSKCount regionCount)1664 IOSKArenaCreate( IOSKRegionRef * regionList, IOSKCount regionCount )
1665 {
1666 	IOSKArenaRef arena;
1667 
1668 	arena = new IOSKArena;
1669 	if ((arena != NULL) && !arena->initWithRegions(regionList, regionCount)) {
1670 		arena->release();
1671 		arena = NULL;
1672 	}
1673 	return arena;
1674 }
1675 
/*
 * Drop the caller's reference on an arena created by IOSKArenaCreate().
 */
void
IOSKArenaDestroy( IOSKArenaRef arena )
{
	IOSKObjectDestroy(arena);
}
1681 
1682 void
IOSKArenaRedirect(IOSKArenaRef arena)1683 IOSKArenaRedirect( IOSKArenaRef arena )
1684 {
1685 	assert(arena != NULL);
1686 	if (arena != NULL) {
1687 		arena->redirectMap(NULL);
1688 	}
1689 }
1690 
1691 IOSKRegionRef
IOSKRegionCreate(const IOSKRegionSpec * regionSpec,IOSKSize segSize,IOSKCount segCount)1692 IOSKRegionCreate( const IOSKRegionSpec * regionSpec,
1693     IOSKSize segSize, IOSKCount segCount )
1694 {
1695 	IOSKRegionRef   region;
1696 
1697 	region = new IOSKRegion;
1698 	if ((region != NULL) && !region->initWithSpec(regionSpec, segSize, segCount)) {
1699 		region->release();
1700 		region = NULL;
1701 	}
1702 	return region;
1703 }
1704 
/*
 * Drop the caller's reference on a region created by IOSKRegionCreate().
 */
void
IOSKRegionDestroy( IOSKRegionRef region )
{
	IOSKObjectDestroy(region);
}
1710 
1711 IOReturn
IOSKRegionSetBuffer(IOSKRegionRef region,IOSKIndex segmentIndex,IOSKMemoryBufferRef buffer)1712 IOSKRegionSetBuffer( IOSKRegionRef region, IOSKIndex segmentIndex,
1713     IOSKMemoryBufferRef buffer )
1714 {
1715 	IOReturn ret = kIOReturnBadArgument;
1716 
1717 	assert(region != NULL);
1718 	if (region != NULL) {
1719 		ret = region->setSegmentBuffer(segmentIndex, (IOSKBuffer *)buffer);
1720 	}
1721 
1722 	return ret;
1723 }
1724 
1725 void
IOSKRegionClearBuffer(IOSKRegionRef region,IOSKIndex segmentIndex)1726 IOSKRegionClearBuffer( IOSKRegionRef region, IOSKIndex segmentIndex )
1727 {
1728 	assert(region != NULL);
1729 	if (region != NULL) {
1730 		region->clearSegmentBuffer(segmentIndex, NULL);
1731 	}
1732 }
1733 
1734 void
IOSKRegionClearBufferDebug(IOSKRegionRef region,IOSKIndex segmentIndex,IOSKMemoryBufferRef * prevBufferRef)1735 IOSKRegionClearBufferDebug( IOSKRegionRef region, IOSKIndex segmentIndex,
1736     IOSKMemoryBufferRef * prevBufferRef )
1737 {
1738 	assert(region != NULL);
1739 	if (region != NULL) {
1740 		region->clearSegmentBuffer(segmentIndex, prevBufferRef);
1741 	}
1742 }
1743 
1744 IOSKMapperRef
IOSKMapperCreate(IOSKArenaRef arena,task_t task)1745 IOSKMapperCreate( IOSKArenaRef arena, task_t task )
1746 {
1747 	IOSKMapperRef mapper = NULL;
1748 
1749 	assert(arena != NULL);
1750 	if (arena != NULL) {
1751 		arena->createMapperForTask(task, &mapper);
1752 	}
1753 	return mapper;
1754 }
1755 
/*
 * Tear down a mapper created by IOSKMapperCreate(): detach it from
 * its owning arena so the arena stops tracking it, then drop the
 * caller's reference.
 */
void
IOSKMapperDestroy( IOSKMapperRef mapper )
{
	assert(mapper != NULL);
	if (mapper != NULL) {
		// NOTE(review): arena pointer appears to be borrowed from the
		// mapper (not retained/released here) — confirm getArena().
		IOSKArena * arena = mapper->getArena();
		assert(arena != NULL);
		arena->removeMapper(mapper);
		IOSKObjectDestroy(mapper);
	}
}
1767 
1768 void
IOSKMapperRedirect(IOSKMapperRef mapper)1769 IOSKMapperRedirect( IOSKMapperRef mapper )
1770 {
1771 	assert(mapper != NULL);
1772 	if (mapper != NULL) {
1773 		IOSKArena * arena = mapper->getArena();
1774 		assert(arena != NULL);
1775 		arena->redirectMap(mapper);
1776 	}
1777 }
1778 
1779 IOReturn
IOSKMapperGetAddress(IOSKMapperRef mapper,mach_vm_address_t * address,mach_vm_size_t * size)1780 IOSKMapperGetAddress( IOSKMapperRef mapper,
1781     mach_vm_address_t * address, mach_vm_size_t * size )
1782 {
1783 	assert(mapper != NULL);
1784 	if ((mapper == NULL) || (address == NULL)) {
1785 		return kIOReturnBadArgument;
1786 	}
1787 
1788 	*address = mapper->getMapAddress(size);
1789 	return kIOReturnSuccess;
1790 }
1791 
1792 boolean_t
IOSKBufferIsWired(IOSKMemoryBufferRef buffer)1793 IOSKBufferIsWired( IOSKMemoryBufferRef buffer )
1794 {
1795 	assert(buffer != NULL);
1796 	return ((IOSKBuffer *)buffer)->isWired();
1797 }
1798 
1799 __END_DECLS
1800 
1801 #if DEVELOPMENT || DEBUG
1802 
1803 extern int IOSkywalkSupportTest(int x);
1804 
/*
 * DEVELOPMENT/DEBUG smoke test for the IOSK* API: builds regions,
 * buffers, arenas and mappers, verifies the expected page layout of
 * the mappings, exercises redirect and buffer clear/set, then tears
 * everything down. `newValue` is currently unused. Always returns 0;
 * failures trip assert().
 */
int
IOSkywalkSupportTest( int newValue )
{
	static const int kNumRegions = 3;
	static const int kNumBuffers = 6;
	static const int kNumMappers = 3;
	static const int kNumArenas  = 2;

	IOSKMemoryBufferSpec bspec;
	IOSKRegionSpec      rspec;
	IOSKMemoryBufferRef buffers[kNumBuffers];
	mach_vm_address_t   bufkvas[kNumBuffers];
	IOSKRegionRef       regions[kNumRegions];
	IOSKRegionRef       reverse[kNumRegions];
	IOSKArenaRef        arenas[kNumArenas];
	IOSKMapperRef       mappers[kNumMappers];
	mach_vm_address_t   addrs[kNumMappers];
	mach_vm_size_t      size;
	uint32_t            value;
	uint32_t *          ptr;
	IOReturn            ret;

	// Instance counts before the exercise (compare with the counts
	// printed at the end to spot leaks).
	kprintf("IOSKArena count  : %u\n",
	    IOSKArena::gMetaClass.getInstanceCount());
	kprintf("IOSKRegion count : %u\n",
	    IOSKRegion::gMetaClass.getInstanceCount());
	kprintf("IOSKMapper count : %u, %u (sub maps)\n",
	    IOSKMapper::gMetaClass.getInstanceCount(),
	    IOSKRegionMapper::gMetaClass.getInstanceCount());
	kprintf("IOSKBuffer count : %u\n",
	    IOSKBuffer::gMetaClass.getInstanceCount());

	// Three regions: 2x1-page segments (noRedirect), 3x2-page
	// segments, 4x3-page segments — 20 pages total per arena.
	rspec.noRedirect = true;
	regions[0] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(1), 2);
	assert(regions[0]);
	rspec.noRedirect = false;
	regions[1] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(2), 3);
	assert(regions[1]);
	regions[2] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(3), 4);
	assert(regions[2]);

	reverse[0] = regions[2];
	reverse[1] = regions[1];
	reverse[2] = regions[0];

	// Two arenas over the same regions, in opposite order.
	arenas[0] = IOSKArenaCreate(regions, 3);
	assert(arenas[0]);
	arenas[1] = IOSKArenaCreate(reverse, 3);
	assert(arenas[1]);

	// Purgeable buffers sized to match the segments; buffer 0 is
	// user read-only, the rest are user-writable.
	bzero(&bspec, sizeof(bspec));
	bspec.purgeable = true;
	bspec.user_writable = false;
	buffers[0] = IOSKMemoryBufferCreate(ptoa(1), &bspec, &bufkvas[0]);
	assert(buffers[0]);
	assert(IOSKBufferIsWired(buffers[0]) == false);
	bspec.user_writable = true;
	buffers[1] = IOSKMemoryBufferCreate(ptoa(1), &bspec, &bufkvas[1]);
	assert(buffers[1]);
	buffers[2] = IOSKMemoryBufferCreate(ptoa(2), &bspec, &bufkvas[2]);
	assert(buffers[2]);
	buffers[3] = IOSKMemoryBufferCreate(ptoa(2), &bspec, &bufkvas[3]);
	assert(buffers[3]);
	buffers[4] = IOSKMemoryBufferCreate(ptoa(3), &bspec, &bufkvas[4]);
	assert(buffers[4]);
	buffers[5] = IOSKMemoryBufferCreate(ptoa(3), &bspec, &bufkvas[5]);
	assert(buffers[5]);

	// Tag each buffer's first word with a recognizable pattern.
	for (int i = 0; i < kNumBuffers; i++) {
		value = 0x534B0000 | i;
		ptr = (uint32_t *)(uintptr_t)bufkvas[i];
		*ptr = value;
		assert(value == *ptr);
	}

	// Populate segments; region[1] segment 2 and region[2]
	// segments 1-2 are deliberately left empty.
	ret = IOSKRegionSetBuffer(regions[0], 0, buffers[0]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[0], 1, buffers[1]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[1], 0, buffers[2]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[1], 1, buffers[3]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[2], 0, buffers[4]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[2], 3, buffers[5]);
	assert(ret == kIOReturnSuccess);

	// Two mappers on arena 0, one on the reversed arena 1.
	mappers[0] = IOSKMapperCreate(arenas[0], current_task());
	assert(mappers[0]);
	mappers[1] = IOSKMapperCreate(arenas[0], current_task());
	assert(mappers[1]);
	mappers[2] = IOSKMapperCreate(arenas[1], current_task());
	assert(mappers[2]);

	// Each arena spans 2x1 + 3x2 + 4x3 = 20 pages.
	ret = IOSKMapperGetAddress(mappers[0], &addrs[0], &size);
	assert(ret == kIOReturnSuccess);
	assert(size == ptoa(20));
	ret = IOSKMapperGetAddress(mappers[1], &addrs[1], &size);
	assert(ret == kIOReturnSuccess);
	assert(size == ptoa(20));
	ret = IOSKMapperGetAddress(mappers[2], &addrs[2], &size);
	assert(ret == kIOReturnSuccess);
	assert(size == ptoa(20));

	for (int i = 0; i < kNumMappers; i++) {
		kprintf("mapper[%d] %p map address 0x%llx size 0x%x\n",
		    i, mappers[i], (uint64_t)addrs[i], (uint32_t)size);
	}

	// Arena 0 layout: pages 0-1 region0, 2-7 region1, 8-19 region2.
	ptr = (uint32_t *)(uintptr_t)addrs[0];
	assert(*ptr == 0x534B0000);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(1));
	assert(*ptr == 0x534B0001);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(2));
	assert(*ptr == 0x534B0002);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(4));
	assert(*ptr == 0x534B0003);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(8));
	assert(*ptr == 0x534B0004);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(17));
	assert(*ptr == 0x534B0005);

	// Writable page: scribble and restore.
	*ptr = 0x4B530005;
	assert(0x4B530005 == *ptr);
	*ptr = 0x534B0005;

	// Redirect mapper 0: page 17 (region2, redirectable) now backs
	// onto substitute memory, so the write below does not reach
	// buffer 5; pages 0-1 (region0, noRedirect) keep the real buffer.
	IOSKMapperRedirect(mappers[0]);
	*ptr = 0x33333333;
	assert(0x33333333 == *ptr);
	ptr = (uint32_t *)(uintptr_t)addrs[0];
	assert(*ptr == 0x534B0000);

	// Arena 1 (reversed) layout: pages 0-11 region2, 12-17 region1,
	// 18-19 region0. Buffer 5 still holds its original tag.
	ptr = (uint32_t *)(uintptr_t)addrs[2];
	assert(*ptr == 0x534B0004);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(9));
	assert(*ptr == 0x534B0005);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(12));
	assert(*ptr == 0x534B0002);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(14));
	assert(*ptr == 0x534B0003);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(18));
	assert(*ptr == 0x534B0000);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(19));
	assert(*ptr == 0x534B0001);

	// Clear and re-install a segment buffer; the remapped page must
	// still read the buffer's tag.
	IOSKRegionClearBufferDebug(regions[0], 1, NULL);
	ret = IOSKRegionSetBuffer(regions[0], 1, buffers[1]);
	assert(ret == kIOReturnSuccess);
	assert(*ptr == 0x534B0001);

	IOSKArenaRedirect(arenas[0]);
	IOSKArenaRedirect(arenas[1]);

	// Teardown; the counts below should match the initial ones.
	for (int i = 0; i < kNumBuffers; i++) {
		IOSKMemoryDestroy(buffers[i]);
	}
	for (int i = 0; i < kNumRegions; i++) {
		IOSKRegionDestroy(regions[i]);
	}
	for (int i = 0; i < kNumArenas; i++) {
		IOSKArenaDestroy(arenas[i]);
	}
	for (int i = 0; i < kNumMappers; i++) {
		IOSKMapperDestroy(mappers[i]);
	}

	kprintf("IOSKArena count  : %u\n",
	    IOSKArena::gMetaClass.getInstanceCount());
	kprintf("IOSKRegion count : %u\n",
	    IOSKRegion::gMetaClass.getInstanceCount());
	kprintf("IOSKMapper count : %u, %u (sub maps)\n",
	    IOSKMapper::gMetaClass.getInstanceCount(),
	    IOSKRegionMapper::gMetaClass.getInstanceCount());
	kprintf("IOSKBuffer count : %u\n",
	    IOSKBuffer::gMetaClass.getInstanceCount());

	return 0;
}
1984 
1985 #endif  /* DEVELOPMENT || DEBUG */
1986 
1987 #if defined(__x86_64__)
1988 const OSSymbol *
IOSKCopyKextIdentifierWithAddress(vm_address_t address)1989 IOSKCopyKextIdentifierWithAddress( vm_address_t address )
1990 {
1991 	const OSSymbol * id = NULL;
1992 
1993 	OSKext * kext = OSKext::lookupKextWithAddress(address);
1994 	if (kext) {
1995 		id = kext->getIdentifier();
1996 		if (id) {
1997 			id->retain();
1998 		}
1999 		kext->release();
2000 	}
2001 	return id;
2002 }
2003 #endif /* __x86_64__ */
2004