xref: /xnu-11215.1.10/iokit/bsddev/skywalk/IOSkywalkSupport.cpp (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2015-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #if defined(__x86_64__)
29 #include <libkern/c++/OSKext.h> // IOSKCopyKextIdentifierWithAddress()
30 #endif
31 
32 #include <IOKit/IOBufferMemoryDescriptor.h>
33 #include <IOKit/IOMultiMemoryDescriptor.h>
34 #include <IOKit/IOCommand.h>
35 #include <IOKit/IOLib.h>
36 #include <IOKit/skywalk/IOSkywalkSupport.h>
37 #include <skywalk/os_skywalk_private.h>
38 #include <sys/errno.h>
39 #include <sys/queue.h>
40 #include <vm/vm_map_xnu.h>
41 #include <vm/vm_kern_xnu.h>
42 
43 #include <mach/mach_vm.h>
44 #include <mach/vm_map.h>
45 #include <mach/vm_types.h>
46 
// Logging shims: errors go through Skywalk's SK_ERR, debug traces
// through SK_DF under the SK_VERB_IOSK verbosity class.
#define ELOG(fmt, args...)      SK_ERR(fmt, ##args)
#define DLOG(fmt, args...)      SK_DF(SK_VERB_IOSK, fmt, ##args)
// Sanity checks: a size must be non-zero and page-aligned; an offset
// only needs to be page-aligned (zero is a valid offset).
#define IOSK_SIZE_OK(x)         (((x) != 0) && (round_page(x) == (x)))
#define IOSK_OFFSET_OK(x)       (round_page(x) == (x))
51 
52 static vm_tag_t
getVMTagForMap(vm_map_t map)53 getVMTagForMap( vm_map_t map )
54 {
55 	return (map == kernel_map) ?
56 	       VM_KERN_MEMORY_SKYWALK : VM_MEMORY_SKYWALK;
57 }
58 
// IOSKMemoryArray:
// IOMultiMemoryDescriptor subclass adding the ability to map the
// combined descriptors into a task over an existing mapping.
// (The method definition is outside this chunk.)
class IOSKMemoryArray : public IOMultiMemoryDescriptor
{
	OSDeclareFinalStructors( IOSKMemoryArray );

public:
	// Map into intoTask at/near *startAddr with the given map options.
	bool overwriteMappingInTask(
		task_t              intoTask,
		mach_vm_address_t * startAddr,
		IOOptionBits        options );
};
69 
// IOSKMemoryBuffer:
// IOBufferMemoryDescriptor subclass parameterized by an
// IOSKMemoryBufferSpec; backs one memory segment. The spec's
// user_writable flag is consulted when mapping segments (see
// IOSKRegionMapper::map below).
class IOSKMemoryBuffer : public IOBufferMemoryDescriptor
{
	OSDeclareFinalStructors( IOSKMemoryBuffer );

public:
	// Initialize with owning task, capacity, alignment and spec
	// (definition outside this chunk).
	bool initWithSpec( task_t            inTask,
	    mach_vm_size_t    capacity,
	    mach_vm_address_t alignment,
	    const IOSKMemoryBufferSpec * spec );

	virtual void * getBytesNoCopy( void ) APPLE_KEXT_OVERRIDE;

	virtual void * getBytesNoCopy(vm_size_t start, vm_size_t withLength) APPLE_KEXT_OVERRIDE;

	// True while the descriptor's pages are wired down.
	bool
	isWired( void ) const
	{
		return _wireCount != 0;
	}

	IOSKMemoryBufferSpec    fSpec;                   // creation-time spec
	void                    *fKernelAddr;            // kernel address — set by initWithSpec (outside this chunk); TODO confirm
	IOMemoryMap             *fKernelReadOnlyMapping; // optional RO kernel mapping — TODO confirm

protected:
	virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
	virtual void free( void ) APPLE_KEXT_OVERRIDE;
};
98 
// FIXME: rename IOSKMemoryBuffer -> IOSKBuffer
// Shorthand alias used by the mapping code below.
typedef IOSKMemoryBuffer    IOSKBuffer;
101 
// IOSKRegionMapper:
// Tracks all memory mappings of a single IOSKRegion, with an array of
// IOMemoryMaps to map the region's memory segments.
// Created and released by the parent IOSKMapper.

class IOSKRegionMapper : public OSObject
{
	OSDeclareFinalStructors( IOSKRegionMapper );

public:
	// One-time setup; regionOffset is this region's page-aligned
	// offset within the parent mapper's reserved address range.
	bool initWithMapper( IOSKMapper * mapper, IOSKRegion * region,
	    IOSKOffset regionOffset );

	// Map / unmap one segment of the region.
	IOReturn    map( IOSKIndex segIndex, IOSKBuffer * buffer );
	void        unmap( IOSKIndex segIndex, vm_prot_t prot );

	// Overwrite a task VA range with an anonymous mapping of `prot`.
	kern_return_t mapOverwrite( vm_map_offset_t addr,
	    vm_map_size_t size, vm_prot_t prot );

private:
	virtual void free( void ) APPLE_KEXT_OVERRIDE;

	IOSKMapper *        fMapper;         // parent mapper (not retained)
	IOSKRegion *        fRegion;         // mapped region (not retained; kept alive via arena)
	IOMemoryMap **      fMemoryMaps;     // one slot per segment, NULL = unmapped
	IOSKCount           fMemoryMapCount; // equals the region's segment count
	IOSKOffset          fRegionOffset;   // offset within the mapper's range
};
130 
// IOSKMapper:
// Manages all memory mappings of a single task, with an array of
// IOSKRegionMappers to map all memory regions of a memory arena.
// Retains the IOSKArena.

class IOSKMapper : public OSObject
{
	OSDeclareFinalStructors( IOSKMapper );
	friend class IOSKRegionMapper;

public:
	// Builds the region mappers and reserves a VM_PROT_NONE range
	// spanning the whole arena in the task's map.
	bool     initWithTask( task_t task, IOSKArena * arena );

	// Map / unmap one segment of one region into the task.
	IOReturn map( IOSKIndex regIndex, IOSKIndex segIndex, IOSKBuffer * buffer );
	void     unmap( IOSKIndex regIndex, IOSKIndex segIndex, vm_prot_t prot );

	// Base address (and optionally size) of the reserved range.
	mach_vm_address_t
	getMapAddress( mach_vm_size_t * size ) const
	{
		if (size) {
			*size = fMapSize;
		}
		return fMapAddr;
	}

	IOSKArena *
	getArena( void ) const
	{
		return fArena;
	}
	// A redirected mapper is skipped by subsequent map updates
	// (see IOSKArena::map).
	bool
	isRedirected( void ) const
	{
		return fRedirected;
	}
	void
	redirectMap( void )
	{
		fRedirected = true;
	}

private:
	virtual void free( void ) APPLE_KEXT_OVERRIDE;

	task_t              fTask;       // target task (not retained)
	vm_map_t            fTaskMap;    // target task's vm_map
	IOSKArena *         fArena;      // retained arena
	OSArray *           fSubMaps;    // one IOSKRegionMapper per region
	mach_vm_address_t   fMapAddr;    // start of the reserved range
	mach_vm_size_t      fMapSize;    // size of the reserved range
	bool                fRedirected; // set once by redirectMap()
};
183 
// IOSKArena:
// An array of IOSKRegions is used to create an IOSKArena.
// One or more IOSKMapper can map the arena memory to tasks.
// Retains the IOSKRegions, also circularly retains the IOSKMapper(s)
// until the client calls IOSKMapperDestroy().

class IOSKArena : public OSObject
{
	OSDeclareFinalStructors( IOSKArena );

public:
	// Attaches this arena to each region, assigning region offsets
	// and indices; arena size is the sum of the region sizes.
	bool     initWithRegions( IOSKRegion ** regions,
	    IOSKCount regionCount );

	// Creates a mapper for the task and populates its mappings.
	IOReturn createMapperForTask( task_t task,
	    LIBKERN_RETURNS_RETAINED IOSKMapper ** mapper );
	// Redirects mappings; mapper == NULL redirects all mappers.
	void     redirectMap( IOSKMapper * mapper );

	IOSKSize
	getArenaSize( void ) const
	{
		return fArenaSize;
	}
	IOSKCount
	getRegionCount( void ) const
	{
		return fRegions->getCount();
	}
	IOSKRegion * getRegion( IOSKIndex regIndex ) const;

	// Called by IOSKRegion: propagate a segment buffer install to
	// every (non-redirected) mapper of this arena.
	IOReturn map( const IOSKRegion * region,
	    IOSKOffset regionOffset,
	    IOSKIndex regionIndex,
	    IOSKIndex segmentIndex,
	    IOSKMemoryBuffer * buffer );

	// Called by IOSKRegion: tear down a segment mapping; context may
	// limit a redirect to one specific mapper.
	void     unmap( const IOSKRegion * region,
	    IOSKOffset regionOffset,
	    IOSKIndex regionIndex,
	    IOSKIndex segmentIndex,
	    vm_prot_t prot,
	    bool isRedirected,
	    const void * context );

	bool     addMapper( const IOSKMapper * mapper );
	void     removeMapper( const IOSKMapper * mapper );

private:
	virtual void free( void ) APPLE_KEXT_OVERRIDE;

	IOLock *        fArenaLock; // guards fMappers
	OSSet *         fMappers;   // retained mappers (lazily created)
	OSArray *       fRegions;   // retained regions, in arena order
	IOSKSize        fArenaSize; // sum of region sizes
};
239 
// IOSKRegion:
// An IOSKRegion manages a dynamic array of IOSKBuffers representing each
// memory segment in the region. Each IOSKRegion can be shared by multiple
// IOSKArenas, and the IOSKRegion keeps state specific to each arena - the
// offset and the index of the region within the arena. A lock is used to
// serialize updates to the IOSKBuffer array and the arenas.
// Retains the IOSKBuffers.

class IOSKRegion : public OSObject
{
	OSDeclareFinalStructors( IOSKRegion );

public:
	// segSize must be non-zero and page-aligned; segCount non-zero.
	bool     initWithSpec( const IOSKRegionSpec * spec,
	    IOSKSize segSize, IOSKCount segCount );

	// Install / remove the backing buffer of one segment (locked).
	IOReturn setSegmentBuffer( IOSKIndex index, IOSKBuffer * buf );
	void     clearSegmentBuffer( IOSKIndex index, IOSKMemoryBufferRef * prevBuffer );

	// Record / forget this region's (offset, index) within an arena
	// so segment updates can be propagated to it.
	bool     attachArena( IOSKArena * arena,
	    IOSKOffset regionOffset, IOSKIndex regionIndex );
	void     detachArena( const IOSKArena * arena );

	IOReturn updateMappingsForArena( IOSKArena * arena, bool redirect,
	    const void * context = NULL );

	IOSKCount
	getSegmentCount( void ) const
	{
		return fSegmentCount;
	}
	IOSKSize
	getSegmentSize( void ) const
	{
		return fSegmentSize;
	}
	IOSKSize
	getRegionSize( void ) const
	{
		return fSegmentCount * fSegmentSize;
	}

private:
	virtual void free( void ) APPLE_KEXT_OVERRIDE;

	// One segment slot; fBuffer is NULL until a buffer is installed.
	struct Segment {
		IOSKBuffer *  fBuffer;
	};

	// Per-arena bookkeeping; fArena == NULL marks a reusable entry
	// (see attachArena/detachArena).
	struct ArenaEntry {
		SLIST_ENTRY(ArenaEntry) link;
		IOSKArena *   fArena;
		IOSKOffset    fRegionOffset;
		IOSKIndex     fRegionIndex;
	};
	SLIST_HEAD(ArenaHead, ArenaEntry);

	// Unlocked internals; public wrappers take fRegionLock.
	IOReturn _setSegmentBuffer( const IOSKIndex index, IOSKMemoryBuffer * buf );
	void     _clearSegmentBuffer( const IOSKIndex index, IOSKMemoryBufferRef * prevBuffer );
	ArenaEntry * findArenaEntry( const IOSKArena * arena );

	IOSKRegionSpec fSpec;     // creation-time options (copied in init)
	IOLock *    fRegionLock;  // serializes segment/arena updates
	ArenaHead   fArenaHead;   // attached arenas
	Segment *   fSegments;    // fSegmentCount slots
	IOSKCount   fSegmentCount;
	IOSKSize    fSegmentSize; // bytes per segment, page aligned
};
308 
#undef  super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKRegionMapper, OSObject )

// Initialize a region mapper for one (mapper, region) pair: allocate
// one IOMemoryMap slot per segment; slots are filled lazily by map().
bool
IOSKRegionMapper::initWithMapper(
	IOSKMapper * mapper, IOSKRegion * region, IOSKOffset regionOffset )
{
	if ((mapper == NULL) || (region == NULL) || !super::init()) {
		return false;
	}

	// parent mapper retains the arena, which retains the regions
	assert(IOSK_OFFSET_OK(regionOffset));
	fMapper = mapper;
	fRegion = region;
	fRegionOffset = regionOffset;

	fMemoryMapCount = region->getSegmentCount();
	assert(fMemoryMapCount != 0);
	fMemoryMaps = IONew(IOMemoryMap *, fMemoryMapCount);
	if (!fMemoryMaps) {
		return false;
	}

	// start with every segment unmapped
	bzero(fMemoryMaps, sizeof(IOMemoryMap *) * fMemoryMapCount);

	DLOG("SKRegionMapper %p mapper %p region %p offset 0x%x",
	    this, mapper, region, regionOffset);
	return true;
}
340 
// Release all live segment mappings and the map array. fMapper and
// fRegion were never retained (see initWithMapper), so they are
// simply cleared.
void
IOSKRegionMapper::free( void )
{
	DLOG("SKRegionMapper %p", this);

	if (fMemoryMaps) {
		assert(fMemoryMapCount != 0);
		for (IOSKIndex i = 0; i < fMemoryMapCount; i++) {
			if (fMemoryMaps[i]) {
				fMemoryMaps[i]->release();
				fMemoryMaps[i] = NULL;
			}
		}
		IODelete(fMemoryMaps, IOMemoryMap *, fMemoryMapCount);
		fMemoryMaps = NULL;
		fMemoryMapCount = 0;
	}

	fMapper = NULL;
	fRegion = NULL;
	super::free();
}
363 
// Map a segment buffer into the parent mapper's task at the fixed
// address reserved for (region offset + segment index). Idempotent:
// re-mapping an already-mapped segment succeeds, asserting that it
// names the same buffer.
IOReturn
IOSKRegionMapper::map( IOSKIndex segIndex, IOSKBuffer * buffer )
{
	mach_vm_address_t   addr;
	mach_vm_offset_t    offset;
	IOMemoryMap *       map;
	IOOptionBits        options = kIOMapOverwrite;
	IOReturn            ret = kIOReturnSuccess;

	assert(segIndex < fMemoryMapCount);
	assert(buffer != NULL);

	if ((segIndex >= fMemoryMapCount) || (buffer == NULL)) {
		return kIOReturnBadArgument;
	}

	// redundant map requests are expected when the arena is mapped
	// by more than one mapper.
	if ((map = fMemoryMaps[segIndex]) != NULL) {
		assert(map->getMemoryDescriptor() == buffer);
		return kIOReturnSuccess;
	}

	// honor the buffer spec: read-only unless user_writable
	if (buffer->fSpec.user_writable == FALSE) {
		options |= kIOMapReadOnly;
	}

	// fixed target address inside the reserved range
	offset = fRegionOffset + (segIndex * fRegion->getSegmentSize());
	assert((offset + fRegion->getSegmentSize()) <= fMapper->fMapSize);
	addr = fMapper->fMapAddr + offset;

	// kIOMapOverwrite replaces the placeholder mapping at addr
	map = buffer->createMappingInTask(fMapper->fTask, addr, options);
	fMemoryMaps[segIndex] = map;
	assert((map == NULL) || (map->getLength() == fRegion->getSegmentSize()));
	if (map == NULL) {
		ret = kIOReturnVMError;
	}

	SK_DF(ret == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "%p buffer %p index %u map %p offset 0x%x size 0x%x",
	    this, buffer, segIndex, fMemoryMaps[segIndex],
	    (uint32_t)offset, fRegion->getSegmentSize());

	return ret;
}
409 
// Unmap a segment: re-cover its address range with an anonymous
// mapping of `prot` (keeping the arena's range reserved), then drop
// the IOMemoryMap. Safe to call on an unmapped segment.
void
IOSKRegionMapper::unmap( IOSKIndex segIndex, vm_prot_t prot )
{
	mach_vm_address_t   addr;
	mach_vm_offset_t    offset;
	IOMemoryMap *       map;
	kern_return_t       kr;

	assert(segIndex < fMemoryMapCount);

	// redundant unmap requests are expected when the arena is mapped
	// by more than one mapper.
	if ((segIndex >= fMemoryMapCount) || ((map = fMemoryMaps[segIndex]) == NULL)) {
		return;
	}

	offset = fRegionOffset + (segIndex * fRegion->getSegmentSize());
	assert((offset + fRegion->getSegmentSize()) <= fMapper->fMapSize);
	addr = fMapper->fMapAddr + offset;

	// replace the live mapping before releasing the IOMemoryMap
	kr = mapOverwrite(addr, fRegion->getSegmentSize(), prot);
	assert(KERN_SUCCESS == kr);

	map->release();
	fMemoryMaps[segIndex] = map = NULL;

	DLOG("SKRegionMapper %p index %u offset 0x%x size 0x%x",
	    this, segIndex, (uint32_t)offset, fRegion->getSegmentSize());
}
439 
// Overwrite [addr, addr + size) in the target task's map with a fresh
// anonymous mapping of the given protection. Used to (re)establish
// the placeholder that keeps the arena's address range reserved.
kern_return_t
IOSKRegionMapper::mapOverwrite(
	vm_map_offset_t addr, vm_map_size_t size, vm_prot_t prot )
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
	kern_return_t kr;

	// fixed-address overwrite, tagged for Skywalk memory accounting
	vmk_flags.vmf_overwrite = true;
	vmk_flags.vm_tag = getVMTagForMap(fMapper->fTaskMap);

	kr = mach_vm_map_kernel(
		fMapper->fTaskMap,
		&addr,
		size,
		(vm_map_offset_t)0,
		vmk_flags,
		IPC_PORT_NULL,          // no backing object: anonymous memory
		(vm_object_offset_t)0,
		FALSE,
		prot,                   // current protection
		VM_PROT_DEFAULT,        // maximum protection
		VM_INHERIT_NONE);

	SK_DF(kr == KERN_SUCCESS ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "SKRegionMapper %p addr 0x%llx size 0x%llx prot 0x%x "
	    "kr 0x%x", this, (uint64_t)addr, (uint64_t)size, prot, kr);
	return kr;
}
468 
#undef  super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKMapper, OSObject )

// Initialize a per-task mapper for an arena: create one
// IOSKRegionMapper per region at increasing offsets, then reserve one
// contiguous, inaccessible (VM_PROT_NONE) address range in the task
// covering the entire arena. Segments are mapped over that range
// later via map().
bool
IOSKMapper::initWithTask(
	task_t task, IOSKArena * arena )
{
	IOSKRegionMapper *  subMap;
	IOSKRegion *        region;
	IOSKCount           regionCount;
	IOSKOffset          regionOffset = 0;
	vm_map_offset_t     addr;
	vm_map_size_t       size;
	kern_return_t       kr;
	bool    ok = false;

	if ((task == TASK_NULL) || (arena == NULL) || !super::init()) {
		return false;
	}

	fTask = task;
	fTaskMap = get_task_map(task);
	if (fTaskMap == VM_MAP_NULL) {
		return false;
	}

	// keep the arena (and, transitively, its regions) alive
	arena->retain();
	fArena = arena;

	regionCount = fArena->getRegionCount();
	assert(regionCount != 0);

	fSubMaps = OSArray::withCapacity(regionCount);
	if (!fSubMaps) {
		return false;
	}

	for (IOSKIndex i = 0; i < regionCount; i++) {
		region = fArena->getRegion(i);
		assert(region != NULL);

		subMap = new IOSKRegionMapper;
		if (subMap && !subMap->initWithMapper(this, region, regionOffset)) {
			subMap->release();
			subMap = NULL;
		}
		if (!subMap) {
			break;
		}

		// array retains the region mapper; drop creation reference
		ok = fSubMaps->setObject(subMap);
		subMap->release();
		subMap = NULL;
		if (!ok) {
			break;
		}

		// offset of next region
		regionOffset += region->getRegionSize();
	}
	// a short array means an allocation failed above
	if (fSubMaps->getCount() != regionCount) {
		return false;
	}

	addr = 0;
	size = fArena->getArenaSize();
	assert(regionOffset == size);
	assert(IOSK_SIZE_OK(size));

	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
	vmk_flags.vm_tag = getVMTagForMap(fTaskMap);

	// reserve address space on given task with PROT_NONE
	kr = mach_vm_map_kernel(
		fTaskMap,
		&addr,
		size,
		(vm_map_offset_t)0,
		vmk_flags,
		IPC_PORT_NULL,
		(vm_object_offset_t)0,
		FALSE,
		VM_PROT_NONE,
		VM_PROT_DEFAULT,
		VM_INHERIT_NONE);

	ok = false;
	if (KERN_SUCCESS == kr) {
		fMapAddr = (mach_vm_address_t)addr;
		fMapSize = (mach_vm_size_t)size;
		ok = true;
	}

	SK_DF(kr == KERN_SUCCESS ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "SKMapper %p task 0x%llx map %p addr 0x%llx size 0x%llx subMaps %u "
	    "kr 0x%x", this, (uint64_t)task, fTaskMap, (uint64_t)addr,
	    (uint64_t)size, fSubMaps->getCount(), kr);


	return ok;
}
572 
// Release the region mappers and the arena, then deallocate the
// reserved address range in the task's map (if one was reserved).
void
IOSKMapper::free( void )
{
	DLOG("SKMapper %p", this);

	if (fSubMaps != NULL) {
		fSubMaps->release();
		fSubMaps = NULL;
	}

	if (fArena != NULL) {
		fArena->release();
		fArena = NULL;
	}

	if (fMapSize != 0) {
		// removes the placeholder and any remaining segment mappings
		mach_vm_deallocate(fTaskMap, fMapAddr, fMapSize);
		fTaskMap = NULL;
		fMapAddr = 0;
		fMapSize = 0;
	}

	fTask = NULL;
	fTaskMap = NULL;

	super::free();
}
600 
601 IOReturn
map(IOSKIndex regionIndex,IOSKIndex segmentIndex,IOSKBuffer * buffer)602 IOSKMapper::map(
603 	IOSKIndex regionIndex, IOSKIndex segmentIndex, IOSKBuffer * buffer )
604 {
605 	IOSKRegionMapper * subMap;
606 	IOReturn ret = kIOReturnBadArgument;
607 
608 	// route to the region mapper at regionIndex
609 	assert(regionIndex < fSubMaps->getCount());
610 	subMap = (typeof(subMap))fSubMaps->getObject(regionIndex);
611 	if (subMap) {
612 		ret = subMap->map(segmentIndex, buffer);
613 	}
614 
615 	return ret;
616 }
617 
618 void
unmap(IOSKIndex regionIndex,IOSKIndex segmentIndex,vm_prot_t prot)619 IOSKMapper::unmap(
620 	IOSKIndex regionIndex, IOSKIndex segmentIndex, vm_prot_t prot )
621 {
622 	IOSKRegionMapper * subMap;
623 
624 	// route to the region mapper at regionIndex
625 	assert(regionIndex < fSubMaps->getCount());
626 	subMap = (typeof(subMap))fSubMaps->getObject(regionIndex);
627 	if (subMap) {
628 		subMap->unmap(segmentIndex, prot);
629 	}
630 }
631 
#undef  super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKArena, OSObject )

// Initialize an arena from an ordered list of regions: retain them,
// attach this arena to each (assigning its offset and index within
// the arena), and compute the arena size as the sum of region sizes.
bool
IOSKArena::initWithRegions(
	IOSKRegion ** regions, IOSKCount regionCount )
{
	IOSKRegion * region;
	IOSKSize     regionSize;
	IOSKOffset   regionOffset = 0;
	bool         ok = false;

	assert(regions != NULL);
	assert(regionCount != 0);

	do {
		if ((regions == NULL) || (regionCount == 0) || !super::init()) {
			break;
		}

		fArenaLock = IOLockAlloc();
		if (fArenaLock == NULL) {
			break;
		}

		// the array retains each region
		fRegions = OSArray::withObjects((const OSObject **)regions, regionCount);
		if (!fRegions) {
			break;
		}

		ok = true;
		for (uint32_t i = 0; i < regionCount; i++) {
			region = OSDynamicCast(IOSKRegion, fRegions->getObject(i));
			ok = (region != NULL);
			if (!ok) {
				break;
			}

			regionSize = region->getRegionSize();
			assert(IOSK_SIZE_OK(regionSize));

			// attach to each region and assign region offset/index
			ok = region->attachArena(this, regionOffset, i);
			if (!ok) {
				break;
			}

			// offset of next region
			regionOffset += regionSize;
			assert(IOSK_OFFSET_OK(regionOffset));
		}
		fArenaSize = regionOffset;
	} while (false);

	DLOG("SKArena %p regions %u size 0x%x ok %d",
	    this, regionCount, fArenaSize, ok);
	return ok;
}
691 
// Detach from all regions (stopping further mapping requests), then
// release the region array, the mapper set (expected to be empty by
// now) and the lock.
void
IOSKArena::free( void )
{
	DLOG("IOSKArena %p", this);

	if (fRegions) {
		IOSKRegion * region;
		OSObject * object;

		// detach from regions to stop mapping requests
		for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
			region = OSDynamicCast(IOSKRegion, object);
			if (region) {
				region->detachArena(this);
			}
		}

		fRegions->release();
		fRegions = NULL;
	}

	if (fMappers) {
		// all mappers must have been removed via removeMapper()
		assert(fMappers->getCount() == 0);
		fMappers->release();
		fMappers = NULL;
	}

	if (fArenaLock != NULL) {
		IOLockFree(fArenaLock);
		fArenaLock = NULL;
	}

	super::free();
}
726 
// Create and register a new mapper for the task, then ask every
// region to refresh the arena's mappings so the new mapper is
// populated. On success *outMapper carries the caller's retained
// reference; on failure it is NULL.
// NOTE(review): if a region update fails after addMapper() succeeded,
// the creation reference is released but the mapper stays in
// fMappers — the asserts treat that path as unreachable; verify
// before relaxing them.
IOReturn
IOSKArena::createMapperForTask( task_t task, IOSKMapper ** outMapper )
{
	IOSKRegion * region;
	OSObject *   object;
	IOSKMapper * mapper;
	IOReturn     result, ret = kIOReturnSuccess;

	assert(task != TASK_NULL);
	assert(outMapper != NULL);

	mapper = new IOSKMapper;
	if (mapper && !mapper->initWithTask(task, this)) {
		mapper->release();
		mapper = NULL;
	}
	if (!mapper || !addMapper(mapper)) {
		ret = kIOReturnNoMemory;
		goto done;
	}

	// request all regions to refresh the arena's mappings,
	// which now includes the newly added mapper.
	for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
		region = OSDynamicCast(IOSKRegion, object);
		assert(region != NULL);
		result = region->updateMappingsForArena(this, false);
		assert(kIOReturnSuccess == result);
		if (result != kIOReturnSuccess) {
			ret = result;
		}
	}

done:
	if ((ret != kIOReturnSuccess) && mapper) {
		mapper->release();
		mapper = NULL;
	}
	*outMapper = mapper;
	return ret;
}
768 
// Map one segment buffer into every active mapper of this arena;
// redirected mappers are skipped. Returns the last failure, if any.
// region/regionOffset are unused here: the mapper routes by index.
IOReturn
IOSKArena::map(
	const IOSKRegion * region __unused,
	IOSKOffset regionOffset __unused,
	IOSKIndex regionIndex, IOSKIndex segmentIndex,
	IOSKBuffer * buffer )
{
	IOSKMapper * mapper;
	OSIterator * iter;
	IOReturn result, ret = kIOReturnSuccess;

	IOLockLock(fArenaLock);

	if (fMappers && (iter = OSCollectionIterator::withCollection(fMappers))) {
		while ((mapper = (typeof(mapper))iter->getNextObject())) {
			// skip any redirected mapper
			if (mapper->isRedirected()) {
				continue;
			}
			result = mapper->map(regionIndex, segmentIndex, buffer);
			assert(kIOReturnSuccess == result);
			if (result != kIOReturnSuccess) {
				ret = result;
			}
		}
		iter->release();
	}

	IOLockUnlock(fArenaLock);
	return ret;
}
800 
// Unmap one segment from this arena's mappers, replacing it with an
// anonymous mapping of `prot`. When redirecting, context (an
// IOSKMapper, may be NULL) limits the operation to that mapper and
// marks it redirected; NULL redirects every mapper. When not
// redirecting, mappers already redirected are left untouched.
void
IOSKArena::unmap(
	const IOSKRegion * region __unused,
	IOSKOffset regionOffset __unused,
	IOSKIndex regionIndex, IOSKIndex segmentIndex,
	vm_prot_t prot, bool redirecting, const void * context )
{
	IOSKMapper * mapper;
	const IOSKMapper * redirectMapper = (typeof(redirectMapper))context;
	OSIterator * iter;

	IOLockLock(fArenaLock);

	if (fMappers && (iter = OSCollectionIterator::withCollection(fMappers))) {
		while ((mapper = (typeof(mapper))iter->getNextObject())) {
			if (redirecting) {
				if ((redirectMapper == NULL) || (redirectMapper == mapper)) {
					// redirecting can be specific to one mapper
					mapper->unmap(regionIndex, segmentIndex, prot);
					mapper->redirectMap();
				}
			} else if (!mapper->isRedirected()) {
				mapper->unmap(regionIndex, segmentIndex, prot);
			}
		}
		iter->release();
	}

	IOLockUnlock(fArenaLock);
}
831 
832 void
redirectMap(IOSKMapper * mapper)833 IOSKArena::redirectMap( IOSKMapper * mapper )
834 {
835 	OSObject *   object;
836 	IOSKRegion * region;
837 	IOReturn     ret;
838 
839 	// request all (redirectable) regions to redirect the arena's mapper,
840 	// mapper=0 will redirect all mappers.
841 
842 	for (uint32_t i = 0; (object = fRegions->getObject(i)); i++) {
843 		region = OSDynamicCast(IOSKRegion, object);
844 		assert(region != NULL);
845 		ret = region->updateMappingsForArena(this, true, (const void *)mapper);
846 		assert(kIOReturnSuccess == ret);
847 	}
848 }
849 
850 IOSKRegion *
getRegion(IOSKIndex regionIndex) const851 IOSKArena::getRegion( IOSKIndex regionIndex ) const
852 {
853 	assert(regionIndex < getRegionCount());
854 	return OSDynamicCast(IOSKRegion, fRegions->getObject(regionIndex));
855 }
856 
857 bool
addMapper(const IOSKMapper * mapper)858 IOSKArena::addMapper( const IOSKMapper * mapper )
859 {
860 	bool ok = false;
861 
862 	assert(mapper != NULL);
863 	if (!mapper) {
864 		return false;
865 	}
866 
867 	IOLockLock(fArenaLock);
868 
869 	if (!fMappers) {
870 		fMappers = OSSet::withCapacity(2);
871 	}
872 	if (fMappers) {
873 		ok = fMappers->setObject(mapper);
874 	}
875 
876 	IOLockUnlock(fArenaLock);
877 
878 	DLOG("arena %p mapper %p ok %d", this, mapper, ok);
879 	return ok;
880 }
881 
882 void
removeMapper(const IOSKMapper * mapper)883 IOSKArena::removeMapper( const IOSKMapper * mapper )
884 {
885 	assert(mapper != NULL);
886 	if (!mapper) {
887 		return;
888 	}
889 
890 	IOLockLock(fArenaLock);
891 
892 	if (fMappers) {
893 		fMappers->removeObject(mapper);
894 	}
895 
896 	IOLockUnlock(fArenaLock);
897 	DLOG("arena %p mapper %p", this, mapper);
898 }
899 
#undef  super
#define super OSObject
OSDefineMetaClassAndFinalStructors( IOSKRegion, OSObject )

// Initialize a region: validate the page-aligned segment size and
// non-zero segment count, then allocate the lock and the (initially
// buffer-less) segment array.
bool
IOSKRegion::initWithSpec( const IOSKRegionSpec * spec,
    IOSKSize segmentSize, IOSKCount segmentCount )
{
	bool ok = false;

	do {
		if (!IOSK_SIZE_OK(segmentSize) || (segmentCount == 0) || !super::init()) {
			break;
		}

		// spec is optional
		if (spec) {
			fSpec = *spec;
		}
		fSegmentCount = segmentCount;
		fSegmentSize = segmentSize;

		fRegionLock = IOLockAlloc();
		if (fRegionLock == NULL) {
			break;
		}

		SLIST_INIT(&fArenaHead);

		fSegments = IONew(Segment, fSegmentCount);
		if (fSegments == NULL) {
			break;
		}
		// every segment starts without a backing buffer
		bzero(fSegments, sizeof(IOSKRegion::Segment) * fSegmentCount);
		ok = true;
	} while (false);

	SK_DF(ok ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "SKRegion %p segment size 0x%x count %u ok %d",
	    this, segmentSize, segmentCount, ok);

	return ok;
}
942 
// Free the arena entry list (all arenas must already be detached),
// release any remaining segment buffers, then free the lock.
void
IOSKRegion::free( void )
{
	DLOG("SKRegion %p", this);

	ArenaEntry *entry, *tentry;
	SLIST_FOREACH_SAFE(entry, &fArenaHead, link, tentry) {
		SLIST_REMOVE(&fArenaHead, entry, ArenaEntry, link);
		// Arena didn't detach from the region before release()
		assert(entry->fArena == NULL);
		IOFreeType(entry, ArenaEntry);
	}
	assert(SLIST_EMPTY(&fArenaHead));

	if (fSegments != NULL) {
		assert(fSegmentCount != 0);
		// drop each segment's buffer (and its arena mappings)
		for (uint32_t i = 0; i < fSegmentCount; i++) {
			_clearSegmentBuffer(i, NULL);
		}

		IODelete(fSegments, Segment, fSegmentCount);
		fSegments = NULL;
	}

	if (fRegionLock != NULL) {
		IOLockFree(fRegionLock);
		fRegionLock = NULL;
	}

	super::free();
}
974 
// Install `buffer` into the segment slot at segmentIndex and map it
// into every attached arena. buffer->getCapacity() must equal the
// segment size. An already-occupied slot is a silent success (caught
// by assert on development kernels). Any arena mapping failure rolls
// the slot back via _clearSegmentBuffer(). Callers serialize via
// fRegionLock (see setSegmentBuffer).
IOReturn
IOSKRegion::_setSegmentBuffer(
	const IOSKIndex segmentIndex, IOSKBuffer * buffer )
{
	Segment *   seg;
	IOReturn    ret = kIOReturnSuccess;

	assert(buffer != NULL);
	assert(segmentIndex < fSegmentCount);

	if (!buffer || (buffer->getCapacity() != fSegmentSize) ||
	    (segmentIndex >= fSegmentCount)) {
		ret = kIOReturnBadArgument;
		goto done;
	}

	seg = &fSegments[segmentIndex];
	assert(seg->fBuffer == NULL);

	if (seg->fBuffer == NULL) {
		// the region retains the installed buffer
		buffer->retain();
		seg->fBuffer = buffer;

		// update mappings for all arenas containing this region,
		// or none if no arena is attached.
		ArenaEntry * entry;
		SLIST_FOREACH(entry, &fArenaHead, link) {
			if (entry->fArena != NULL) {
				ret = entry->fArena->map(this,
				    entry->fRegionOffset, entry->fRegionIndex,
				    segmentIndex, buffer);
				assert(kIOReturnSuccess == ret);
				if (ret != kIOReturnSuccess) {
					break;
				}
			}
		}
	}

	// roll back on failure: unmap from arenas, release the buffer
	if (ret != kIOReturnSuccess) {
		_clearSegmentBuffer(segmentIndex, NULL);
	}

done:
	SK_DF(ret == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "SKRegion %p set segment[%u] buffer %p ret 0x%x",
	    this, segmentIndex, buffer, ret);

	return ret;
}
1025 
// Remove the buffer (if any) from the segment slot: unmap it from all
// attached arenas with VM_PROT_NONE, then drop the region's retain.
// If prevBuffer is non-NULL it receives the old buffer pointer (or
// NULL if the slot was empty).
// NOTE(review): the returned pointer is handed back after release(),
// so it is only safe to use if the caller holds its own reference.
void
IOSKRegion::_clearSegmentBuffer(
	const IOSKIndex segmentIndex, IOSKMemoryBufferRef * prevBuffer  )
{
	Segment * seg;
	bool cleared = false;
	IOSKBuffer * foundBuffer = NULL;

	assert(segmentIndex < fSegmentCount);
	if (segmentIndex >= fSegmentCount) {
		goto done;
	}

	seg = &fSegments[segmentIndex];
	if (seg->fBuffer != NULL) {
		foundBuffer = seg->fBuffer;

		// update mappings for all arenas containing this region,
		// or none if no arena is attached.
		vm_prot_t prot = VM_PROT_NONE;
		ArenaEntry * entry;

		SLIST_FOREACH(entry, &fArenaHead, link) {
			if (entry->fArena != NULL) {
				entry->fArena->unmap(this,
				    entry->fRegionOffset, entry->fRegionIndex,
				    segmentIndex, prot, false, NULL);
			}
		}

		seg->fBuffer->release();
		seg->fBuffer = NULL;
		cleared = true;
	}

	if (prevBuffer) {
		*prevBuffer = foundBuffer;
	}

done:
	DLOG("SKRegion %p clear segment[%u] ok %d",
	    this, segmentIndex, cleared);
}
1069 
1070 IOReturn
setSegmentBuffer(IOSKIndex index,IOSKMemoryBuffer * buffer)1071 IOSKRegion::setSegmentBuffer(
1072 	IOSKIndex index, IOSKMemoryBuffer * buffer )
1073 {
1074 	IOReturn ret;
1075 
1076 	IOLockLock(fRegionLock);
1077 	ret = _setSegmentBuffer(index, buffer);
1078 	IOLockUnlock(fRegionLock);
1079 	return ret;
1080 }
1081 
1082 void
clearSegmentBuffer(IOSKIndex index,IOSKMemoryBufferRef * prevBuffer)1083 IOSKRegion::clearSegmentBuffer( IOSKIndex index, IOSKMemoryBufferRef * prevBuffer )
1084 {
1085 	IOLockLock(fRegionLock);
1086 	_clearSegmentBuffer(index, prevBuffer);
1087 	IOLockUnlock(fRegionLock);
1088 }
1089 
1090 IOSKRegion::ArenaEntry *
findArenaEntry(const IOSKArena * arena)1091 IOSKRegion::findArenaEntry( const IOSKArena * arena )
1092 {
1093 	ArenaEntry * found = NULL;
1094 
1095 	assert(arena != NULL);
1096 
1097 	ArenaEntry * entry;
1098 	SLIST_FOREACH(entry, &fArenaHead, link) {
1099 		if (entry->fArena == arena) {
1100 			found = entry;
1101 			break;
1102 		}
1103 	}
1104 	return found;
1105 }
1106 
1107 bool
attachArena(IOSKArena * arena,IOSKOffset regionOffset,IOSKIndex regionIndex)1108 IOSKRegion::attachArena(
1109 	IOSKArena * arena, IOSKOffset regionOffset, IOSKIndex regionIndex )
1110 {
1111 	bool ok = false;
1112 
1113 	assert(arena != NULL);
1114 	if (!arena) {
1115 		return false;
1116 	}
1117 
1118 	IOLockLock(fRegionLock);
1119 
1120 	ArenaEntry * entry = NULL;
1121 	ArenaEntry * empty = NULL;
1122 	ArenaEntry * dup = NULL;
1123 
1124 	SLIST_FOREACH(entry, &fArenaHead, link) {
1125 		// duplicates not allowed
1126 		assert(entry->fArena != arena);
1127 		if (entry->fArena == arena) {
1128 			dup = entry;
1129 			break;
1130 		}
1131 
1132 		if ((empty == NULL) && (entry->fArena == NULL)) {
1133 			empty = entry;
1134 		}
1135 	}
1136 
1137 	if (dup != NULL) {
1138 		// do nothing
1139 	} else if (empty != NULL) {
1140 		// update the empty/available entry
1141 		empty->fArena = arena;
1142 		empty->fRegionOffset = regionOffset;
1143 		empty->fRegionIndex = regionIndex;
1144 		ok = true;
1145 	} else {
1146 		// append a new entry
1147 		ArenaEntry * newEntry = IOMallocType(ArenaEntry);
1148 		newEntry->fArena = arena;
1149 		newEntry->fRegionOffset = regionOffset;
1150 		newEntry->fRegionIndex = regionIndex;
1151 		SLIST_INSERT_HEAD(&fArenaHead, newEntry, link);
1152 		ok = true;
1153 	}
1154 
1155 	IOLockUnlock(fRegionLock);
1156 
1157 	SK_DF(ok ? SK_VERB_IOSK : SK_VERB_ERROR,
1158 	    "SKRegion %p attach arena %p offset 0x%x index %u ok %d",
1159 	    this, arena, regionOffset, regionIndex, ok);
1160 	return ok;
1161 }
1162 
1163 void
detachArena(const IOSKArena * arena)1164 IOSKRegion::detachArena( const IOSKArena * arena )
1165 {
1166 	ArenaEntry * entry;
1167 	bool detached = false;
1168 
1169 	assert(arena != NULL);
1170 	if (!arena) {
1171 		return;
1172 	}
1173 
1174 	IOLockLock(fRegionLock);
1175 
1176 	entry = findArenaEntry(arena);
1177 	if (entry != NULL) {
1178 		entry->fArena = NULL;
1179 		entry->fRegionOffset = 0;
1180 		entry->fRegionIndex = 0;
1181 		detached = true;
1182 	}
1183 
1184 	IOLockUnlock(fRegionLock);
1185 	DLOG("SKRegion %p detach arena %p ok %d", this, arena, detached);
1186 }
1187 
IOReturn
IOSKRegion::updateMappingsForArena(
	IOSKArena * arena, bool redirect, const void * context )
{
	// Re-establish (or redirect) <arena>'s mappings for every segment
	// of this region. Segments with a buffer are mapped; segments
	// without one are unmapped. When redirecting, backed segments are
	// unmapped with reduced protections instead of remapped.
	ArenaEntry * entry;
	Segment *   seg;
	vm_prot_t   prot;
	IOReturn    result = kIOReturnSuccess;

	assert(arena != NULL);
	// Regions marked noRedirect ignore redirect requests entirely.
	if (redirect && fSpec.noRedirect) {
		DLOG("SKRegion %p no redirect", this);
		return kIOReturnSuccess;
	}

	IOLockLock(fRegionLock);

	entry = findArenaEntry(arena);
	if (entry != NULL) {
		assert(entry->fArena == arena);

		for (uint32_t index = 0; index < fSegmentCount; index++) {
			seg = &fSegments[index];
			if ((seg->fBuffer == NULL) || redirect) {
				// Unmap path. On redirect of a backed segment,
				// retain read access (plus write if the buffer
				// allows user writes); otherwise remove all
				// access.
				prot = VM_PROT_NONE;
				if (redirect && (seg->fBuffer != NULL)) {
					prot = VM_PROT_READ;
					if (seg->fBuffer->fSpec.user_writable) {
						prot |= VM_PROT_WRITE;
					}
				}

				arena->unmap(this, entry->fRegionOffset, entry->fRegionIndex,
				    index, prot, redirect, context);
			} else {
				// Backed segment, normal update: (re)map the
				// buffer into the arena.
				result = arena->map(this, entry->fRegionOffset,
				    entry->fRegionIndex,
				    index, seg->fBuffer);
			}
		}
	}

	IOLockUnlock(fRegionLock);
	SK_DF(result == kIOReturnSuccess ? SK_VERB_IOSK : SK_VERB_ERROR,
	    "%p update arena %p redirect %d ret 0x%x",
	    this, arena, redirect, result);
	return result;
}
1236 
// OSMetaClass boilerplate: IOSKMemoryArray is a final subclass of
// IOMultiMemoryDescriptor.
OSDefineMetaClassAndFinalStructors( IOSKMemoryArray, IOMultiMemoryDescriptor )
1238 
bool
IOSKMemoryArray::overwriteMappingInTask(
	task_t              intoTask,
	mach_vm_address_t * startAddr,
	IOOptionBits        options )
{
	// Walk this array's descriptors; for each user-writable buffer,
	// overwrite the mapping at *startAddr with a read-write mapping
	// (kIOMapOverwrite). Read-only buffers are skipped (left as already
	// mapped). *startAddr is advanced by each buffer's length, and
	// nested arrays recurse sharing the same address cursor.
	// Returns false on the first failed overwrite.
	bool ok = true;

	for (uint32_t i = 0; i < _descriptorsCount; i++) {
		IOMemoryDescriptor * iomd = _descriptors[i];
		IOSKMemoryBuffer * mb = OSDynamicCast(IOSKMemoryBuffer, iomd);
		IOSKMemoryArray *  ma = OSDynamicCast(IOSKMemoryArray, iomd);

		if (mb) {
			IOMemoryMap * rwMap;

			if (mb->fSpec.user_writable) {
				// overwrite read-only mapping to read-write
				rwMap = mb->createMappingInTask(intoTask,
				    *startAddr, options | kIOMapOverwrite);
				if (rwMap) {
					DLOG("map_rw %d: addr 0x%llx, size 0x%x",
					    i, *startAddr, (uint32_t)iomd->getLength());
					rwMap->release();
				} else {
					ELOG("overwrite map failed");
					ok = false;
					break;
				}
			} else {
				DLOG("map_ro %d: addr 0x%llx, size 0x%x",
				    i, *startAddr, (uint32_t)iomd->getLength());
			}

			//DLOG("map increment 0x%x", (uint32_t)iomd->getLength());
			*startAddr += iomd->getLength();
		} else if (ma) {
			// Nested array: recurse with the shared cursor.
			ok = ma->overwriteMappingInTask(intoTask, startAddr, options);
			if (!ok) {
				break;
			}
		}
	}

	return ok;
}
1285 
#undef  super
#define super IOBufferMemoryDescriptor
// OSMetaClass boilerplate: IOSKMemoryBuffer is a final subclass of
// IOBufferMemoryDescriptor (ZC_NONE: no special zone-creation flags).
OSDefineMetaClassAndFinalStructorsWithZone( IOSKMemoryBuffer,
    IOBufferMemoryDescriptor, ZC_NONE )
1290 
bool
IOSKMemoryBuffer::initWithSpec(
	task_t            inTask,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	const IOSKMemoryBufferSpec * spec )
{
	// Initialize the buffer descriptor from an optional spec; each spec
	// flag maps onto an IOMemoryDescriptor option below. Kernel-writable
	// buffers expose their primary mapping via fKernelAddr; kernel
	// read-only buffers are created unmapped (TASK_NULL) and then mapped
	// read-only into kernel_task, with fKernelAddr taken from that
	// secondary mapping.
	bool ok = true;
	IOOptionBits options = kIOMemoryKernelUserShared;

	if (spec) {
		fSpec = *spec;
	}
	if (fSpec.iodir_in) {
		options |= kIODirectionIn;
	}
	if (fSpec.iodir_out) {
		options |= kIODirectionOut;
	}
	if (fSpec.purgeable) {
		options |= (kIOMemoryPageable | kIOMemoryPurgeable);
	}
	if (fSpec.inhibitCache) {
		options |= kIOMapInhibitCache;
	}
	if (fSpec.physcontig) {
		options |= kIOMemoryPhysicallyContiguous;
	}
	if (fSpec.threadSafe) {
		options |= kIOMemoryThreadSafe;
	}

	// Tag the allocation for skywalk in vm accounting.
	setVMTags(VM_KERN_MEMORY_SKYWALK, VM_MEMORY_SKYWALK);

	if (fSpec.kernel_writable) {
		if (fSpec.puredata) {
			/* purely data; use data buffers heap */
			ok = initWithPhysicalMask(
				inTask, options, capacity, alignment, 0);
		} else {
			/* may have pointers; use default heap */
			ok = initControlWithPhysicalMask(
				inTask, options, capacity, alignment, 0);
		}
		if (!ok) {
			return false;
		}
		fKernelAddr = super::getBytesNoCopy();
		return true;
	} else {
		/*
		 * To create kernel read-only BMD:
		 * 1. init with TASK_NULL (which isn’t mapped anywhere);
		 * 2. then map read-only into kernel_task
		 * Note that kernel virtual address has to be obtained from
		 * the secondary kernel read-only mapping.
		 */
		options |= kIOMapReadOnly;
		if (fSpec.puredata) {
			/* purely data; use data buffers heap */
			ok = initWithPhysicalMask(
				TASK_NULL, options, capacity, alignment, 0);
		} else {
			/* may have pointers; use default heap */
			ok = initControlWithPhysicalMask(
				TASK_NULL, options, capacity, alignment, 0);
		}
		if (!ok) {
			return false;
		}
		/* RO mapping will retain this, see ::taggedRelease() */
		fKernelReadOnlyMapping = super::createMappingInTask(kernel_task, 0, options);
		if (fKernelReadOnlyMapping == NULL) {
			return false;
		}
		fKernelAddr = (void *)fKernelReadOnlyMapping->getVirtualAddress();
		assert(fKernelAddr != NULL);
		return true;
	}
}
1371 
void
IOSKMemoryBuffer::taggedRelease(const void *tag) const
{
	/*
	 * RO buffer has extra retain from fKernelReadOnlyMapping, needs to
	 * explicitly release when refcnt == 2 to free ourselves.
	 */
	if (!fSpec.kernel_writable && fKernelReadOnlyMapping != NULL) {
		// freeWhen == 2: the two remaining references are the
		// caller's and the read-only mapping's, so allow free.
		super::taggedRelease(tag, 2);
	} else {
		super::taggedRelease(tag);
	}
}
1385 
1386 void
free(void)1387 IOSKMemoryBuffer::free( void )
1388 {
1389 	if (!fSpec.kernel_writable && fKernelReadOnlyMapping != NULL) {
1390 		OSSafeReleaseNULL(fKernelReadOnlyMapping);
1391 		fKernelAddr = NULL;
1392 	}
1393 	super::free();
1394 }
1395 
1396 void *
getBytesNoCopy(void)1397 IOSKMemoryBuffer::getBytesNoCopy( void )
1398 {
1399 	return fKernelAddr;
1400 }
1401 
1402 void *
getBytesNoCopy(vm_size_t start,vm_size_t withLength)1403 IOSKMemoryBuffer::getBytesNoCopy( vm_size_t start, vm_size_t withLength )
1404 {
1405 	IOVirtualAddress address;
1406 
1407 	if ((start + withLength) < start) {
1408 		return NULL;
1409 	}
1410 
1411 	address = (IOVirtualAddress) fKernelAddr;
1412 
1413 	if (start < _length && (start + withLength) <= _length) {
1414 		return (void *)(address + start);
1415 	}
1416 	return NULL;
1417 }
1418 
1419 static IOSKMemoryBuffer *
RefToMemoryBuffer(IOSKMemoryRef inRef)1420 RefToMemoryBuffer( IOSKMemoryRef inRef )
1421 {
1422 	IOSKMemoryBuffer * mb = OSDynamicCast(IOSKMemoryBuffer, inRef);
1423 	return mb;
1424 }
1425 
1426 static IOSKMemoryArray *
RefToMemoryArray(IOSKMemoryRef inRef)1427 RefToMemoryArray( IOSKMemoryRef inRef )
1428 {
1429 	IOSKMemoryArray * ma = OSDynamicCast(IOSKMemoryArray, inRef);
1430 	return ma;
1431 }
1432 
1433 __BEGIN_DECLS
1434 
1435 void
IOSKMemoryDestroy(IOSKMemoryRef reference)1436 IOSKMemoryDestroy(
1437 	IOSKMemoryRef reference )
1438 {
1439 	assert(reference);
1440 	if (reference) {
1441 		reference->release();
1442 	}
1443 }
1444 
1445 void
IOSKMemoryMapDestroy(IOSKMemoryMapRef reference)1446 IOSKMemoryMapDestroy(
1447 	IOSKMemoryMapRef reference )
1448 {
1449 	assert(reference);
1450 	if (reference) {
1451 		reference->release();
1452 	}
1453 }
1454 
1455 IOSKMemoryBufferRef
IOSKMemoryBufferCreate(mach_vm_size_t capacity,const IOSKMemoryBufferSpec * spec,mach_vm_address_t * kvaddr)1456 IOSKMemoryBufferCreate(
1457 	mach_vm_size_t capacity,
1458 	const IOSKMemoryBufferSpec * spec,
1459 	mach_vm_address_t * kvaddr )
1460 {
1461 	IOSKMemoryBuffer * mb;
1462 	void * addr = NULL;
1463 
1464 	mach_vm_size_t rounded_capacity = round_page(capacity);
1465 	if (capacity != rounded_capacity) {
1466 		return NULL;
1467 	}
1468 
1469 	mb = new IOSKMemoryBuffer;
1470 	if (mb && !mb->initWithSpec(kernel_task, capacity, PAGE_SIZE, spec)) {
1471 		mb->release();
1472 		mb = NULL;
1473 	}
1474 	if (!mb) {
1475 		ELOG("create capacity=0x%llx failed", capacity);
1476 		goto fail;
1477 	}
1478 
1479 	addr = mb->fKernelAddr;
1480 	if (kvaddr) {
1481 		*kvaddr = (mach_vm_address_t)(uintptr_t)addr;
1482 	}
1483 	DLOG("buffer %p, vaddr %p, capacity 0x%llx", mb, addr, capacity);
1484 
1485 fail:
1486 	return mb;
1487 }
1488 
1489 IOSKMemoryArrayRef
IOSKMemoryArrayCreate(const IOSKMemoryRef refs[],uint32_t count)1490 IOSKMemoryArrayCreate(
1491 	const IOSKMemoryRef refs[],
1492 	uint32_t count )
1493 {
1494 	IOSKMemoryArray * ma;
1495 	IOSKMemoryRef ref;
1496 	bool ok = true;
1497 
1498 	if (!refs || (count < 1)) {
1499 		return NULL;
1500 	}
1501 
1502 	// Validate the references
1503 	for (uint32_t i = 0; i < count; i++) {
1504 		ref = refs[i];
1505 		assert(RefToMemoryBuffer(ref) || RefToMemoryArray(ref));
1506 		if (!RefToMemoryBuffer(ref) && !RefToMemoryArray(ref)) {
1507 			ok = false;
1508 			break;
1509 		}
1510 	}
1511 	if (!ok) {
1512 		return NULL;
1513 	}
1514 
1515 	ma = new IOSKMemoryArray;
1516 	if (ma && !ma->initWithDescriptors((IOMemoryDescriptor **)refs,
1517 	    count, kIODirectionInOut, false)) {
1518 		ma->release();
1519 		ma = NULL;
1520 	}
1521 	if (!ma) {
1522 		ELOG("create count=%u failed", count);
1523 	} else {
1524 		DLOG("array %p count=%u", ma, count);
1525 	}
1526 
1527 	return ma;
1528 }
1529 
IOSKMemoryMapRef
IOSKMemoryMapToTask(
	IOSKMemoryRef       reference,
	task_t              intoTask,
	mach_vm_address_t * mapAddr,
	mach_vm_size_t *    mapSize )
{
	// Map a memory array into a user task in two phases: first the
	// whole array is mapped read-only at a kernel-chosen address, then
	// the user-writable buffers within it are overwritten in place with
	// read-write mappings via overwriteMappingInTask(). If the second
	// phase fails, the map is released and NULL is returned.
	IOOptionBits options = kIOMapAnywhere | kIOMapReadOnly;
	mach_vm_address_t startAddr;
	IOMemoryMap * map = NULL;

	IOSKMemoryArray * ma = RefToMemoryArray(reference);

	assert(ma);
	if (!ma) {
		return NULL;
	}

	assert(intoTask != kernel_task);
	map = ma->createMappingInTask(intoTask, 0, options);
	if (map) {
		bool ok;

		startAddr = map->getAddress();
		*mapAddr = startAddr;
		*mapSize = map->getSize();
		DLOG("map vaddr 0x%llx, size 0x%llx", *mapAddr, *mapSize);

		// Second phase maps at fixed addresses with write access:
		// drop ReadOnly and Anywhere.
		options &= ~(kIOMapReadOnly | kIOMapAnywhere);
		ok = ma->overwriteMappingInTask(intoTask, &startAddr, options);
		if (!ok) {
			map->release();
			map = NULL;
		}
	}
	return map;
}
1567 
1568 IOSKMemoryMapRef
IOSKMemoryMapToKernelTask(IOSKMemoryRef reference,mach_vm_address_t * mapAddr,mach_vm_size_t * mapSize)1569 IOSKMemoryMapToKernelTask(
1570 	IOSKMemoryRef       reference,
1571 	mach_vm_address_t * mapAddr,
1572 	mach_vm_size_t *    mapSize )
1573 {
1574 	IOOptionBits options = kIOMapAnywhere;
1575 	mach_vm_address_t startAddr;
1576 	IOMemoryMap * map = NULL;
1577 
1578 	IOSKMemoryArray * ma = RefToMemoryArray(reference);
1579 
1580 	assert(ma);
1581 	if (!ma) {
1582 		return NULL;
1583 	}
1584 
1585 	map = ma->createMappingInTask(kernel_task, 0, options);
1586 	if (map) {
1587 		startAddr = map->getAddress();
1588 		*mapAddr = startAddr;
1589 		*mapSize = map->getSize();
1590 		DLOG("map vaddr 0x%llx, size 0x%llx", *mapAddr, *mapSize);
1591 	}
1592 	return map;
1593 }
1594 
1595 IOReturn
IOSKMemoryDiscard(IOSKMemoryRef reference)1596 IOSKMemoryDiscard( IOSKMemoryRef reference )
1597 {
1598 	IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);
1599 
1600 	assert(mb);
1601 	assert(mb->fSpec.purgeable);
1602 	if (!mb || !mb->fSpec.purgeable) {
1603 		return kIOReturnBadArgument;
1604 	}
1605 
1606 	return mb->setPurgeable(kIOMemoryPurgeableEmpty |
1607 	           kIOMemoryPurgeableFaultOnAccess, NULL);
1608 }
1609 
1610 IOReturn
IOSKMemoryReclaim(IOSKMemoryRef reference)1611 IOSKMemoryReclaim( IOSKMemoryRef reference )
1612 {
1613 	IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);
1614 
1615 	assert(mb);
1616 	assert(mb->fSpec.purgeable);
1617 	if (!mb || !mb->fSpec.purgeable) {
1618 		return kIOReturnBadArgument;
1619 	}
1620 
1621 	return mb->setPurgeable(kIOMemoryPurgeableNonVolatile, NULL);
1622 }
1623 
1624 IOReturn
IOSKMemoryWire(IOSKMemoryRef reference)1625 IOSKMemoryWire( IOSKMemoryRef reference )
1626 {
1627 	IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);
1628 
1629 	assert(mb);
1630 	assert(mb->fSpec.purgeable);
1631 	if (!mb || !mb->fSpec.purgeable) {
1632 		return kIOReturnBadArgument;
1633 	}
1634 
1635 	return mb->prepare();
1636 }
1637 
1638 IOReturn
IOSKMemoryUnwire(IOSKMemoryRef reference)1639 IOSKMemoryUnwire( IOSKMemoryRef reference )
1640 {
1641 	IOSKMemoryBuffer * mb = RefToMemoryBuffer(reference);
1642 
1643 	assert(mb);
1644 	assert(mb->fSpec.purgeable);
1645 	if (!mb || !mb->fSpec.purgeable) {
1646 		return kIOReturnBadArgument;
1647 	}
1648 
1649 	return mb->complete();
1650 }
1651 
1652 static void
IOSKObjectDestroy(const OSObject * object)1653 IOSKObjectDestroy( const OSObject * object )
1654 {
1655 	assert(object != NULL);
1656 	if (object) {
1657 		object->release();
1658 	}
1659 }
1660 
1661 IOSKArenaRef
IOSKArenaCreate(IOSKRegionRef * regionList,IOSKCount regionCount)1662 IOSKArenaCreate( IOSKRegionRef * regionList, IOSKCount regionCount )
1663 {
1664 	IOSKArenaRef arena;
1665 
1666 	arena = new IOSKArena;
1667 	if ((arena != NULL) && !arena->initWithRegions(regionList, regionCount)) {
1668 		arena->release();
1669 		arena = NULL;
1670 	}
1671 	return arena;
1672 }
1673 
1674 void
IOSKArenaDestroy(IOSKArenaRef arena)1675 IOSKArenaDestroy( IOSKArenaRef arena )
1676 {
1677 	IOSKObjectDestroy(arena);
1678 }
1679 
1680 void
IOSKArenaRedirect(IOSKArenaRef arena)1681 IOSKArenaRedirect( IOSKArenaRef arena )
1682 {
1683 	assert(arena != NULL);
1684 	if (arena != NULL) {
1685 		arena->redirectMap(NULL);
1686 	}
1687 }
1688 
1689 IOSKRegionRef
IOSKRegionCreate(const IOSKRegionSpec * regionSpec,IOSKSize segSize,IOSKCount segCount)1690 IOSKRegionCreate( const IOSKRegionSpec * regionSpec,
1691     IOSKSize segSize, IOSKCount segCount )
1692 {
1693 	IOSKRegionRef   region;
1694 
1695 	region = new IOSKRegion;
1696 	if ((region != NULL) && !region->initWithSpec(regionSpec, segSize, segCount)) {
1697 		region->release();
1698 		region = NULL;
1699 	}
1700 	return region;
1701 }
1702 
1703 void
IOSKRegionDestroy(IOSKRegionRef region)1704 IOSKRegionDestroy( IOSKRegionRef region )
1705 {
1706 	IOSKObjectDestroy(region);
1707 }
1708 
1709 IOReturn
IOSKRegionSetBuffer(IOSKRegionRef region,IOSKIndex segmentIndex,IOSKMemoryBufferRef buffer)1710 IOSKRegionSetBuffer( IOSKRegionRef region, IOSKIndex segmentIndex,
1711     IOSKMemoryBufferRef buffer )
1712 {
1713 	IOReturn ret = kIOReturnBadArgument;
1714 
1715 	assert(region != NULL);
1716 	if (region != NULL) {
1717 		ret = region->setSegmentBuffer(segmentIndex, (IOSKBuffer *)buffer);
1718 	}
1719 
1720 	return ret;
1721 }
1722 
1723 void
IOSKRegionClearBuffer(IOSKRegionRef region,IOSKIndex segmentIndex)1724 IOSKRegionClearBuffer( IOSKRegionRef region, IOSKIndex segmentIndex )
1725 {
1726 	assert(region != NULL);
1727 	if (region != NULL) {
1728 		region->clearSegmentBuffer(segmentIndex, NULL);
1729 	}
1730 }
1731 
1732 void
IOSKRegionClearBufferDebug(IOSKRegionRef region,IOSKIndex segmentIndex,IOSKMemoryBufferRef * prevBufferRef)1733 IOSKRegionClearBufferDebug( IOSKRegionRef region, IOSKIndex segmentIndex,
1734     IOSKMemoryBufferRef * prevBufferRef )
1735 {
1736 	assert(region != NULL);
1737 	if (region != NULL) {
1738 		region->clearSegmentBuffer(segmentIndex, prevBufferRef);
1739 	}
1740 }
1741 
1742 IOSKMapperRef
IOSKMapperCreate(IOSKArenaRef arena,task_t task)1743 IOSKMapperCreate( IOSKArenaRef arena, task_t task )
1744 {
1745 	IOSKMapperRef mapper = NULL;
1746 
1747 	assert(arena != NULL);
1748 	if (arena != NULL) {
1749 		arena->createMapperForTask(task, &mapper);
1750 	}
1751 	return mapper;
1752 }
1753 
1754 void
IOSKMapperDestroy(IOSKMapperRef mapper)1755 IOSKMapperDestroy( IOSKMapperRef mapper )
1756 {
1757 	assert(mapper != NULL);
1758 	if (mapper != NULL) {
1759 		IOSKArena * arena = mapper->getArena();
1760 		assert(arena != NULL);
1761 		arena->removeMapper(mapper);
1762 		IOSKObjectDestroy(mapper);
1763 	}
1764 }
1765 
1766 void
IOSKMapperRedirect(IOSKMapperRef mapper)1767 IOSKMapperRedirect( IOSKMapperRef mapper )
1768 {
1769 	assert(mapper != NULL);
1770 	if (mapper != NULL) {
1771 		IOSKArena * arena = mapper->getArena();
1772 		assert(arena != NULL);
1773 		arena->redirectMap(mapper);
1774 	}
1775 }
1776 
1777 IOReturn
IOSKMapperGetAddress(IOSKMapperRef mapper,mach_vm_address_t * address,mach_vm_size_t * size)1778 IOSKMapperGetAddress( IOSKMapperRef mapper,
1779     mach_vm_address_t * address, mach_vm_size_t * size )
1780 {
1781 	assert(mapper != NULL);
1782 	if ((mapper == NULL) || (address == NULL)) {
1783 		return kIOReturnBadArgument;
1784 	}
1785 
1786 	*address = mapper->getMapAddress(size);
1787 	return kIOReturnSuccess;
1788 }
1789 
1790 boolean_t
IOSKBufferIsWired(IOSKMemoryBufferRef buffer)1791 IOSKBufferIsWired( IOSKMemoryBufferRef buffer )
1792 {
1793 	assert(buffer != NULL);
1794 	return ((IOSKBuffer *)buffer)->isWired();
1795 }
1796 
1797 __END_DECLS
1798 
1799 #if DEVELOPMENT || DEBUG
1800 
extern int IOSkywalkSupportTest(int x);

int
IOSkywalkSupportTest( int newValue )
{
	// DEVELOPMENT/DEBUG-only smoke test for the IOSK* support code:
	// builds regions, arenas, buffers and mappers; verifies mapping
	// layout and buffer contents through the maps; exercises redirect
	// and clear/rebind paths; then tears everything down. Instance
	// counts are printed before and after so a leak shows up as a
	// count mismatch in the kernel log. Always returns 0; failures
	// trip assert().
	static const int kNumRegions = 3;
	static const int kNumBuffers = 6;
	static const int kNumMappers = 3;
	static const int kNumArenas  = 2;

	IOSKMemoryBufferSpec bspec;
	IOSKRegionSpec      rspec;
	IOSKMemoryBufferRef buffers[kNumBuffers];
	mach_vm_address_t   bufkvas[kNumBuffers];
	IOSKRegionRef       regions[kNumRegions];
	IOSKRegionRef       reverse[kNumRegions];
	IOSKArenaRef        arenas[kNumArenas];
	IOSKMapperRef       mappers[kNumMappers];
	mach_vm_address_t   addrs[kNumMappers];
	mach_vm_size_t      size;
	uint32_t            value;
	uint32_t *          ptr;
	IOReturn            ret;

	// Baseline instance counts.
	kprintf("IOSKArena count  : %u\n",
	    IOSKArena::gMetaClass.getInstanceCount());
	kprintf("IOSKRegion count : %u\n",
	    IOSKRegion::gMetaClass.getInstanceCount());
	kprintf("IOSKMapper count : %u, %u (sub maps)\n",
	    IOSKMapper::gMetaClass.getInstanceCount(),
	    IOSKRegionMapper::gMetaClass.getInstanceCount());
	kprintf("IOSKBuffer count : %u\n",
	    IOSKBuffer::gMetaClass.getInstanceCount());

	// Three regions of differing segment size/count; region 0 opts out
	// of redirect.
	rspec.noRedirect = true;
	regions[0] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(1), 2);
	assert(regions[0]);
	rspec.noRedirect = false;
	regions[1] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(2), 3);
	assert(regions[1]);
	regions[2] = IOSKRegionCreate(&rspec, (IOSKSize) ptoa(3), 4);
	assert(regions[2]);

	reverse[0] = regions[2];
	reverse[1] = regions[1];
	reverse[2] = regions[0];

	// Two arenas sharing the same regions in opposite order.
	arenas[0] = IOSKArenaCreate(regions, 3);
	assert(arenas[0]);
	arenas[1] = IOSKArenaCreate(reverse, 3);
	assert(arenas[1]);

	// Purgeable buffers sized to match the region segments; buffer 0 is
	// user read-only.
	bzero(&bspec, sizeof(bspec));
	bspec.purgeable = true;
	bspec.user_writable = false;
	buffers[0] = IOSKMemoryBufferCreate(ptoa(1), &bspec, &bufkvas[0]);
	assert(buffers[0]);
	assert(IOSKBufferIsWired(buffers[0]) == false);
	bspec.user_writable = true;
	buffers[1] = IOSKMemoryBufferCreate(ptoa(1), &bspec, &bufkvas[1]);
	assert(buffers[1]);
	buffers[2] = IOSKMemoryBufferCreate(ptoa(2), &bspec, &bufkvas[2]);
	assert(buffers[2]);
	buffers[3] = IOSKMemoryBufferCreate(ptoa(2), &bspec, &bufkvas[3]);
	assert(buffers[3]);
	buffers[4] = IOSKMemoryBufferCreate(ptoa(3), &bspec, &bufkvas[4]);
	assert(buffers[4]);
	buffers[5] = IOSKMemoryBufferCreate(ptoa(3), &bspec, &bufkvas[5]);
	assert(buffers[5]);

	// Tag each buffer's first word through its kernel address.
	for (int i = 0; i < kNumBuffers; i++) {
		value = 0x534B0000 | i;
		ptr = (uint32_t *)(uintptr_t)bufkvas[i];
		*ptr = value;
		assert(value == *ptr);
	}

	// Bind buffers to segments (region 2 segment 3 leaves a gap at
	// segments 1-2).
	ret = IOSKRegionSetBuffer(regions[0], 0, buffers[0]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[0], 1, buffers[1]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[1], 0, buffers[2]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[1], 1, buffers[3]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[2], 0, buffers[4]);
	assert(ret == kIOReturnSuccess);
	ret = IOSKRegionSetBuffer(regions[2], 3, buffers[5]);
	assert(ret == kIOReturnSuccess);

	// Map both arenas into the current task (arena 0 twice).
	mappers[0] = IOSKMapperCreate(arenas[0], current_task());
	assert(mappers[0]);
	mappers[1] = IOSKMapperCreate(arenas[0], current_task());
	assert(mappers[1]);
	mappers[2] = IOSKMapperCreate(arenas[1], current_task());
	assert(mappers[2]);

	// Each arena spans 1*2 + 2*3 + 3*4 = 20 pages.
	ret = IOSKMapperGetAddress(mappers[0], &addrs[0], &size);
	assert(ret == kIOReturnSuccess);
	assert(size == ptoa(20));
	ret = IOSKMapperGetAddress(mappers[1], &addrs[1], &size);
	assert(ret == kIOReturnSuccess);
	assert(size == ptoa(20));
	ret = IOSKMapperGetAddress(mappers[2], &addrs[2], &size);
	assert(ret == kIOReturnSuccess);
	assert(size == ptoa(20));

	for (int i = 0; i < kNumMappers; i++) {
		kprintf("mapper[%d] %p map address 0x%llx size 0x%x\n",
		    i, mappers[i], (uint64_t)addrs[i], (uint32_t)size);
	}

	// Verify buffer tags through the forward-order mapping at their
	// expected page offsets.
	ptr = (uint32_t *)(uintptr_t)addrs[0];
	assert(*ptr == 0x534B0000);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(1));
	assert(*ptr == 0x534B0001);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(2));
	assert(*ptr == 0x534B0002);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(4));
	assert(*ptr == 0x534B0003);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(8));
	assert(*ptr == 0x534B0004);
	ptr = (uint32_t *)(uintptr_t)(addrs[0] + ptoa(17));
	assert(*ptr == 0x534B0005);

	// Writable through the map.
	*ptr = 0x4B530005;
	assert(0x4B530005 == *ptr);
	*ptr = 0x534B0005;

	// After redirecting mapper 0, writes land in a redirected page;
	// region 0 (noRedirect) still shows the original data.
	IOSKMapperRedirect(mappers[0]);
	*ptr = 0x33333333;
	assert(0x33333333 == *ptr);
	ptr = (uint32_t *)(uintptr_t)addrs[0];
	assert(*ptr == 0x534B0000);

	// Verify the reverse-order arena's layout.
	ptr = (uint32_t *)(uintptr_t)addrs[2];
	assert(*ptr == 0x534B0004);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(9));
	assert(*ptr == 0x534B0005);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(12));
	assert(*ptr == 0x534B0002);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(14));
	assert(*ptr == 0x534B0003);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(18));
	assert(*ptr == 0x534B0000);
	ptr = (uint32_t *)(uintptr_t)(addrs[2] + ptoa(19));
	assert(*ptr == 0x534B0001);

	// Clear and rebind a segment; contents must survive.
	IOSKRegionClearBufferDebug(regions[0], 1, NULL);
	ret = IOSKRegionSetBuffer(regions[0], 1, buffers[1]);
	assert(ret == kIOReturnSuccess);
	assert(*ptr == 0x534B0001);

	IOSKArenaRedirect(arenas[0]);
	IOSKArenaRedirect(arenas[1]);

	// Teardown in creation order; counts printed below should match
	// the baseline.
	for (int i = 0; i < kNumBuffers; i++) {
		IOSKMemoryDestroy(buffers[i]);
	}
	for (int i = 0; i < kNumRegions; i++) {
		IOSKRegionDestroy(regions[i]);
	}
	for (int i = 0; i < kNumArenas; i++) {
		IOSKArenaDestroy(arenas[i]);
	}
	for (int i = 0; i < kNumMappers; i++) {
		IOSKMapperDestroy(mappers[i]);
	}

	kprintf("IOSKArena count  : %u\n",
	    IOSKArena::gMetaClass.getInstanceCount());
	kprintf("IOSKRegion count : %u\n",
	    IOSKRegion::gMetaClass.getInstanceCount());
	kprintf("IOSKMapper count : %u, %u (sub maps)\n",
	    IOSKMapper::gMetaClass.getInstanceCount(),
	    IOSKRegionMapper::gMetaClass.getInstanceCount());
	kprintf("IOSKBuffer count : %u\n",
	    IOSKBuffer::gMetaClass.getInstanceCount());

	return 0;
}
1982 
1983 #endif  /* DEVELOPMENT || DEBUG */
1984 
1985 #if defined(__x86_64__)
1986 const OSSymbol *
IOSKCopyKextIdentifierWithAddress(vm_address_t address)1987 IOSKCopyKextIdentifierWithAddress( vm_address_t address )
1988 {
1989 	const OSSymbol * id = NULL;
1990 
1991 	OSKext * kext = OSKext::lookupKextWithAddress(address);
1992 	if (kext) {
1993 		id = kext->getIdentifier();
1994 		if (id) {
1995 			id->retain();
1996 		}
1997 		kext->release();
1998 	}
1999 	return id;
2000 }
2001 #endif /* __x86_64__ */
2002