xref: /xnu-8020.121.3/iokit/Kernel/IODMACommand.cpp (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #define IOKIT_ENABLE_SHARED_PTR
30 
31 #include <IOKit/assert.h>
32 
33 #include <libkern/OSTypes.h>
34 #include <libkern/OSByteOrder.h>
35 #include <libkern/OSDebug.h>
36 
37 #include <IOKit/IOReturn.h>
38 #include <IOKit/IOLib.h>
39 #include <IOKit/IODMACommand.h>
40 #include <IOKit/IOMapper.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43 
44 #include "IOKitKernelInternal.h"
45 
#define MAPTYPE(type)           ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type)    (MAPTYPE(type) == kNonCoherent)

// Internal op bits exchanged between walkAll() and segmentOp() to select
// which phase of the bounce-buffer walk is running and which direction any
// copy should go.  These are file-private and never exposed to clients.
enum{
	kWalkSyncIn       = 0x01,// bounce -> md
	kWalkSyncOut      = 0x02,// bounce <- md
	kWalkSyncAlways   = 0x04,
	kWalkPreflight    = 0x08,
	kWalkDoubleBuffer = 0x10,
	kWalkPrepare      = 0x20,
	kWalkComplete     = 0x40,
	kWalkClient       = 0x80
};
59 
60 
61 #define fInternalState reserved
62 #define fState         reserved->fState
63 #define fMDSummary     reserved->fMDSummary
64 
65 
66 #if 1
67 // no direction => OutIn
68 #define SHOULD_COPY_DIR(op, direction)                                      \
69 	((kIODirectionNone == (direction))                                  \
70 	    || (kWalkSyncAlways & (op))                                     \
71 	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
72 	                                            & (direction)))
73 
74 #else
75 #define SHOULD_COPY_DIR(state, direction) (true)
76 #endif
77 
78 #if 0
79 #define DEBG(fmt, args...)      { IOLog(fmt, ## args); kprintf(fmt, ## args); }
80 #else
81 #define DEBG(fmt, args...)      {}
82 #endif
83 
84 #if 0
85 #define LOGTAG          0x87654321
86 #endif
87 
88 /**************************** class IODMACommand ***************************/
89 
#undef super
#define super IOCommand
OSDefineMetaClassAndStructorsWithZone(IODMACommand, IOCommand, ZC_NONE);

// Reserved expansion slots: 0-6 are marked used (consumed by later API
// additions to this class); 7-15 remain free for future, binary-compatible
// growth.
OSMetaClassDefineReservedUsedX86(IODMACommand, 0);
OSMetaClassDefineReservedUsedX86(IODMACommand, 1);
OSMetaClassDefineReservedUsedX86(IODMACommand, 2);
OSMetaClassDefineReservedUsedX86(IODMACommand, 3);
OSMetaClassDefineReservedUsedX86(IODMACommand, 4);
OSMetaClassDefineReservedUsedX86(IODMACommand, 5);
OSMetaClassDefineReservedUsedX86(IODMACommand, 6);
OSMetaClassDefineReservedUnused(IODMACommand, 7);
OSMetaClassDefineReservedUnused(IODMACommand, 8);
OSMetaClassDefineReservedUnused(IODMACommand, 9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
110 
111 
112 OSSharedPtr<IODMACommand>
withRefCon(void * refCon)113 IODMACommand::withRefCon(void * refCon)
114 {
115 	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();
116 
117 	if (me && !me->initWithRefCon(refCon)) {
118 		return nullptr;
119 	}
120 
121 	return me;
122 }
123 
124 OSSharedPtr<IODMACommand>
withSpecification(SegmentFunction outSegFunc,const SegmentOptions * segmentOptions,uint32_t mappingOptions,IOMapper * mapper,void * refCon)125 IODMACommand::withSpecification(SegmentFunction  outSegFunc,
126     const SegmentOptions * segmentOptions,
127     uint32_t               mappingOptions,
128     IOMapper             * mapper,
129     void                 * refCon)
130 {
131 	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();
132 
133 	if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
134 	    mapper, refCon)) {
135 		return nullptr;
136 	}
137 
138 	return me;
139 }
140 
141 OSSharedPtr<IODMACommand>
withSpecification(SegmentFunction outSegFunc,UInt8 numAddressBits,UInt64 maxSegmentSize,MappingOptions mappingOptions,UInt64 maxTransferSize,UInt32 alignment,IOMapper * mapper,void * refCon)142 IODMACommand::withSpecification(SegmentFunction outSegFunc,
143     UInt8           numAddressBits,
144     UInt64          maxSegmentSize,
145     MappingOptions  mappingOptions,
146     UInt64          maxTransferSize,
147     UInt32          alignment,
148     IOMapper       *mapper,
149     void           *refCon)
150 {
151 	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();
152 
153 	if (me && !me->initWithSpecification(outSegFunc,
154 	    numAddressBits, maxSegmentSize,
155 	    mappingOptions, maxTransferSize,
156 	    alignment, mapper, refCon)) {
157 		return nullptr;
158 	}
159 
160 	return me;
161 }
162 
163 OSSharedPtr<IODMACommand>
cloneCommand(void * refCon)164 IODMACommand::cloneCommand(void *refCon)
165 {
166 	SegmentOptions segmentOptions =
167 	{
168 		.fStructSize                = sizeof(segmentOptions),
169 		.fNumAddressBits            = (uint8_t)fNumAddressBits,
170 		.fMaxSegmentSize            = fMaxSegmentSize,
171 		.fMaxTransferSize           = fMaxTransferSize,
172 		.fAlignment                 = fAlignMask + 1,
173 		.fAlignmentLength           = fAlignMaskInternalSegments + 1,
174 		.fAlignmentInternalSegments = fAlignMaskLength + 1
175 	};
176 
177 	return IODMACommand::withSpecification(fOutSeg, &segmentOptions,
178 	           fMappingOptions, fMapper.get(), refCon);
179 }
180 
181 #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
182 
183 bool
initWithRefCon(void * refCon)184 IODMACommand::initWithRefCon(void * refCon)
185 {
186 	if (!super::init()) {
187 		return false;
188 	}
189 
190 	if (!reserved) {
191 		reserved = IOMallocType(IODMACommandInternal);
192 	}
193 	fRefCon = refCon;
194 
195 	return true;
196 }
197 
198 bool
initWithSpecification(SegmentFunction outSegFunc,const SegmentOptions * segmentOptions,uint32_t mappingOptions,IOMapper * mapper,void * refCon)199 IODMACommand::initWithSpecification(SegmentFunction        outSegFunc,
200     const SegmentOptions * segmentOptions,
201     uint32_t               mappingOptions,
202     IOMapper             * mapper,
203     void                 * refCon)
204 {
205 	if (!initWithRefCon(refCon)) {
206 		return false;
207 	}
208 
209 	if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
210 	    mappingOptions, mapper)) {
211 		return false;
212 	}
213 
214 	return true;
215 }
216 
217 bool
initWithSpecification(SegmentFunction outSegFunc,UInt8 numAddressBits,UInt64 maxSegmentSize,MappingOptions mappingOptions,UInt64 maxTransferSize,UInt32 alignment,IOMapper * mapper,void * refCon)218 IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
219     UInt8           numAddressBits,
220     UInt64          maxSegmentSize,
221     MappingOptions  mappingOptions,
222     UInt64          maxTransferSize,
223     UInt32          alignment,
224     IOMapper       *mapper,
225     void           *refCon)
226 {
227 	SegmentOptions segmentOptions =
228 	{
229 		.fStructSize                = sizeof(segmentOptions),
230 		.fNumAddressBits            = numAddressBits,
231 		.fMaxSegmentSize            = maxSegmentSize,
232 		.fMaxTransferSize           = maxTransferSize,
233 		.fAlignment                 = alignment,
234 		.fAlignmentLength           = 1,
235 		.fAlignmentInternalSegments = alignment
236 	};
237 
238 	return initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon);
239 }
240 
// Validate and install a DMA specification on this command.
//
// outSegFunc      - required; function used to emit each generated segment.
// segmentOptions  - required; address bits, max segment/transfer sizes and
//                   the three alignment constraints.
// mappingOptions  - map type (kMapped/kUnmapped/kNonCoherent/kBypassed) plus
//                   option bits such as kIterateOnly.
// mapper          - an IOMapper, or (by convention visible below) a device
//                   IOService used only to fill fInternalState->fDevice.
//
// Returns kIOReturnBadArgument for missing/contradictory arguments,
// kIOReturnSuccess otherwise.
IOReturn
IODMACommand::setSpecification(SegmentFunction        outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper)
{
	IOService * device = NULL;
	UInt8       numAddressBits;
	UInt64      maxSegmentSize;
	UInt64      maxTransferSize;
	UInt32      alignment;

	bool        is32Bit;

	if (!outSegFunc || !segmentOptions) {
		return kIOReturnBadArgument;
	}

	// The three built-in 32-bit output functions cap the usable address bits.
	is32Bit = ((OutputHost32 == outSegFunc)
	    || (OutputBig32 == outSegFunc)
	    || (OutputLittle32 == outSegFunc));

	numAddressBits = segmentOptions->fNumAddressBits;
	maxSegmentSize = segmentOptions->fMaxSegmentSize;
	maxTransferSize = segmentOptions->fMaxTransferSize;
	alignment = segmentOptions->fAlignment;
	if (is32Bit) {
		if (!numAddressBits) {
			numAddressBits = 32;
		} else if (numAddressBits > 32) {
			return kIOReturnBadArgument;  // Wrong output function for bits
		}
	}

	// An address limit smaller than a page cannot be honored.
	if (numAddressBits && (numAddressBits < PAGE_SHIFT)) {
		return kIOReturnBadArgument;
	}

	// Zero means "unlimited": wrap to the all-ones value.
	if (!maxSegmentSize) {
		maxSegmentSize--;               // Set Max segment to -1
	}
	if (!maxTransferSize) {
		maxTransferSize--;              // Set Max transfer to -1
	}
	// A non-IOMapper object passed in the mapper slot is treated as the
	// client device; no mapper was supplied in that case.
	if (mapper && !OSDynamicCast(IOMapper, mapper)) {
		device = mapper;
		mapper = NULL;
	}
	// Mapped/non-coherent types fall back to the system mapper when none
	// was supplied.
	if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) {
		IOMapper::checkForSystemMapper();
		mapper = IOMapper::gSystem;
	}

	fNumSegments     = 0;
	fOutSeg          = outSegFunc;
	fNumAddressBits  = numAddressBits;
	fMaxSegmentSize  = maxSegmentSize;
	fMappingOptions  = mappingOptions;
	fMaxTransferSize = maxTransferSize;
	// Alignments are stored as masks (alignment - 1); zero input means
	// byte alignment.
	if (!alignment) {
		alignment = 1;
	}
	fAlignMask       = alignment - 1;

	alignment = segmentOptions->fAlignmentLength;
	if (!alignment) {
		alignment = 1;
	}
	fAlignMaskLength = alignment - 1;

	// Internal-segment alignment defaults to the overall address alignment.
	alignment = segmentOptions->fAlignmentInternalSegments;
	if (!alignment) {
		alignment = (fAlignMask + 1);
	}
	fAlignMaskInternalSegments = alignment - 1;

	// kBypassed is only legal without a mapper; unknown types are rejected.
	switch (MAPTYPE(mappingOptions)) {
	case kMapped:       break;
	case kUnmapped:     break;
	case kNonCoherent:  break;

	case kBypassed:
		if (!mapper) {
			break;
		}
		return kIOReturnBadArgument;

	default:
		return kIOReturnBadArgument;
	}
	;

	// Retain the (possibly substituted) mapper only when it changed.
	if (mapper != fMapper) {
		fMapper.reset(mapper, OSRetain);
	}

	fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
	fInternalState->fDevice = device;

	return kIOReturnSuccess;
}
342 
// Destructor hook: release the internal state, drop the mapper reference,
// and apply the fMemory leak-over-panic workaround described below before
// chaining to IOCommand::free().
void
IODMACommand::free()
{
	if (reserved) {
		IOFreeType(reserved, IODMACommandInternal);
	}

	fMapper.reset();

	// Correct use of this class when setting an IOMemoryDescriptor
	// in fMemory via setMemoryDescriptor(desc) is, for the caller, to
	// have a matching call to clearMemoryDescriptor() before releasing
	// the object. The matching call has also the effect of releasing
	// the ref taken on the IOMemoryDescriptor in setMemoryDescriptor().
	//
	// A number of "misbehaving" drivers has been found during testing,
	// whereby a matching call to clearMemoryDescriptor() is missing:
	//
	// rdar://59947343
	// rdar://59946968
	//
	// Both the approaches taken in said drivers are wrong, but have gone
	// basically silent with fMemory being a regular pointer. With fMemory
	// becoming a OSSharedPtr, the IODMACommand destructor expects to find
	// either fMemory reset (through the call to clearMemoryDescriptor()) or
	// a reference hold for the release.
	//
	// For this reason, this workaround of detaching fMemory is put in
	// place here, choosing the leak over the panic for misbehaving
	// drivers. Once all instances are fixed, this workaround will be
	// removed.
	//
	// Note: all well behaving drivers that have matching calls for
	// setMemoryDescriptor() and clearMemoryDescriptor() are unaffected
	// since fMemory will be null at this point.
	fMemory.detach();

	super::free();
}
382 
// Attach a memory descriptor to this command, optionally preparing it.
//
// mem         - descriptor to target; may equal the current one (no-op apart
//               from draining prepares when !autoPrepare), or NULL.
// autoPrepare - when true, prepare() is called after attaching; on prepare
//               failure the descriptor is detached again.
//
// Returns kIOReturnBusy if a different descriptor is being replaced while
// the command is still prepared, any error from dmaCommandOperation() or
// prepare(), else kIOReturnSuccess.
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
	IOReturn err = kIOReturnSuccess;

	// Re-setting the same descriptor: only drain outstanding prepares.
	if (mem == fMemory) {
		if (!autoPrepare) {
			while (fActive) {
				complete();
			}
		}
		return kIOReturnSuccess;
	}

	if (fMemory) {
		// As we are almost certainly being called from a work loop thread
		// if fActive is true it is probably not a good time to potentially
		// block.  Just test for it and return an error
		if (fActive) {
			return kIOReturnBusy;
		}
		clearMemoryDescriptor();
	}

	if (mem) {
		// Snapshot the descriptor's characteristics (length, direction,
		// highest page) into fMDSummary.
		bzero(&fMDSummary, sizeof(fMDSummary));
		err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
		    &fMDSummary, sizeof(fMDSummary));
		if (err) {
			return err;
		}

		ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

		// With a mapper the IOVM addresses are remapped, so no physical
		// address-range checking is needed; otherwise check whether any
		// page can exceed the command's addressability.
		if ((kMapped == MAPTYPE(fMappingOptions))
		    && fMapper) {
			fInternalState->fCheckAddressing = false;
		} else {
			fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
		}

		fInternalState->fNewMD = true;
		fMemory.reset(const_cast<IOMemoryDescriptor *>(mem), OSRetain);
		// Without a mapper, mark the descriptor DMA-active so it tracks
		// this command; undone in clearMemoryDescriptor().
		fInternalState->fSetActiveNoMapper = (!fMapper);
		if (fInternalState->fSetActiveNoMapper) {
			mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
		}
		if (autoPrepare) {
			err = prepare();
			if (err) {
				clearMemoryDescriptor();
			}
		}
	}

	return err;
}
440 
441 IOReturn
clearMemoryDescriptor(bool autoComplete)442 IODMACommand::clearMemoryDescriptor(bool autoComplete)
443 {
444 	if (fActive && !autoComplete) {
445 		return kIOReturnNotReady;
446 	}
447 
448 	if (fMemory) {
449 		while (fActive) {
450 			complete();
451 		}
452 		if (fInternalState->fSetActiveNoMapper) {
453 			fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
454 		}
455 		fMemory.reset();
456 	}
457 
458 	return kIOReturnSuccess;
459 }
460 
461 const IOMemoryDescriptor *
getMemoryDescriptor() const462 IODMACommand::getMemoryDescriptor() const
463 {
464 	return fMemory.get();
465 }
466 
467 IOMemoryDescriptor *
getIOMemoryDescriptor() const468 IODMACommand::getIOMemoryDescriptor() const
469 {
470 	OSSharedPtr<IOMemoryDescriptor> mem;
471 
472 	mem = reserved->fCopyMD;
473 	if (!mem) {
474 		mem = fMemory;
475 	}
476 
477 	return mem.get();
478 }
479 
// Per-segment callback used by walkAll() via genIOVMSegments().
//
// reference    - the kWalk* op bits, smuggled through the void* parameter.
// target       - the IODMACommand being walked.
// segment      - the IOVM address/length pair for this segment.
// segments     - opaque walk context (the internal state; unused here
//                beyond what 'target->reserved' already provides).
// segmentIndex - ordinal of this segment within the walk; index 0 uses the
//                source alignment mask, later ones the internal-segment mask.
//
// In the preflight phase it detects misalignment and counts the bounce
// pages needed; in the prepare/sync phases it assigns bounce pages and
// copies data between the original memory and the bounce pages.
IOReturn
IODMACommand::segmentOp(
	void         *reference,
	IODMACommand *target,
	Segment64     segment,
	void         *segments,
	UInt32        segmentIndex)
{
	IOOptionBits op = (IOOptionBits)(uintptr_t) reference;
	addr64_t     maxPhys, address;
	uint64_t     length;
	uint32_t     numPages;
	uint32_t     mask;

	IODMACommandInternal * state = target->reserved;

	// Compute the highest directly-addressable physical address; 0-1 wraps
	// to all-ones (no limit) when the command has no effective limit.
	if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) {
		maxPhys = (1ULL << target->fNumAddressBits);
	} else {
		maxPhys = 0;
	}
	maxPhys--;

	address = segment.fIOVMAddr;
	length = segment.fLength;

	assert(length);

	// Address alignment check: first segment uses the source mask, later
	// segments the internal-segment mask.  Sticky once set.
	if (!state->fMisaligned) {
		mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
		state->fMisaligned |= (0 != (mask & address));
		if (state->fMisaligned) {
			DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
		}
	}
	// Length alignment check against the length mask.
	if (!state->fMisaligned) {
		mask = target->fAlignMaskLength;
		state->fMisaligned |= (0 != (mask & length));
		if (state->fMisaligned) {
			DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
		}
	}

	if (state->fMisaligned && (kWalkPreflight & op)) {
		return kIOReturnNotAligned;
	}

	// Unless double-buffering everything, only the part of the segment that
	// exceeds maxPhys needs bouncing; trim to that tail (or to nothing).
	if (!state->fDoubleBuffer) {
		if ((address + length - 1) <= maxPhys) {
			length = 0;
		} else if (address <= maxPhys) {
			DEBG("tail %qx, %qx", address, length);
			length = (address + length - maxPhys - 1);
			address = maxPhys + 1;
			DEBG("-> %qx, %qx\n", address, length);
		}
	}

	if (!length) {
		return kIOReturnSuccess;
	}

	// Page count covering [address, address+length) including the page
	// offset; guard the 64->32 bit narrowing.
	uint64_t numPages64 = atop_64(round_page_64((address & PAGE_MASK) + length));
	if (numPages64 > UINT_MAX) {
		return kIOReturnVMError;
	}
	numPages = (typeof(numPages))numPages64;

	if (kWalkPreflight & op) {
		// Preflight: just accumulate how many bounce pages are needed.
		state->fCopyPageCount += numPages;
	} else {
		vm_page_t lastPage;
		lastPage = NULL;
		if (kWalkPrepare & op) {
			// Tag each bounce page with the original page offset it shadows.
			lastPage = state->fCopyNext;
			for (IOItemCount idx = 0; idx < numPages; idx++) {
				vm_page_set_offset(lastPage, atop_64(address) + idx);
				lastPage = vm_page_get_next(lastPage);
			}
		}

		// Sync phase: copy between the original memory and the bounce pages
		// when the walk direction requires it (SHOULD_COPY_DIR).
		if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
			lastPage = state->fCopyNext;
			for (IOItemCount idx = 0; idx < numPages; idx++) {
				if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
					addr64_t cpuAddr = address;
					addr64_t remapAddr;
					uint64_t chunk;

					// Translate a mapper IOVM address back to physical for
					// the CPU-side copy.
					if ((kMapped == MAPTYPE(target->fMappingOptions))
					    && target->fMapper) {
						cpuAddr = target->fMapper->mapToPhysicalAddress(address);
					}

					remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
					if (!state->fDoubleBuffer) {
						remapAddr += (address & PAGE_MASK);
					}
					// Copy at most to the end of the current page, the
					// remaining length, and the copypv unsigned-int limit.
					chunk = PAGE_SIZE - (address & PAGE_MASK);
					if (chunk > length) {
						chunk = length;
					}
					if (chunk > (UINT_MAX - PAGE_SIZE + 1)) {
						chunk = (UINT_MAX - PAGE_SIZE + 1);
					}

					DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
					    (kWalkSyncIn & op) ? "->" : "<-",
					    address, chunk, op);

					// kWalkSyncIn: bounce -> md; otherwise md -> bounce.
					if (kWalkSyncIn & op) { // cppvNoModSnk
						copypv(remapAddr, cpuAddr, (unsigned int) chunk,
						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
					} else {
						copypv(cpuAddr, remapAddr, (unsigned int) chunk,
						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
					}
					address += chunk;
					length -= chunk;
				}
				lastPage = vm_page_get_next(lastPage);
			}
		}
		// Remember where the next segment's bounce pages start.
		state->fCopyNext = lastPage;
	}

	return kIOReturnSuccess;
}
608 
609 OSSharedPtr<IOBufferMemoryDescriptor>
createCopyBuffer(IODirection direction,UInt64 length)610 IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
611 {
612 	mach_vm_address_t mask = 0xFFFFF000;    //state->fSourceAlignMask
613 	return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
614 	           direction, length, mask);
615 }
616 
// Drive the bounce-buffer state machine over the whole prepared range.
//
// op is a combination of kWalk* bits:
//  - kWalkPreflight: reset state, detect misalignment, count/allocate
//    bounce pages (or fall back to an IOBufferMemoryDescriptor copy).
//  - kWalkSyncIn/kWalkSyncOut: copy data between the client memory and the
//    bounce pages / copy descriptor.
//  - kWalkComplete: free the bounce pages and copy descriptor.
IOReturn
IODMACommand::walkAll(uint32_t op)
{
	IODMACommandInternal * state = fInternalState;

	IOReturn     ret = kIOReturnSuccess;
	UInt32       numSegments;
	UInt64       offset;

	if (kWalkPreflight & op) {
		// Reset all per-prepare bounce state.
		state->fMisaligned     = false;
		state->fDoubleBuffer   = false;
		state->fPrepared       = false;
		state->fCopyNext       = NULL;
		state->fCopyPageAlloc  = NULL;
		state->fCopyPageCount  = 0;
		state->fNextRemapPage  = NULL;
		state->fCopyMD         = NULL;

		// Walk every segment (numSegments = 0 - 1 wraps to "no limit") so
		// segmentOp() can flag misalignment and count bounce pages.
		if (!(kWalkDoubleBuffer & op)) {
			offset = 0;
			numSegments = 0 - 1;
			ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
		}

		op &= ~kWalkPreflight;

		// Misaligned (or forced) transfers double-buffer the whole range.
		state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
		state->fForceDoubleBuffer = false;
		if (state->fDoubleBuffer) {
			state->fCopyPageCount = (typeof(state->fCopyPageCount))(atop_64(round_page(state->fPreparedLength)));
		}

		if (state->fCopyPageCount) {
			vm_page_t mapBase = NULL;

			DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

			// Without a mapper and without full double-buffering, try a raw
			// low-memory page list for the bounce pages.
			if (!fMapper && !state->fDoubleBuffer) {
				kern_return_t kr;

				// Defensive: unreachable given the enclosing !fMapper test.
				if (fMapper) {
					panic("fMapper copying");
				}

				kr = vm_page_alloc_list(state->fCopyPageCount,
				    (kma_flags_t)(KMA_LOMEM | KMA_NOPAGEWAIT), &mapBase);
				if (KERN_SUCCESS != kr) {
					DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
					mapBase = NULL;
				}
			}

			if (mapBase) {
				// Re-walk to assign pages and perform any requested sync in
				// the same pass, then drop the sync bits.
				state->fCopyPageAlloc = mapBase;
				state->fCopyNext = state->fCopyPageAlloc;
				offset = 0;
				numSegments = 0 - 1;
				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
				state->fPrepared = true;
				op &= ~(kWalkSyncIn | kWalkSyncOut);
			} else {
				// Fall back to a full copy descriptor (bounce buffer MD).
				DEBG("alloc IOBMD\n");
				state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

				if (state->fCopyMD) {
					ret = kIOReturnSuccess;
					state->fPrepared = true;
				} else {
					DEBG("IODMACommand !alloc IOBMD");
					return kIOReturnNoResources;
				}
			}
		}
	}

	// Sync phase: move data between client memory and the bounce storage.
	if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) {
		if (state->fCopyPageCount) {
			DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

			if (state->fCopyPageAlloc) {
				// Page-list bounce: segmentOp() performs the copies.
				state->fCopyNext = state->fCopyPageAlloc;
				offset = 0;
				numSegments = 0 - 1;
				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
			} else if (state->fCopyMD) {
				DEBG("sync IOBMD\n");

				// Copy-descriptor bounce: bulk read/write through the MDs.
				if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) {
					OSSharedPtr<IOMemoryDescriptor> poMD = fMemory;

					IOByteCount bytes;

					if (kWalkSyncIn & op) {
						bytes = poMD->writeBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
					} else {
						bytes = poMD->readBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
					}
					DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
					ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
				} else {
					ret = kIOReturnSuccess;
				}
			}
		}
	}

	// Teardown phase: release bounce pages and/or the copy descriptor.
	if (kWalkComplete & op) {
		if (state->fCopyPageAlloc) {
			vm_page_free_list(state->fCopyPageAlloc, FALSE);
			state->fCopyPageAlloc = NULL;
			state->fCopyPageCount = 0;
		}
		if (state->fCopyMD) {
			state->fCopyMD.reset();
		}

		state->fPrepared = false;
	}
	return ret;
}
742 
743 UInt8
getNumAddressBits(void)744 IODMACommand::getNumAddressBits(void)
745 {
746 	return (UInt8) fNumAddressBits;
747 }
748 
749 UInt32
getAlignment(void)750 IODMACommand::getAlignment(void)
751 {
752 	return fAlignMask + 1;
753 }
754 
755 uint32_t
getAlignmentLength(void)756 IODMACommand::getAlignmentLength(void)
757 {
758 	return fAlignMaskLength + 1;
759 }
760 
761 uint32_t
getAlignmentInternalSegments(void)762 IODMACommand::getAlignmentInternalSegments(void)
763 {
764 	return fAlignMaskInternalSegments + 1;
765 }
766 
767 IOReturn
prepareWithSpecification(SegmentFunction outSegFunc,const SegmentOptions * segmentOptions,uint32_t mappingOptions,IOMapper * mapper,UInt64 offset,UInt64 length,bool flushCache,bool synchronize)768 IODMACommand::prepareWithSpecification(SegmentFunction        outSegFunc,
769     const SegmentOptions * segmentOptions,
770     uint32_t               mappingOptions,
771     IOMapper             * mapper,
772     UInt64                 offset,
773     UInt64                 length,
774     bool                   flushCache,
775     bool                   synchronize)
776 {
777 	IOReturn ret;
778 
779 	if (fActive) {
780 		return kIOReturnNotPermitted;
781 	}
782 
783 	ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
784 	if (kIOReturnSuccess != ret) {
785 		return ret;
786 	}
787 
788 	ret = prepare(offset, length, flushCache, synchronize);
789 
790 	return ret;
791 }
792 
793 IOReturn
prepareWithSpecification(SegmentFunction outSegFunc,UInt8 numAddressBits,UInt64 maxSegmentSize,MappingOptions mappingOptions,UInt64 maxTransferSize,UInt32 alignment,IOMapper * mapper,UInt64 offset,UInt64 length,bool flushCache,bool synchronize)794 IODMACommand::prepareWithSpecification(SegmentFunction  outSegFunc,
795     UInt8            numAddressBits,
796     UInt64           maxSegmentSize,
797     MappingOptions   mappingOptions,
798     UInt64           maxTransferSize,
799     UInt32           alignment,
800     IOMapper         *mapper,
801     UInt64           offset,
802     UInt64           length,
803     bool             flushCache,
804     bool             synchronize)
805 {
806 	SegmentOptions segmentOptions =
807 	{
808 		.fStructSize                = sizeof(segmentOptions),
809 		.fNumAddressBits            = numAddressBits,
810 		.fMaxSegmentSize            = maxSegmentSize,
811 		.fMaxTransferSize           = maxTransferSize,
812 		.fAlignment                 = alignment,
813 		.fAlignmentLength           = 1,
814 		.fAlignmentInternalSegments = alignment
815 	};
816 
817 	return prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
818 	           offset, length, flushCache, synchronize);
819 }
820 
821 
822 IOReturn
prepare(UInt64 offset,UInt64 length,bool flushCache,bool synchronize)823 IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
824 {
825 	IODMACommandInternal *  state = fInternalState;
826 	IOReturn                  ret = kIOReturnSuccess;
827 	uint32_t       mappingOptions = fMappingOptions;
828 
829 	// check specification has been set
830 	if (!fOutSeg) {
831 		return kIOReturnNotReady;
832 	}
833 
834 	if (!length) {
835 		length = fMDSummary.fLength;
836 	}
837 
838 	if (length > fMaxTransferSize) {
839 		return kIOReturnNoSpace;
840 	}
841 
842 	if (fActive++) {
843 		if ((state->fPreparedOffset != offset)
844 		    || (state->fPreparedLength != length)) {
845 			ret = kIOReturnNotReady;
846 		}
847 	} else {
848 		if (fAlignMaskLength & length) {
849 			return kIOReturnNotAligned;
850 		}
851 
852 		if (atop_64(state->fPreparedLength) > UINT_MAX) {
853 			return kIOReturnVMError;
854 		}
855 		state->fPreparedOffset = offset;
856 		state->fPreparedLength = length;
857 
858 		state->fMisaligned     = false;
859 		state->fDoubleBuffer   = false;
860 		state->fPrepared       = false;
861 		state->fCopyNext       = NULL;
862 		state->fCopyPageAlloc  = NULL;
863 		state->fCopyPageCount  = 0;
864 		state->fNextRemapPage  = NULL;
865 		state->fCopyMD         = NULL;
866 		state->fLocalMapperAlloc       = 0;
867 		state->fLocalMapperAllocValid  = false;
868 		state->fLocalMapperAllocLength = 0;
869 
870 		state->fSourceAlignMask = fAlignMask;
871 		if (fMapper) {
872 			state->fSourceAlignMask &= page_mask;
873 		}
874 
875 		state->fCursor = state->fIterateOnly
876 		    || (!state->fCheckAddressing
877 		    && (!state->fSourceAlignMask
878 		    || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
879 
880 		if (!state->fCursor) {
881 			IOOptionBits op = kWalkPrepare | kWalkPreflight;
882 			if (synchronize) {
883 				op |= kWalkSyncOut;
884 			}
885 			ret = walkAll(op);
886 		}
887 
888 		if (IS_NONCOHERENT(mappingOptions) && flushCache) {
889 			if (state->fCopyMD) {
890 				state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
891 			} else {
892 				fMemory->performOperation(kIOMemoryIncoherentIOStore, offset, length);
893 			}
894 		}
895 
896 		if (fMapper) {
897 			IOMDDMAMapArgs mapArgs;
898 			bzero(&mapArgs, sizeof(mapArgs));
899 			mapArgs.fMapper = fMapper.get();
900 			mapArgs.fCommand = this;
901 			mapArgs.fMapSpec.device         = state->fDevice;
902 			mapArgs.fMapSpec.alignment      = fAlignMask + 1;
903 			mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? ((UInt8) fNumAddressBits) : 64;
904 			mapArgs.fLength = state->fPreparedLength;
905 			OSSharedPtr<IOMemoryDescriptor> md = state->fCopyMD;
906 			if (md) {
907 				mapArgs.fOffset = 0;
908 			} else {
909 				md = fMemory;
910 				mapArgs.fOffset = state->fPreparedOffset;
911 			}
912 
913 			ret = md->dmaCommandOperation(kIOMDDMAMap, &mapArgs, sizeof(mapArgs));
914 
915 			if ((kIOReturnSuccess == ret)
916 			    && mapArgs.fAllocLength
917 			    && (mapArgs.fAllocLength != mapArgs.fLength)) {
918 				do {
919 					// multisegment case
920 					IOMDDMAWalkSegmentState  walkState;
921 					IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
922 					IOOptionBits             mdOp;
923 					uint64_t                 index;
924 					IOPhysicalLength         segLen;
925 					uint32_t                         segCount;
926 					uint64_t                         phys, align;
927 					uint64_t                         mapperPageMask;
928 					uint64_t                         mapperPageShift;
929 					uint64_t                         insertOffset;
930 					uint32_t                         mapOptions;
931 					uint64_t                         length;
932 
933 					assert(mapArgs.fAllocLength > mapArgs.fLength);
934 
935 					mapperPageMask    = fMapper->getPageSize();
936 					assert(mapperPageMask);
937 					mapperPageMask   -= 1;
938 					mapperPageShift   = (64 - __builtin_clzll(mapperPageMask));
939 					walkArgs->fMapped = false;
940 					length            = state->fPreparedLength;
941 					mdOp              = kIOMDFirstSegment;
942 					segCount          = 0;
943 					for (index = 0; index < length; segCount++) {
944 						walkArgs->fOffset = state->fPreparedOffset + index;
945 
946 						ret    = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
947 						mdOp   = kIOMDWalkSegments;
948 						assert(kIOReturnSuccess == ret);
949 						if (ret != kIOReturnSuccess) {
950 							panic("dmaCommandOperation");
951 						}
952 						segLen = walkArgs->fLength;
953 						index += segLen;
954 					}
955 					if (ret != kIOReturnSuccess) {
956 						break;
957 					}
958 
959 #if defined(LOGTAG)
960 					if (LOGTAG == fMemory->getTag()) {
961 						IOLog("DMA[%p] alloc 0x%qx, 0x%qx\n", this, mapArgs.fAlloc, mapArgs.fAllocLength);
962 					}
963 #endif /* defined(LOGTAG) */
964 
965 					state->fMapSegments = IONewZeroData(IODMACommandMapSegment, segCount);
966 					if (!state->fMapSegments) {
967 						ret = kIOReturnNoMemory;
968 						break;
969 					}
970 					state->fMapSegmentsCount = segCount;
971 
972 					switch (kIODirectionOutIn & fMDSummary.fDirection) {
973 					case kIODirectionOut:
974 						mapOptions = kIODMAMapReadAccess;
975 						break;
976 					case kIODirectionIn:
977 						mapOptions = kIODMAMapWriteAccess;
978 						break;
979 					default:
980 						mapOptions = kIODMAMapReadAccess | kIODMAMapWriteAccess;
981 						break;
982 					}
983 
984 					mdOp = kIOMDFirstSegment;
985 					segCount = 0;
986 					for (insertOffset = 0, index = 0; index < length; segCount++) {
987 						walkArgs->fOffset = state->fPreparedOffset + index;
988 						ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
989 						mdOp = kIOMDWalkSegments;
990 						if (ret != kIOReturnSuccess) {
991 							panic("dmaCommandOperation 0x%x", ret);
992 						}
993 						phys = walkArgs->fIOVMAddr;
994 						segLen = walkArgs->fLength;
995 
996 #if defined(LOGTAG)
997 						if (LOGTAG == fMemory->getTag()) {
998 							IOLog("DMA[%p] phys[%d] 0x%qx, 0x%qx\n", this, segCount, (uint64_t) phys, (uint64_t) segLen);
999 						}
1000 #endif /* defined(LOGTAG) */
1001 
1002 						align = (phys & mapperPageMask);
1003 
1004 #if defined(LOGTAG)
1005 						if (LOGTAG == fMemory->getTag()) {
1006 							IOLog("DMA[%p] runs[%d] dmaoff 0x%qx, mapoff 0x%qx, align 0x%qx\n", this, segCount, index, insertOffset, align);
1007 						}
1008 #endif /* defined(LOGTAG) */
1009 
1010 						assert(segCount < state->fMapSegmentsCount);
1011 						state->fMapSegments[segCount].fDMAOffset = state->fPreparedOffset + index;
1012 						state->fMapSegments[segCount].fMapOffset = insertOffset;
1013 						state->fMapSegments[segCount].fPageOffset = align;
1014 						index  += segLen;
1015 
1016 						// segment page align
1017 						segLen  = ((phys + segLen + mapperPageMask) & ~mapperPageMask);
1018 						phys   -= align;
1019 						segLen -= phys;
1020 						insertOffset += segLen;
1021 					}
1022 					state->fLocalMapperAllocBase = (mapArgs.fAlloc & ~mapperPageMask);
1023 #if defined(LOGTAG)
1024 					if (LOGTAG == fMemory->getTag()) {
1025 						IOLog("IODMACommand fMapSegmentsCount %d\n", state->fMapSegmentsCount);
1026 					}
1027 #endif /* defined(LOGTAG) */
1028 				} while (false);
1029 			}
1030 			if (kIOReturnSuccess == ret) {
1031 				state->fLocalMapperAlloc       = mapArgs.fAlloc;
1032 				state->fLocalMapperAllocValid  = true;
1033 				state->fLocalMapperAllocLength = mapArgs.fAllocLength;
1034 			}
1035 		}
1036 		if (kIOReturnSuccess == ret) {
1037 			state->fPrepared = true;
1038 		}
1039 	}
1040 	return ret;
1041 }
1042 
// Balance a previous prepare(). Only when the last outstanding prepare()
// completes (fActive drops to zero) does this: flush non-coherent caches if
// requested, copy any double-buffered data back to the client memory when
// 'synchronize' is set, and release the IOMMU (mapper) allocation made at
// prepare() time.
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
	IODMACommandInternal * state = fInternalState;
	IOReturn               ret   = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> copyMD;

	// Must have at least one prepare() outstanding.
	if (fActive < 1) {
		return kIOReturnNotReady;
	}

	// Only tear down on the final complete() for nested prepares.
	if (!--fActive) {
		copyMD = state->fCopyMD;

		// Non-coherent mappings: flush the CPU cache over the prepared
		// range. The copy MD (if any) starts at offset 0; the original
		// memory uses the prepared offset.
		if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) {
			if (copyMD) {
				copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
			} else {
				OSSharedPtr<IOMemoryDescriptor> md = fMemory;
				md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
			}
		}

		// Non-cursor commands may have double-buffered pages; walk them
		// (optionally copying data back in) and free the bounce copies.
		if (!state->fCursor) {
			IOOptionBits op = kWalkComplete;
			if (synchronize) {
				op |= kWalkSyncIn;
			}
			ret = walkAll(op);
		}

		// Undo the DMA mapping made at prepare() time, if one is live.
		if (state->fLocalMapperAllocValid) {
			IOMDDMAMapArgs mapArgs;
			bzero(&mapArgs, sizeof(mapArgs));
			mapArgs.fMapper = fMapper.get();
			mapArgs.fCommand = this;
			mapArgs.fAlloc = state->fLocalMapperAlloc;
			mapArgs.fAllocLength = state->fLocalMapperAllocLength;
			// Mirror the prepare() path: unmap through the copy MD when
			// present (offset 0), else the original MD at fPreparedOffset.
			OSSharedPtr<IOMemoryDescriptor> md = copyMD;
			if (md) {
				mapArgs.fOffset = 0;
			} else {
				md = fMemory;
				mapArgs.fOffset = state->fPreparedOffset;
			}

			ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));

			state->fLocalMapperAlloc       = 0;
			state->fLocalMapperAllocValid  = false;
			state->fLocalMapperAllocLength = 0;
			// Free the per-segment map bookkeeping built for the
			// multi-segment mapping case.
			if (state->fMapSegments) {
				IODeleteData(state->fMapSegments, IODMACommandMapSegment, state->fMapSegmentsCount);
				state->fMapSegments      = NULL;
				state->fMapSegmentsCount = 0;
			}
		}

		state->fPrepared = false;
	}

	return ret;
}
1106 
1107 IOReturn
getPreparedOffsetAndLength(UInt64 * offset,UInt64 * length)1108 IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
1109 {
1110 	IODMACommandInternal * state = fInternalState;
1111 	if (fActive < 1) {
1112 		return kIOReturnNotReady;
1113 	}
1114 
1115 	if (offset) {
1116 		*offset = state->fPreparedOffset;
1117 	}
1118 	if (length) {
1119 		*length = state->fPreparedLength;
1120 	}
1121 
1122 	return kIOReturnSuccess;
1123 }
1124 
1125 IOReturn
synchronize(IOOptionBits options)1126 IODMACommand::synchronize(IOOptionBits options)
1127 {
1128 	IODMACommandInternal * state = fInternalState;
1129 	IOReturn               ret   = kIOReturnSuccess;
1130 	IOOptionBits           op;
1131 
1132 	if (kIODirectionOutIn == (kIODirectionOutIn & options)) {
1133 		return kIOReturnBadArgument;
1134 	}
1135 
1136 	if (fActive < 1) {
1137 		return kIOReturnNotReady;
1138 	}
1139 
1140 	op = 0;
1141 	if (kForceDoubleBuffer & options) {
1142 		if (state->fDoubleBuffer) {
1143 			return kIOReturnSuccess;
1144 		}
1145 		ret = complete(false /* invalidateCache */, true /* synchronize */);
1146 		state->fCursor = false;
1147 		state->fForceDoubleBuffer = true;
1148 		ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);
1149 
1150 		return ret;
1151 	} else if (state->fCursor) {
1152 		return kIOReturnSuccess;
1153 	}
1154 
1155 	if (kIODirectionIn & options) {
1156 		op |= kWalkSyncIn | kWalkSyncAlways;
1157 	} else if (kIODirectionOut & options) {
1158 		op |= kWalkSyncOut | kWalkSyncAlways;
1159 	}
1160 
1161 	ret = walkAll(op);
1162 
1163 	return ret;
1164 }
1165 
// Context threaded (via genIOVMSegments' reference argument) from
// transfer() into transferSegment() to drive a byte copy between the
// prepared DMA memory and a client buffer.
struct IODMACommandTransferContext {
	void *   buffer;        // client buffer being read into / written from
	UInt64   bufferOffset;  // running byte offset into 'buffer'
	UInt64   remaining;     // bytes still to transfer
	UInt32   op;            // kIODMACommandTransferOpReadBytes or ...WriteBytes
};
// Transfer directions used by IODMACommand::transfer().
enum{
	kIODMACommandTransferOpReadBytes  = 1,  // DMA memory -> client buffer
	kIODMACommandTransferOpWriteBytes = 2   // client buffer -> DMA memory
};
1176 
// genIOVMSegments callback used by transfer(): copy one DMA segment's worth
// of bytes between physical memory and the client buffer described in
// 'reference' (an IODMACommandTransferContext).
// Returns kIOReturnOverrun once the requested byte count is exhausted, which
// genIOVMSegments treats as "stop walking".
IOReturn
IODMACommand::transferSegment(void   *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
	IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
	// Clip the segment to the bytes the caller still wants.
	UInt64   length  = min(segment.fLength, context->remaining);
	addr64_t ioAddr  = segment.fIOVMAddr;
	addr64_t cpuAddr = ioAddr;

	context->remaining -= length;

	while (length) {
		UInt64 copyLen = length;
		// For mapped (IOMMU) commands the segment address is an IO virtual
		// address: translate it page by page to a CPU physical address.
		if ((kMapped == MAPTYPE(target->fMappingOptions))
		    && target->fMapper) {
			cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
			copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
			ioAddr += copyLen;
		}
		// copypv takes an unsigned int size; clamp to a page-aligned cap
		// below UINT_MAX so the cast cannot truncate.
		if (copyLen > (UINT_MAX - PAGE_SIZE + 1)) {
			copyLen = (UINT_MAX - PAGE_SIZE + 1);
		}

		switch (context->op) {
		case kIODMACommandTransferOpReadBytes:
			// Physical source -> kernel-mapped sink (the client buffer).
			copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, (unsigned int) copyLen,
			    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
			break;
		case kIODMACommandTransferOpWriteBytes:
			// Kernel-mapped source -> physical sink.
			copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, (unsigned int) copyLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			break;
		}
		length                -= copyLen;
		context->bufferOffset += copyLen;
	}

	// Non-zero remaining: keep walking. Zero: report overrun to stop.
	return context->remaining ? kIOReturnSuccess : kIOReturnOverrun;
}
1219 
1220 UInt64
transfer(IOOptionBits transferOp,UInt64 offset,void * buffer,UInt64 length)1221 IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
1222 {
1223 	IODMACommandInternal *      state = fInternalState;
1224 	IODMACommandTransferContext context;
1225 	Segment64                   segments[1];
1226 	UInt32                      numSegments = 0 - 1;
1227 
1228 	if (fActive < 1) {
1229 		return 0;
1230 	}
1231 
1232 	if (offset >= state->fPreparedLength) {
1233 		return 0;
1234 	}
1235 	length = min(length, state->fPreparedLength - offset);
1236 
1237 	context.buffer       = buffer;
1238 	context.bufferOffset = 0;
1239 	context.remaining    = length;
1240 	context.op           = transferOp;
1241 	(void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
1242 
1243 	return length - context.remaining;
1244 }
1245 
// Copy up to 'length' bytes out of the prepared DMA memory starting at
// 'offset' into 'bytes'. Returns the number of bytes actually copied.
UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
	return transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length);
}
1251 
// Copy up to 'length' bytes from 'bytes' into the prepared DMA memory
// starting at 'offset'. Returns the number of bytes actually copied.
// The const_cast is safe: the write path only reads from 'bytes'.
UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
	return transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length);
}
1257 
// Public client entry point: generate DMA segments using the output
// segment function configured at init time (fOutSeg).
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
	return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
	           offsetP, segmentsP, numSegmentsP);
}
1266 
// Core segment generator. Starting at *offsetP (relative to the prepared
// range), walk the memory descriptor and emit up to *numSegmentsP segments
// through 'outSegFunc'. Segments are coalesced when physically contiguous,
// then truncated/split to honor fMaxSegmentSize, the address-bit limit
// (maxPhys), and the length/address alignment masks. On success *offsetP
// and *numSegmentsP are updated so the caller can continue where it left off.
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
    InternalSegmentFunction outSegFunc,
    void   *reference,
    UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
	IODMACommandInternal * internalState = fInternalState;
	IOOptionBits           mdOp = kIOMDWalkSegments;
	IOReturn               ret  = kIOReturnSuccess;

	// Client walks require an outstanding prepare(); internal kWalkComplete
	// walks are allowed on an inactive command.
	if (!(kWalkComplete & op) && !fActive) {
		return kIOReturnNotReady;
	}

	if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) {
		return kIOReturnBadArgument;
	}

	// fState persists walk position between calls to this function.
	IOMDDMAWalkSegmentArgs *state =
	    (IOMDDMAWalkSegmentArgs *)(void *) fState;

	// Work in absolute memory-descriptor offsets from here on.
	UInt64 offset    = *offsetP + internalState->fPreparedOffset;
	UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

	if (offset >= memLength) {
		return kIOReturnOverrun;
	}

	// Restart the walk from scratch if the caller rewound to the start,
	// jumped to a different offset than where we left off, or the memory
	// descriptor changed since the last call.
	if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
		state->fOffset                                   = 0;
		internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
		internalState->fNextRemapPage                    = NULL;
		internalState->fNewMD                            = false;
		mdOp                                             = kIOMDFirstSegment;
		if (fMapper) {
			if (internalState->fLocalMapperAllocValid) {
				state->fMapped = true;
				state->fMappedBase = internalState->fLocalMapperAlloc;
			} else {
				state->fMapped = false;
			}
		}
	}

	UInt32    segIndex = 0;
	UInt32    numSegments = *numSegmentsP;
	Segment64 curSeg = { 0, 0 };
	bool      curSegValid = false;
	addr64_t  maxPhys;

	// Highest address the device can reach; ~0 when unrestricted
	// (maxPhys-- turns 0 into all-ones and 2^bits into 2^bits - 1).
	if (fNumAddressBits && (fNumAddressBits < 64)) {
		maxPhys = (1ULL << fNumAddressBits);
	} else {
		maxPhys = 0;
	}
	maxPhys--;

	while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) {
		// state = next seg
		if (!internalState->fIOVMAddrValid) {
			IOReturn rtn;

			state->fOffset = offset;
			state->fLength = memLength - offset;

			bool done = false;
			bool check = false;

			// Mapper (IOMMU) path: the DMA address can be computed
			// directly from the mapping made at prepare() time.
			if (internalState->fLocalMapperAllocValid) {
				if (!internalState->fMapSegmentsCount) {
					// Single contiguous mapping: simple offset arithmetic.
					state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
					rtn = kIOReturnSuccess;
					done = true;
					check = true;
				} else {
					// Multi-segment mapping: look up the run containing
					// 'offset' in the fMapSegments table built at prepare().
					uint64_t address;
					uint64_t length;
					uint64_t runOffset;
					uint64_t ind;
					uint64_t off2Ind = internalState->fOffset2Index;

					// Validate the previous offset
					if (offset
					    && (offset == internalState->fNextOffset || off2Ind <= offset)) {
						ind = internalState->fIndex;
					} else {
						ind = off2Ind = 0; // Start from beginning
					}
#if defined(LOGTAG)
					if (LOGTAG == fMemory->getTag()) {
						IOLog("DMA[%p] offsets 0x%qx, 0x%qx, 0x%qx ind %qd\n", this, offset, internalState->fPreparedOffset, internalState->fNextOffset, ind);
					}
#endif /* defined(LOGTAG) */

					// Scan through iopl info blocks looking for block containing offset
					while (ind < internalState->fMapSegmentsCount && offset >= internalState->fMapSegments[ind].fDMAOffset) {
						ind++;
					}
					if (ind < internalState->fMapSegmentsCount) {
						length = internalState->fMapSegments[ind].fDMAOffset;
					} else {
						length = memLength;
					}
					length -= offset;       // Remainder within iopl

					// Go back to actual range as search goes past it
					ind--;
					off2Ind = internalState->fMapSegments[ind].fDMAOffset;

					// Subtract offset till this iopl in total list
					runOffset = offset - off2Ind;

					// Compute an offset relative to the mapped base

					runOffset += internalState->fMapSegments[ind].fPageOffset;
					address = internalState->fLocalMapperAllocBase + internalState->fMapSegments[ind].fMapOffset + runOffset;
#if defined(LOGTAG)
					if (LOGTAG == fMemory->getTag()) {
						IOLog("DMA[%p] addrlen 0x%qx, 0x%qx\n", this, address, length);
					}
#endif /* defined(LOGTAG) */

					state->fIOVMAddr = address;
					state->fLength   = length;

					// Cache the lookup position for the next call.
					internalState->fIndex        = ind;
					internalState->fOffset2Index = off2Ind;
					internalState->fNextOffset   = state->fOffset + length;

					rtn = kIOReturnSuccess;
					done = true;
					check = true;
				}
			}

			// Unmapped path: ask the memory descriptor itself for the
			// next physical segment.
			if (!done) {
				IOMemoryDescriptor * memory =
				    internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get();
				rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
				mdOp = kIOMDWalkSegments;
			}
#if 0
			// Debug-only cross-check: verify the mapper translation
			// matches the descriptor's physical addresses.
			if (check
			    && !ml_at_interrupt_context()
			    && (rtn == kIOReturnSuccess)
			    && fMapper
			    && strcmp("AppleNVMeMMU", fMapper->getName())) {
				uint64_t checkOffset;
				IOPhysicalLength segLen;
				IOMemoryDescriptor * memory =
				    internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get();
				for (checkOffset = 0; checkOffset < state->fLength;) {
					addr64_t phys = memory->getPhysicalSegment(offset + checkOffset, &segLen, kIOMemoryMapperNone);
					addr64_t mapperPhys;

					mapperPhys = fMapper->mapToPhysicalAddress(state->fIOVMAddr + checkOffset);
					mapperPhys |= (phys & (fMapper->getPageSize() - 1));
					if (mapperPhys != phys) {
						panic("DMA[%p] mismatch at offset %llx + %llx, dma %llx mapperPhys %llx != %llx, len %llx",
						    this, offset, checkOffset,
						    state->fIOVMAddr + checkOffset, mapperPhys, phys, state->fLength);
					}
					checkOffset += page_size - (phys & page_mask);
				}
			}
#endif
			if (rtn == kIOReturnSuccess) {
				internalState->fIOVMAddrValid = true;
				assert(state->fLength);
				// Coalesce with the pending segment when contiguous.
				if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
					UInt64 length = state->fLength;
					offset          += length;
					curSeg.fLength  += length;
					internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
				}
			} else if (rtn == kIOReturnOverrun) {
				internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
			} else {
				return rtn;
			}
		}

		// seg = state, offset = end of seg
		if (!curSegValid) {
			UInt64 length                 = state->fLength;
			offset                       += length;
			curSeg.fIOVMAddr              = state->fIOVMAddr;
			curSeg.fLength                = length;
			curSegValid                   = true;
			internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
		}

		if (!internalState->fIOVMAddrValid) {
			// maxPhys
			// The pending segment may exceed the device's addressable
			// range: fail (cursor), truncate at the limit, or substitute
			// a low-memory bounce page allocated at prepare() time.
			if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) {
				if (internalState->fCursor) {
					curSegValid = curSeg.fIOVMAddr = 0;
					ret = kIOReturnMessageTooLarge;
					break;
				} else if (curSeg.fIOVMAddr <= maxPhys) {
					// Segment straddles the limit: emit the reachable
					// prefix now, push the remainder back into 'state'.
					UInt64 remain, newLength;

					newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
					DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
					remain           = curSeg.fLength - newLength;
					state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
					internalState->fIOVMAddrValid = true;
					curSeg.fLength   = newLength;
					state->fLength   = remain;
					offset          -= remain;
				} else {
					// Entirely above the limit: redirect to the copy
					// (bounce) page that prepare() set aside for this page.
					UInt64    addr = curSeg.fIOVMAddr;
					ppnum_t   addrPage = (ppnum_t) atop_64(addr);
					vm_page_t remap = NULL;
					UInt64    remain, newLength;

					DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

					remap = internalState->fNextRemapPage;
					if (remap && (addrPage == vm_page_get_offset(remap))) {
					} else {
						for (remap = internalState->fCopyPageAlloc;
						    remap && (addrPage != vm_page_get_offset(remap));
						    remap = vm_page_get_next(remap)) {
						}
					}

					if (!remap) {
						panic("no remap page found");
					}

					curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
					    + (addr & PAGE_MASK);
					curSegValid = true;
					internalState->fNextRemapPage = vm_page_get_next(remap);

					// Bounce pages are per-page: split off anything past
					// the end of this page for the next iteration.
					newLength            = PAGE_SIZE - (addr & PAGE_MASK);
					if (newLength < curSeg.fLength) {
						remain           = curSeg.fLength - newLength;
						state->fIOVMAddr = addr + newLength;
						internalState->fIOVMAddrValid = true;
						curSeg.fLength   = newLength;
						state->fLength   = remain;
						offset          -= remain;
					}
					DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
				}
			}

			// reduce size of output segment
			uint64_t reduce, leftover = 0;

			// fMaxSegmentSize
			if (curSeg.fLength > fMaxSegmentSize) {
				leftover      += curSeg.fLength - fMaxSegmentSize;
				curSeg.fLength = fMaxSegmentSize;
				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
				internalState->fIOVMAddrValid = true;
			}

			// alignment current length

			reduce = (curSeg.fLength & fAlignMaskLength);
			if (reduce && (curSeg.fLength > reduce)) {
				leftover       += reduce;
				curSeg.fLength -= reduce;
				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
				internalState->fIOVMAddrValid = true;
			}

			// alignment next address

			reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
			if (reduce && (curSeg.fLength > reduce)) {
				leftover       += reduce;
				curSeg.fLength -= reduce;
				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
				internalState->fIOVMAddrValid = true;
			}

			// Anything shaved off above is re-queued for the next pass.
			if (leftover) {
				DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
				    leftover, offset,
				    curSeg.fIOVMAddr, curSeg.fLength);
				state->fLength   = leftover;
				offset          -= leftover;
			}

			//

			// Cursor commands must already satisfy the alignment
			// constraints; they cannot be fixed up by splitting.
			if (internalState->fCursor) {
				bool misaligned;
				uint32_t mask;

				mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
				misaligned = (0 != (mask & curSeg.fIOVMAddr));
				if (!misaligned) {
					mask = fAlignMaskLength;
					misaligned |= (0 != (mask &  curSeg.fLength));
				}
				if (misaligned) {
					if (misaligned) {
						DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
					}
					curSegValid = curSeg.fIOVMAddr = 0;
					ret = kIOReturnNotAligned;
					break;
				}
			}

			// Clip the final segment at the end of the prepared range.
			if (offset >= memLength) {
				curSeg.fLength   -= (offset - memLength);
				offset = memLength;
				internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
				break;
			}
		}

		// A next segment is pending, so curSeg cannot grow further:
		// emit it (unless the caller's segment array is full).
		if (internalState->fIOVMAddrValid) {
			if ((segIndex + 1 == numSegments)) {
				break;
			}
#if defined(LOGTAG)
			if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) {
				IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength);
			}
#endif /* defined(LOGTAG) */
			ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
			curSegValid = curSeg.fIOVMAddr = 0;
			if (kIOReturnSuccess != ret) {
				break;
			}
		}
	}

	// Flush the last pending segment.
	if (curSegValid) {
#if defined(LOGTAG)
		if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) {
			IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength);
		}
#endif /* defined(LOGTAG) */
		ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	}

	// Report progress back to the caller (offset relative to the
	// prepared range, count of segments actually produced).
	if (kIOReturnSuccess == ret) {
		state->fOffset = offset;
		*offsetP       = offset - internalState->fPreparedOffset;
		*numSegmentsP  = segIndex;
	}
	return ret;
}
1620 
1621 IOReturn
clientOutputSegment(void * reference,IODMACommand * target,Segment64 segment,void * vSegList,UInt32 outSegIndex)1622 IODMACommand::clientOutputSegment(
1623 	void *reference, IODMACommand *target,
1624 	Segment64 segment, void *vSegList, UInt32 outSegIndex)
1625 {
1626 	SegmentFunction segmentFunction = (SegmentFunction) reference;
1627 	IOReturn ret = kIOReturnSuccess;
1628 
1629 	if (target->fNumAddressBits && (target->fNumAddressBits < 64)
1630 	    && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
1631 	    && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) {
1632 		DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1633 		ret = kIOReturnMessageTooLarge;
1634 	}
1635 
1636 	if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) {
1637 		DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1638 		ret = kIOReturnMessageTooLarge;
1639 	}
1640 
1641 	return ret;
1642 }
1643 
// Variant of the client entry point taking an explicit output segment
// function instead of the one configured at init time.
IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
    UInt64   *offsetP,
    void     *segmentsP,
    UInt32   *numSegmentsP)
{
	return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
	           offsetP, segmentsP, numSegmentsP);
}
1653 
1654 bool
OutputHost32(IODMACommand *,Segment64 segment,void * vSegList,UInt32 outSegIndex)1655 IODMACommand::OutputHost32(IODMACommand *,
1656     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1657 {
1658 	Segment32 *base = (Segment32 *) vSegList;
1659 	base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
1660 	base[outSegIndex].fLength   = (UInt32) segment.fLength;
1661 	return true;
1662 }
1663 
1664 bool
OutputBig32(IODMACommand *,Segment64 segment,void * vSegList,UInt32 outSegIndex)1665 IODMACommand::OutputBig32(IODMACommand *,
1666     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1667 {
1668 	const UInt offAddr = outSegIndex * sizeof(Segment32);
1669 	const UInt offLen  = offAddr + sizeof(UInt32);
1670 	OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1671 	OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
1672 	return true;
1673 }
1674 
1675 bool
OutputLittle32(IODMACommand *,Segment64 segment,void * vSegList,UInt32 outSegIndex)1676 IODMACommand::OutputLittle32(IODMACommand *,
1677     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1678 {
1679 	const UInt offAddr = outSegIndex * sizeof(Segment32);
1680 	const UInt offLen  = offAddr + sizeof(UInt32);
1681 	OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1682 	OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
1683 	return true;
1684 }
1685 
1686 bool
OutputHost64(IODMACommand *,Segment64 segment,void * vSegList,UInt32 outSegIndex)1687 IODMACommand::OutputHost64(IODMACommand *,
1688     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1689 {
1690 	Segment64 *base = (Segment64 *) vSegList;
1691 	base[outSegIndex] = segment;
1692 	return true;
1693 }
1694 
1695 bool
OutputBig64(IODMACommand *,Segment64 segment,void * vSegList,UInt32 outSegIndex)1696 IODMACommand::OutputBig64(IODMACommand *,
1697     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1698 {
1699 	const UInt offAddr = outSegIndex * sizeof(Segment64);
1700 	const UInt offLen  = offAddr + sizeof(UInt64);
1701 	OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1702 	OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
1703 	return true;
1704 }
1705 
1706 bool
OutputLittle64(IODMACommand *,Segment64 segment,void * vSegList,UInt32 outSegIndex)1707 IODMACommand::OutputLittle64(IODMACommand *,
1708     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1709 {
1710 	const UInt offAddr = outSegIndex * sizeof(Segment64);
1711 	const UInt offLen  = offAddr + sizeof(UInt64);
1712 	OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1713 	OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
1714 	return true;
1715 }
1716