xref: /xnu-11417.101.15/iokit/System/IODataQueueDispatchSourceShared.h (revision e3723e1f17661b24996789d8afc084c0c3303b26)
/*
 * One variable-length entry in the shared data queue.  A 4-byte size
 * header is immediately followed by the payload bytes; data[0] is a
 * zero-length trailing array used purely to address that payload.
 * Layout is shared between kernel and user space — do not change it.
 */
typedef struct _IODataQueueEntry {
	uint32_t  size;     // payload byte count (header not included)
	uint8_t   data[0];  // payload begins here, in-line after the header
} IODataQueueEntry;

// Size in bytes of the per-entry header that precedes each payload.
#define DATA_QUEUE_ENTRY_HEADER_SIZE sizeof(IODataQueueEntry)
7 
/*
 * Header of the kernel/user shared queue mapping.  head/tail are byte
 * offsets into queue[]; both sides access them with C11 atomics.
 * Layout is shared between kernel and user space — do not change it.
 */
typedef struct _IODataQueueMemory {
	volatile uint32_t   head;                  // consumer's dequeue offset into queue[]
	volatile uint32_t   tail;                  // producer's enqueue offset into queue[]
	volatile uint8_t    needServicedCallback;  // producer requests DataServiced after next dequeue
	volatile uint8_t    _resv[119];            // pads the header to 128 bytes
	IODataQueueEntry  queue[0];                // entry storage starts here
} IODataQueueMemory;

// Byte offset at which entry storage begins within the shared mapping.
#define DATA_QUEUE_MEMORY_HEADER_SIZE (offsetof(IODataQueueMemory, queue))
17 
/*
 * Instance state for IODataQueueDispatchSource; this file is built for
 * both the kernel and user space (see the KERNEL conditionals below).
 */
struct IODataQueueDispatchSource_IVars {
	IODataQueueMemory         * dataQueue;           // mapped view of the shared queue header + storage
	IODataQueueDispatchSource * source;              // back-pointer to the owning object
//    IODispatchQueue           * queue;
	IOMemoryDescriptor        * memory;              // kernel/user shared buffer backing dataQueue
	OSAction                  * dataAvailableAction; // cached handler fired when data is enqueued
	OSAction                  * dataServicedAction;  // cached handler fired after a requested dequeue
	uint64_t                    options;
	uint32_t                    queueByteCount;      // total mapping size: shared header + entry storage

#if !KERNEL
	bool                        enable;              // user-space-only enable state
	bool                        canceled;            // user-space-only cancel state
#endif
};
33 
34 bool
init()35 IODataQueueDispatchSource::init()
36 {
37 	if (!super::init()) {
38 		return false;
39 	}
40 
41 	ivars = IONewZero(IODataQueueDispatchSource_IVars, 1);
42 	ivars->source = this;
43 
44 #if !KERNEL
45 	kern_return_t ret;
46 
47 	ret = CopyMemory(&ivars->memory);
48 	assert(kIOReturnSuccess == ret);
49 
50 	uint64_t address;
51 	uint64_t length;
52 
53 	ret = ivars->memory->Map(0, 0, 0, 0, &address, &length);
54 	assert(kIOReturnSuccess == ret);
55 	ivars->dataQueue = (typeof(ivars->dataQueue))(uintptr_t) address;
56 	ivars->queueByteCount = length;
57 #endif
58 
59 	return true;
60 }
61 
62 kern_return_t
CheckForWork_Impl(const IORPC rpc,bool synchronous)63 IODataQueueDispatchSource::CheckForWork_Impl(
64 	const IORPC rpc,
65 	bool synchronous)
66 {
67 	IOReturn ret = kIOReturnNotReady;
68 
69 	return ret;
70 }
71 
72 #if KERNEL
73 
74 kern_return_t
Create_Impl(uint64_t queueByteCount,IODispatchQueue * queue,IODataQueueDispatchSource ** source)75 IODataQueueDispatchSource::Create_Impl(
76 	uint64_t queueByteCount,
77 	IODispatchQueue * queue,
78 	IODataQueueDispatchSource ** source)
79 {
80 	IODataQueueDispatchSource * inst;
81 	IOBufferMemoryDescriptor  * bmd;
82 
83 	if (3 & queueByteCount) {
84 		return kIOReturnBadArgument;
85 	}
86 	if (queueByteCount > UINT_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
87 		return kIOReturnBadArgument;
88 	}
89 	queueByteCount += DATA_QUEUE_MEMORY_HEADER_SIZE;
90 	inst = OSTypeAlloc(IODataQueueDispatchSource);
91 	if (!inst) {
92 		return kIOReturnNoMemory;
93 	}
94 	if (!inst->init()) {
95 		inst->release();
96 		return kIOReturnError;
97 	}
98 
99 	bmd = IOBufferMemoryDescriptor::withOptions(
100 		kIODirectionOutIn | kIOMemoryKernelUserShared,
101 		queueByteCount, page_size);
102 	if (!bmd) {
103 		inst->release();
104 		return kIOReturnNoMemory;
105 	}
106 	inst->ivars->memory         = bmd;
107 	inst->ivars->queueByteCount = ((uint32_t) queueByteCount);
108 	inst->ivars->options        = 0;
109 	inst->ivars->dataQueue      = (typeof(inst->ivars->dataQueue))bmd->getBytesNoCopy();
110 
111 	*source = inst;
112 
113 	return kIOReturnSuccess;
114 }
115 
116 kern_return_t
CopyMemory_Impl(IOMemoryDescriptor ** memory)117 IODataQueueDispatchSource::CopyMemory_Impl(
118 	IOMemoryDescriptor ** memory)
119 {
120 	kern_return_t ret;
121 	IOMemoryDescriptor * result;
122 
123 	result = ivars->memory;
124 	if (result) {
125 		result->retain();
126 		ret = kIOReturnSuccess;
127 	} else {
128 		ret = kIOReturnNotReady;
129 	}
130 	*memory = result;
131 
132 	return ret;
133 }
134 
135 kern_return_t
CopyDataAvailableHandler_Impl(OSAction ** action)136 IODataQueueDispatchSource::CopyDataAvailableHandler_Impl(
137 	OSAction ** action)
138 {
139 	kern_return_t ret;
140 	OSAction    * result;
141 
142 	result = ivars->dataAvailableAction;
143 	if (result) {
144 		result->retain();
145 		ret = kIOReturnSuccess;
146 	} else {
147 		ret = kIOReturnNotReady;
148 	}
149 	*action = result;
150 
151 	return ret;
152 }
153 
154 kern_return_t
CopyDataServicedHandler_Impl(OSAction ** action)155 IODataQueueDispatchSource::CopyDataServicedHandler_Impl(
156 	OSAction ** action)
157 {
158 	kern_return_t ret;
159 	OSAction    * result;
160 
161 	result = ivars->dataServicedAction;
162 	if (result) {
163 		result->retain();
164 		ret = kIOReturnSuccess;
165 	} else {
166 		ret = kIOReturnNotReady;
167 	}
168 	*action = result;
169 	return ret;
170 }
171 
/*
 * Install (or replace) the handler invoked when data becomes available.
 *
 * @param action  New handler; retained here.  Passing NULL only removes
 *                the old handler.
 * @return kIOReturnSuccess always.
 */
kern_return_t
IODataQueueDispatchSource::SetDataAvailableHandler_Impl(
	OSAction * action)
{
	IOReturn ret;
	OSAction * oldAction;

	// Detach any previous handler.  The compare-and-swap ensures only one
	// racing caller clears the field, so the old action is released once.
	oldAction = ivars->dataAvailableAction;
	if (oldAction && OSCompareAndSwapPtr(oldAction, NULL, &ivars->dataAvailableAction)) {
		oldAction->release();
	}
	if (action) {
		action->retain();
		ivars->dataAvailableAction = action;
		// Fire immediately if entries are already queued, so the new
		// handler does not miss data enqueued before it was installed.
		if (IsDataAvailable()) {
			DataAvailable(ivars->dataAvailableAction);
		}
	}
	ret = kIOReturnSuccess;

	return ret;
}
194 
/*
 * Install (or replace) the handler invoked after the consumer services
 * the queue (used for queue-full back-pressure notification).
 *
 * @param action  New handler; retained here.  Passing NULL only removes
 *                the old handler.
 * @return kIOReturnSuccess always.
 */
kern_return_t
IODataQueueDispatchSource::SetDataServicedHandler_Impl(
	OSAction * action)
{
	IOReturn ret;
	OSAction * oldAction;

	// Detach any previous handler.  The compare-and-swap ensures only one
	// racing caller clears the field, so the old action is released once.
	oldAction = ivars->dataServicedAction;
	if (oldAction && OSCompareAndSwapPtr(oldAction, NULL, &ivars->dataServicedAction)) {
		oldAction->release();
	}
	if (action) {
		action->retain();
		ivars->dataServicedAction = action;
	}
	ret = kIOReturnSuccess;

	return ret;
}
214 
215 #endif /* KERNEL */
216 
217 void
SendDataAvailable(void)218 IODataQueueDispatchSource::SendDataAvailable(void)
219 {
220 	IOReturn ret;
221 
222 	if (!ivars->dataAvailableAction) {
223 		ret = CopyDataAvailableHandler(&ivars->dataAvailableAction);
224 		if (kIOReturnSuccess != ret) {
225 			ivars->dataAvailableAction = NULL;
226 		}
227 	}
228 	if (ivars->dataAvailableAction) {
229 		DataAvailable(ivars->dataAvailableAction);
230 	}
231 }
232 
233 void
SendDataServiced(void)234 IODataQueueDispatchSource::SendDataServiced(void)
235 {
236 	IOReturn ret;
237 
238 	if (!ivars->dataServicedAction) {
239 		ret = CopyDataServicedHandler(&ivars->dataServicedAction);
240 		if (kIOReturnSuccess != ret) {
241 			ivars->dataServicedAction = NULL;
242 		}
243 	}
244 	if (ivars->dataServicedAction) {
245 		ivars->dataQueue->needServicedCallback = false;
246 		DataServiced(ivars->dataServicedAction);
247 	}
248 }
249 
250 kern_return_t
SetEnableWithCompletion_Impl(bool enable,IODispatchSourceCancelHandler handler)251 IODataQueueDispatchSource::SetEnableWithCompletion_Impl(
252 	bool enable,
253 	IODispatchSourceCancelHandler handler)
254 {
255 	IOReturn ret;
256 
257 #if !KERNEL
258 	ivars->enable = enable;
259 #endif
260 
261 	ret = kIOReturnSuccess;
262 	return ret;
263 }
264 
265 void
free()266 IODataQueueDispatchSource::free()
267 {
268 	OSSafeReleaseNULL(ivars->memory);
269 	OSSafeReleaseNULL(ivars->dataAvailableAction);
270 	OSSafeReleaseNULL(ivars->dataServicedAction);
271 	IOSafeDeleteNULL(ivars, IODataQueueDispatchSource_IVars, 1);
272 	super::free();
273 }
274 
275 kern_return_t
Cancel_Impl(IODispatchSourceCancelHandler handler)276 IODataQueueDispatchSource::Cancel_Impl(
277 	IODispatchSourceCancelHandler handler)
278 {
279 #if !KERNEL
280 	if (handler) {
281 		handler();
282 	}
283 #endif
284 	return kIOReturnSuccess;
285 }
286 
287 bool
IsDataAvailable(void)288 IODataQueueDispatchSource::IsDataAvailable(void)
289 {
290 	IODataQueueMemory *dataQueue = ivars->dataQueue;
291 
292 	return dataQueue && (dataQueue->head != dataQueue->tail);
293 }
294 
295 kern_return_t
Peek(IODataQueueClientDequeueEntryBlock callback)296 IODataQueueDispatchSource::Peek(IODataQueueClientDequeueEntryBlock callback)
297 {
298 	IODataQueueEntry *  entry = NULL;
299 	IODataQueueMemory * dataQueue;
300 	uint32_t            callerDataSize;
301 	uint32_t            dataSize;
302 	uint32_t            headOffset;
303 	uint32_t            tailOffset;
304 
305 	dataQueue = ivars->dataQueue;
306 	if (!dataQueue) {
307 		return kIOReturnNoMemory;
308 	}
309 
310 	// Read head and tail with acquire barrier
311 	headOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
312 	tailOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_ACQUIRE);
313 
314 	if (headOffset != tailOffset) {
315 		IODataQueueEntry *  head        = NULL;
316 		uint32_t            headSize    = 0;
317 		uint32_t            queueSize   = ivars->queueByteCount - DATA_QUEUE_MEMORY_HEADER_SIZE;
318 
319 		if (headOffset > queueSize) {
320 			return kIOReturnError;
321 		}
322 
323 		head     = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + headOffset);
324 		callerDataSize = head->size;
325 		if (os_add_overflow(3, callerDataSize, &headSize)) {
326 			return kIOReturnError;
327 		}
328 		headSize &= ~3U;
329 
330 		// Check if there's enough room before the end of the queue for a header.
331 		// If there is room, check if there's enough room to hold the header and
332 		// the data.
333 
334 		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
335 		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
336 		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
337 		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
338 			// No room for the header or the data, wrap to the beginning of the queue.
339 			// Note: wrapping even with the UINT32_MAX checks, as we have to support
340 			// queueSize of UINT32_MAX
341 			entry = dataQueue->queue;
342 			callerDataSize  = entry->size;
343 			dataSize = entry->size;
344 			if (os_add_overflow(3, callerDataSize, &dataSize)) {
345 				return kIOReturnError;
346 			}
347 			dataSize &= ~3U;
348 
349 			if ((dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
350 			    (dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
351 				return kIOReturnError;
352 			}
353 
354 			callback(&entry->data, callerDataSize);
355 			return kIOReturnSuccess;
356 		} else {
357 			callback(&head->data, callerDataSize);
358 			return kIOReturnSuccess;
359 		}
360 	}
361 
362 	return kIOReturnUnderrun;
363 }
364 
365 kern_return_t
Dequeue(IODataQueueClientDequeueEntryBlock callback)366 IODataQueueDispatchSource::Dequeue(IODataQueueClientDequeueEntryBlock callback)
367 {
368 	kern_return_t ret;
369 	bool          sendDataServiced;
370 
371 	sendDataServiced = false;
372 	ret = DequeueWithCoalesce(&sendDataServiced, callback);
373 	if (sendDataServiced) {
374 		SendDataServiced();
375 	}
376 	return ret;
377 }
378 
/*
 * Remove the entry at the head of the shared queue.
 *
 * The payload is handed to `callback` before head is advanced, so the
 * bytes are stable for the duration of the callback.  Sets
 * *sendDataServiced when the producer asked for a serviced notification
 * (queue-full back-pressure); the caller delivers it afterwards.
 *
 * The shared header is writable by the peer process, so every offset
 * and size read from it is validated before use.
 *
 * @return kIOReturnUnderrun on an empty queue, kIOReturnError if the
 *         shared header is inconsistent, kIOReturnNoMemory when no
 *         queue is mapped, kIOReturnSuccess otherwise.
 */
kern_return_t
IODataQueueDispatchSource::DequeueWithCoalesce(bool * sendDataServiced,
    IODataQueueClientDequeueEntryBlock callback)
{
	IOReturn            retVal          = kIOReturnSuccess;
	IODataQueueEntry *  entry           = NULL;
	IODataQueueMemory * dataQueue;
	uint32_t            callerDataSize;
	uint32_t            dataSize        = 0;
	uint32_t            headOffset      = 0;
	uint32_t            tailOffset      = 0;
	uint32_t            newHeadOffset   = 0;

	dataQueue = ivars->dataQueue;
	if (!dataQueue) {
		return kIOReturnNoMemory;
	}

	// Read head and tail with acquire barrier
	headOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		IODataQueueEntry *  head        = NULL;
		uint32_t            headSize    = 0;
		uint32_t            queueSize   = ivars->queueByteCount - DATA_QUEUE_MEMORY_HEADER_SIZE;

		if (headOffset > queueSize) {
			return kIOReturnError;
		}

		head = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + headOffset);
		callerDataSize = head->size;
		// Round the payload up to 4-byte alignment, rejecting overflow.
		if (os_add_overflow(3, callerDataSize, &headSize)) {
			return kIOReturnError;
		}
		headSize &= ~3U;

		// we wrapped around to beginning, so read from there
		// either there was not even room for the header
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // or there was room for the header, but not for the data
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// Note: we have to wrap to the beginning even with the UINT32_MAX checks
			// because we have to support a queueSize of UINT32_MAX.
			entry           = dataQueue->queue;
			callerDataSize  = entry->size;

			// Re-validate the wrapped entry's size the same way.
			if (os_add_overflow(callerDataSize, 3, &dataSize)) {
				return kIOReturnError;
			}
			dataSize &= ~3U;
			if ((dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return kIOReturnError;
			}
			newHeadOffset   = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
			// else it is at the end
		} else {
			entry = head;

			if ((headSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (headSize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return kIOReturnError;
			}
			newHeadOffset   = headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// empty queue
		if (dataQueue->needServicedCallback) {
			*sendDataServiced = true;
		}
		return kIOReturnUnderrun;
	}

	// Consume the payload before publishing the new head.
	callback(&entry->data, callerDataSize);
	if (dataQueue->needServicedCallback) {
		*sendDataServiced = true;
	}

	// Release-store pairs with the enqueuer's acquire-load of head.
	__c11_atomic_store((_Atomic uint32_t *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}
475 
476 kern_return_t
Enqueue(uint32_t callerDataSize,IODataQueueClientEnqueueEntryBlock callback)477 IODataQueueDispatchSource::Enqueue(uint32_t callerDataSize,
478     IODataQueueClientEnqueueEntryBlock callback)
479 {
480 	kern_return_t ret;
481 	bool          sendDataAvailable;
482 
483 	sendDataAvailable = false;
484 	ret = EnqueueWithCoalesce(callerDataSize, &sendDataAvailable, callback);
485 	if (sendDataAvailable) {
486 		SendDataAvailable();
487 	}
488 	return ret;
489 }
490 
/*
 * Add an entry of callerDataSize bytes to the shared queue.
 *
 * Space is reserved first; `callback` then fills the payload in place.
 * Sets *sendDataAvailable when the consumer should be notified: either
 * the queue transitioned from empty, or the enqueue failed for lack of
 * space and a DataServiced callback is requested for the next dequeue.
 *
 * @return kIOReturnOverrun when the entry does not fit (or its size
 *         arithmetic overflows), kIOReturnUnderrun if the shared header
 *         is inconsistent, kIOReturnNoMemory when no queue is mapped,
 *         kIOReturnSuccess otherwise.
 */
kern_return_t
IODataQueueDispatchSource::EnqueueWithCoalesce(uint32_t callerDataSize,
    bool * sendDataAvailable,
    IODataQueueClientEnqueueEntryBlock callback)
{
	IODataQueueMemory * dataQueue;
	IODataQueueEntry *  entry;
	uint32_t            head;
	uint32_t            tail;
	uint32_t            newTail;
	uint32_t            dataSize;
	uint32_t            queueSize;
	uint32_t            entrySize;
	IOReturn            retVal = kIOReturnSuccess;

	dataQueue = ivars->dataQueue;
	if (!dataQueue) {
		return kIOReturnNoMemory;
	}
	queueSize = ivars->queueByteCount - DATA_QUEUE_MEMORY_HEADER_SIZE;

	// Force a single read of head and tail
	tail = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Round the payload up to 4-byte alignment, rejecting overflow.
	if (os_add_overflow(callerDataSize, 3, &dataSize)) {
		return kIOReturnOverrun;
	}
	dataSize &= ~3U;

	// Check for overflow of entrySize
	if (os_add_overflow(DATA_QUEUE_ENTRY_HEADER_SIZE, dataSize, &entrySize)) {
		return kIOReturnOverrun;
	}

	// Check for underflow of (getQueueSize() - tail)
	if (queueSize < tail || queueSize < head) {
		return kIOReturnUnderrun;
	}

	newTail = tail;
	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= (UINT32_MAX - tail)) &&
		    ((tail + entrySize) <= queueSize)) {
			entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail);

			callback(&entry->data, callerDataSize);

			entry->size = callerDataSize;

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue);

			callback(&entry->data, callerDataSize);

			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			entry->size = callerDataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail))->size = dataSize;
			}

			newTail = entrySize;
		} else {
			retVal = kIOReturnOverrun; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail);

			callback(&entry->data, callerDataSize);

			entry->size = callerDataSize;

			newTail = tail + entrySize;
		} else {
			retVal = kIOReturnOverrun; // queue is full
		}
	}

	// Send notification (via mach message) that data is available.

	if (retVal == kIOReturnSuccess) {
		// Publish the data we just enqueued
		__c11_atomic_store((_Atomic uint32_t *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

		if (tail != head) {
			//
			// The memory barrier below pairs with the one in dequeue
			// so that either our store to the tail cannot be missed by
			// the next dequeue attempt, or we will observe the dequeuer
			// making the queue empty.
			//
			// Of course, if we already think the queue is empty,
			// there's no point paying this extra cost.
			//
			__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
			head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
		}

		if (tail == head) {
			// Send notification that data is now available.
			*sendDataAvailable = true;
			retVal = kIOReturnSuccess;
		}
	} else if (retVal == kIOReturnOverrun) {
		// ask to be notified of Dequeue()
		dataQueue->needServicedCallback = true;
		*sendDataAvailable = true;
	}

	return retVal;
}
619 
620 kern_return_t
CanEnqueueData(uint32_t callerDataSize)621 IODataQueueDispatchSource::CanEnqueueData(uint32_t callerDataSize)
622 {
623 	return CanEnqueueData(callerDataSize, 1);
624 }
625 
/*
 * Test whether `dataCount` entries of `callerDataSize` bytes each could
 * be enqueued right now, without modifying the queue.  Mirrors the space
 * accounting of EnqueueWithCoalesce: payloads are rounded up to 4-byte
 * alignment and each entry carries a header.
 *
 * @return kIOReturnSuccess if the entries fit, kIOReturnOverrun if not
 *         (or size arithmetic overflows), kIOReturnError if the shared
 *         header is inconsistent, kIOReturnNoMemory with no queue mapped.
 */
kern_return_t
IODataQueueDispatchSource::CanEnqueueData(uint32_t callerDataSize, uint32_t dataCount)
{
	IODataQueueMemory * dataQueue;
	uint32_t            head;
	uint32_t            tail;
	uint32_t            dataSize;
	uint32_t            queueSize;
	uint32_t            entrySize;

	dataQueue = ivars->dataQueue;
	if (!dataQueue) {
		return kIOReturnNoMemory;
	}
	queueSize = ivars->queueByteCount - DATA_QUEUE_MEMORY_HEADER_SIZE;

	// Force a single read of head and tail
	tail = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Round the payload up to 4-byte alignment, rejecting overflow.
	if (os_add_overflow(callerDataSize, 3, &dataSize)) {
		return kIOReturnOverrun;
	}
	dataSize &= ~3U;

	// Check for overflow of entrySize
	if (os_add_overflow(DATA_QUEUE_ENTRY_HEADER_SIZE, dataSize, &entrySize)) {
		return kIOReturnOverrun;
	}

	// Check for underflow of (getQueueSize() - tail)
	if (queueSize < tail || queueSize < head) {
		return kIOReturnError;
	}

	if (tail >= head) {
		// Free space is split: [tail, queueSize) at the end plus [0, head)
		// at the beginning after a wrap.
		uint32_t endSpace = queueSize - tail;
		uint32_t endElements = endSpace / entrySize;
		uint32_t beginElements = head / entrySize;
		// NOTE(review): the `<=` appears to reserve one slot so the tail
		// cannot catch the head when wrapping (cf. the `>` in
		// EnqueueWithCoalesce) — confirm against the enqueue path.
		if (endElements < dataCount && endElements + beginElements <= dataCount) {
			return kIOReturnOverrun;
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		uint32_t space = head - tail - 1;
		uint32_t elements = space / entrySize;
		if (elements < dataCount) {
			return kIOReturnOverrun;
		}
	}

	return kIOReturnSuccess;
}
679 
680 size_t
GetDataQueueEntryHeaderSize()681 IODataQueueDispatchSource::GetDataQueueEntryHeaderSize()
682 {
683 	return DATA_QUEUE_ENTRY_HEADER_SIZE;
684 }
685