xref: /xnu-12377.1.9/iokit/IOKit/IOCircularDataQueueImplementation.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <IOKit/IOCircularDataQueue.h>
30 
31 __BEGIN_DECLS
32 
33 /*!
34  *  @header IOCircularDataQueueMemory
35  *
36  *  This header contains the memory layout for a circular data queue.
37  *
38  *  A circular data queue supports a single producer and zero or more consumers.
39  *
40  *
41  *  The producer does not wait for consumers to read the data.  If a
42  *  consumer falls behind, it will miss data.
43  *
44  *  The queue can be configured to support either fixed or variable sized
45  *  entries.
46  *  Currently only fixed is supported.
47  */
48 
49 /*
50  *  Fixed sized entry circular queue
51  *
52  +------------+
53  |    Queue   |
54  |   Header   |
55  +------------+ <--- First Data Entry
56  |Entry Header|
57  +------------+
58  |            |
59  |    Entry   |
60  |    Data    |
61  |            |
62  +------------+ <--- Second Data Entry
63  |Entry Header|
64  +------------+
65  |            |
66  |            |
67  |     ...    |
68  |     ...    |
69  |            |
70  |            |
71  |            |
72  +------------+ <--- Last Data Entry
73  |Entry Header|
74  +------------+
75  |            |
76  |    Entry   |
77  |    Data    |
78  |            |
79  +------------+
80  |
81  */
82 
/*
 * Pre-C11 compatibility shim: provide a `static_assert` macro that accepts
 * either (expr) or (expr, "message").  The overload selector expands to
 * `_static_assert_2_args` when a message string is supplied; otherwise the
 * stringified expression itself is used as the diagnostic message.
 * NOTE(review): this still relies on the compiler honoring `_Static_assert`
 * as an extension in pre-C11 modes (true for clang and gcc).
 */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ < 201112L
	#define _STATIC_ASSERT_OVERLOADED_MACRO(_1, _2, NAME, ...) NAME
	#define static_assert(...) _STATIC_ASSERT_OVERLOADED_MACRO(__VA_ARGS__, _static_assert_2_args, _static_assert_1_arg)(__VA_ARGS__)

	#define _static_assert_2_args(ex, str) _Static_assert((ex), str)
	#define _static_assert_1_arg(ex) _Static_assert((ex), #ex)
#endif

#define HEADER_16BYTE_ALIGNED 1 // do the entry and entry headers need to be 16 byte aligned for perf/correctness ?
92 
/*!
 * @typedef IOCircularDataQueueEntryHeaderInfo
 * @abstract The state of an entry in the circular data queue. The state is part of each entry header in the queue.
 * @discussion The entry state has the sequence number, data size, generation and current state of the entry. The state
 * is read/updated atomically as a single 128-bit value (the `val` view of the union).
 * @field seqNum A unique sequence number for this entry. The sequence number is monotonically increased on each enqueue
 * to the queue. Each entry in the queue has a unique sequence number.
 * @field dataSize The size of the data at this entry.
 * @field generation The queue generation which is copied from the queue header memory into the entry state on an
 * enqueue.
 * @field `_reserved` Unused
 * @field wrStatus Represents if the queue entry is currently being written to or not.
 */

// Bit widths of the fields packed into the 128-bit per-entry state word.
#define IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_SIZE 1
#define IOCIRCULARDATAQUEUE_ENTRY_STATE_GENERATION_SIZE 30
// NOTE(review): "DATATSIZE" looks like a typo for "DATASIZE"; left as-is
// since the macro name may already be referenced by other translation units.
#define IOCIRCULARDATAQUEUE_ENTRY_STATE_DATATSIZE_SIZE 32
#define IOCIRCULARDATAQUEUE_ENTRY_STATE_SEQNUM_SIZE 64
// #define IOCIRCULARDATAQUEUE_ENTRY_STATE_RESERVED_SIZE   1
// The reserved field absorbs whatever bits remain so the five fields always
// total exactly 128 bits (8 * sizeof(__uint128_t)).
#define IOCIRCULARDATAQUEUE_ENTRY_STATE_RESERVED_SIZE                                                                  \
    ((8 * sizeof(__uint128_t)) - IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_SIZE                                            \
     - IOCIRCULARDATAQUEUE_ENTRY_STATE_GENERATION_SIZE - IOCIRCULARDATAQUEUE_ENTRY_STATE_DATATSIZE_SIZE                \
     - IOCIRCULARDATAQUEUE_ENTRY_STATE_SEQNUM_SIZE)

// Union lets the packed bitfields be loaded/stored atomically as one
// 128-bit value while keeping a readable, named-field view of the layout.
typedef union {
	__uint128_t val;
	struct {
		__uint128_t seqNum : IOCIRCULARDATAQUEUE_ENTRY_STATE_SEQNUM_SIZE; // Sequence Number
		__uint128_t dataSize : IOCIRCULARDATAQUEUE_ENTRY_STATE_DATATSIZE_SIZE; // datasize
		__uint128_t generation : IOCIRCULARDATAQUEUE_ENTRY_STATE_GENERATION_SIZE; // generation
		__uint128_t _reserved : IOCIRCULARDATAQUEUE_ENTRY_STATE_RESERVED_SIZE; // reserved, currently not used
		__uint128_t wrStatus : IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_SIZE; // queue writing status
	} fields;
} IOCircularDataQueueEntryHeaderInfo;

// Values for the wrStatus field above.
#define IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_INPROGRESS (1)
#define IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_COMPLETE (0)

static_assert(IOCIRCULARDATAQUEUE_ENTRY_STATE_RESERVED_SIZE > 0, "unexpected reserved field size");
132 
/*!
 * @typedef IOCircularDataQueueEntryHeader
 * @abstract An entry in the circular data queue. The entry header is written at the beginning of each entry in the
 * queue.
 * @discussion The entry has the current state, sentinel, followed by the data at the entry.
 * @field info The info of the queue entry. This includes the size, sequence number, generation and write status of the
 * data at this entry.
 * @field sentinel unique value written to the queue entry. This is copied from the sentinel in the queue header memory
 * when an entry is written.
 * @field data Represents the beginning of the data region.  The address of the data field is a pointer to the start of
 * the data region.
 */
typedef struct {
	union {
		// Written/read with 128-bit atomic operations; the named-field
		// view documents the layout but is never accessed directly.
		volatile _Atomic __uint128_t headerInfoVal;
		IOCircularDataQueueEntryHeaderInfo __headerInfo;         // for clarity, unused
	};
	volatile uint64_t sentinel;
	uint64_t _pad; // pad for 16 byte alignment of data that follows
#if HEADER_16BYTE_ALIGNED
	uint8_t data[16]; // Entry data begins.  Aligned to 16 bytes.
#else
	uint8_t data[8]; // Entry data begins.  Aligned to 8 bytes.
#endif
} IOCircularDataQueueEntryHeader;

// Size of the header alone: subtract the placeholder data[] array, which
// only marks where the entry payload starts.
#if HEADER_16BYTE_ALIGNED
#define CIRCULAR_DATA_QUEUE_ENTRY_HEADER_SIZE (sizeof(IOCircularDataQueueEntryHeader) - 16)
#else
#define CIRCULAR_DATA_QUEUE_ENTRY_HEADER_SIZE (sizeof(IOCircularDataQueueEntryHeader) - 8)
#endif
164 
/*!
 * @typedef IOCircularDataQueueState
 * @abstract The current state of the circular data queue.
 * @discussion The queue state is part of the queue memory header. It has the current sequence number, next writing
 * index, generation and current reset and writing state of the queue. The queue state is read/updated atomically.
 * @field seqNum A monotonically increasing sequence number which is incremented for each enqueue.
 * @field wrIndex The next write position into the queue.
 * @field generation The generation of the queue. It is a monotonically increasing number, which is incremented on each
 * queue reset.
 * @field rstStatus The queue reset state. The bit is set if the queue is currently being reset.
 * @field wrStatus The queue writing state. The bit is set if an enqueue is in progress.
 */
// Fahad : I dont think we need a reset bit, since we are doing everything in one atomic op.

// Bit widths of the fields packed into the 128-bit queue state word.
#define IOCIRCULARDATAQUEUE_STATE_WRITE_SIZE 1
#define IOCIRCULARDATAQUEUE_STATE_RESET_SIZE 1
#define IOCIRCULARDATAQUEUE_STATE_GENERATION_SIZE 30
#define IOCIRCULARDATAQUEUE_STATE_WRITEINDEX_SIZE 32
#define IOCIRCULARDATAQUEUE_STATE_SEQNUM_SIZE 64
//#define IOCIRCULARDATAQUEUE_STATE_RESERVED_SIZE                                                                  \
//    ((8 * sizeof(__uint128_t)) - IOCIRCULARDATAQUEUE_STATE_WRITE_SIZE                                            \
//     - IOCIRCULARDATAQUEUE_STATE_GENERATION_SIZE - IOCIRCULARDATAQUEUE_STATE_WRITEINDEX_SIZE                \
//     - IOCIRCULARDATAQUEUE_STATE_SEQNUM_SIZE)

// Union lets the packed bitfields be loaded/stored atomically as one
// 128-bit value (see queueStateVal in IOCircularDataQueueMemory).
typedef union {
	__uint128_t val;
	struct {
		__uint128_t seqNum : IOCIRCULARDATAQUEUE_STATE_SEQNUM_SIZE; // Sequence Number
		__uint128_t wrIndex : IOCIRCULARDATAQUEUE_STATE_WRITEINDEX_SIZE; // write index
		__uint128_t generation : IOCIRCULARDATAQUEUE_STATE_GENERATION_SIZE; // generation
		// Fahad: We may not need reset.
		__uint128_t rstStatus : IOCIRCULARDATAQUEUE_STATE_RESET_SIZE; // queue reset status
		//        __uint128_t _rsvd : IOCIRCULARDATAQUEUE_STATE_RESERVED_SIZE; // reserved
		__uint128_t wrStatus : IOCIRCULARDATAQUEUE_STATE_WRITE_SIZE; // queue writing status
	} fields;
} IOCircularDataQueueState;

// Values for the wrStatus / rstStatus fields above.
#define IOCIRCULARDATAQUEUE_STATE_WRITE_INPROGRESS (1)
#define IOCIRCULARDATAQUEUE_STATE_WRITE_COMPLETE (0)
#define IOCIRCULARDATAQUEUE_STATE_RESET_INPROGRESS (1)
#define IOCIRCULARDATAQUEUE_STATE_RESET_COMPLETE (0)

// #define IOCircularDataQueueStateGeneration              (((uint32_t)1 << 30) - 1)
// Modulus for the 30-bit generation field: generations wrap in [0, 2^30).
#define IOCIRCULARDATAQUEUE_STATE_GENERATION_MAX (((uint32_t)1 << 30))
209 
210 // static_assert(IOCIRCULARDATAQUEUE_STATE_RESERVED_SIZE > 0,
211 //               "unexpected reserved field size");
212 
213 static_assert(IOCIRCULARDATAQUEUE_STATE_GENERATION_SIZE == IOCIRCULARDATAQUEUE_ENTRY_STATE_GENERATION_SIZE,
214     "mismatched generation sizes");
215 static_assert(IOCIRCULARDATAQUEUE_STATE_SEQNUM_SIZE == IOCIRCULARDATAQUEUE_ENTRY_STATE_SEQNUM_SIZE,
216     "mismatched sequenece number sizes");
217 
/*!
 * @typedef IOCircularDataQueueMemory
 * @abstract The queue memory header present at the start of the queue shared memory region.
 * @discussion The queue memory header contains the queue sentinel and state and is followed by the data region of the
 * queue.
 * NOTE(review): the @field entries below describing sizes/counts
 * (allocMemSize, memorySize, entryDataSize, dataSize, numEntries) do not
 * correspond to members of this struct -- they live in
 * IOCircularDataQueueDescription.  This doc block appears to have been
 * copied from there; confirm and prune.
 * @field sentinel unique value when the queue was created.
 * @field allocMemSize the allocated memory size of the queue including the queue header and the entries
 * @field memorySize  the memory size of the queue excluding the queue header
 * @field entryDataSize size of each entry in the queue including the entry header. The size is a multiple of 8 bytes
 * @field dataSize size of each entry in the queue excluding the entry header.
 * @field numEntries the number of fixed entries in the queue
 * @field `_padding` memory padding for alignment.
 * @field state the current state of the queue.
 * @field entries Represents the beginning of the data region.  The address of the data field is a pointer to the start
 * of the queue data region.
 */

typedef struct IOCircularDataQueueMemory {
	uint64_t sentinel;
	uint64_t _padding; // since we want it to be 16 bytes aligned below this
	union {
		volatile _Atomic __uint128_t queueStateVal;           // needs to be 16 bytes aligned.
		IOCircularDataQueueState  __queueState;               // for clarity, unused
	};
	// Legacy one-element-array idiom marking the start of the entry region;
	// the real entry count and stride come from the queue description.
	IOCircularDataQueueEntryHeader entries[1]; // Entries begin.  Aligned to 16 bytes.
} IOCircularDataQueueMemory;

// Size of the queue header alone (the placeholder entries[1] is excluded).
#define CIRCULAR_DATA_QUEUE_MEMORY_HEADER_SIZE                                                                         \
    (sizeof(IOCircularDataQueueMemory) - sizeof(IOCircularDataQueueEntryHeader))
247 
/*!
 * @typedef IOCircularDataQueueMemoryCursor
 * @abstract The circular data queue cursor struct.
 * @discussion This struct represents a reader's reference to a position in the queue. Each client holds an instance of
 * this in its process indicating its current reading position in the queue. The cursor holds uniquely identifying
 * information for the queue entry.
 * @field generation  the generation for the entry data at the position in the queue. This generation is only changed
 * when the queue is reset.
 * @field position the position in the queue the cursor is at
 * @field sequenceNum  The unique number for the data at the cursor position. The sequence number is unique for each
 * entry in the queue.
 *
 */
typedef struct IOCircularDataQueueMemoryCursor {
	uint32_t generation; // uint32_t seems a little excessive right now, since we dont expect these many resets. but
	                     // lets leave it for now.
	uint32_t position; // entry index; valid values are in [0, numEntries)
	uint64_t sequenceNum;
} IOCircularDataQueueMemoryCursor;
267 
268 
/*!
 * @typedef IOCircularDataQueueDescription
 * @abstract The circular data queue header shadow struct.
 * @discussion This struct represents the queue header shadow. Each client has a copy of this struct in its process.
 * This is used to detect any memory corruption of the shared memory queue header. This struct needs to be shared from
 * the creator of the queue to the clients via an out of band mechanism.
 * @field sentinel unique value written to the queue header memory and each queue entry.
 * @field allocMemSize the allocated memory size of the queue including the queue header
 * @field entryDataSize size of each entry in the queue including the entry header. The size is a multiple of 8 bytes
 * @field memorySize  the memory size of the queue excluding the queue header
 * @field numEntries the number of fixed entries in the queue
 * @field dataSize the client provided data size excluding the per entry header
 * @field padding explicit trailing padding (keeps the struct size a multiple of 8 bytes)
 */
typedef struct IOCircularDataQueueDescription {
	uint64_t sentinel;
	uint32_t allocMemSize; // total allocated size of the queue including the queue header.
	uint32_t entryDataSize; // size of each queue entry including the per entry header.
	uint32_t memorySize; // memory size of the queue (excluding the queue header)
	uint32_t numEntries;
	uint32_t dataSize; // the client provided data size excluding the per entry header.
	uint32_t padding;
} IOCircularDataQueueDescription;

// Key under which the queue description is published for clients.
#define kIOCircularQueueDescriptionKey  "IOCircularQueueDescription"
293 
294 
#if !KERNEL
/*
 * IORound and IOTrunc convenience functions, in the spirit
 * of vm's round_page() and trunc_page().
 */
// Round `value` up to the next multiple of `multiple`.
// NOTE(review): both arguments are evaluated more than once -- do not pass
// expressions with side effects.
#define IORound(value, multiple) ((((value) + (multiple)-1) / (multiple)) * (multiple))

// Kernel-style allocation wrappers mapped onto libc for user-space builds.
// calloc() zero-initializes, matching kalloc zeroed-allocation behavior.
#define IONew(type, count) (type *)calloc(count, sizeof(type))
#define IODelete(p, type, count) free(p)

// libkern/os/base.h
// Pointer-authentication annotations (arm64e); expand to nothing when
// ptrauth is unavailable so declarations stay portable.
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#define OS_PTRAUTH_SIGNED_PTR(type) __ptrauth(ptrauth_key_process_independent_data, 1, ptrauth_string_discriminator(type))
#define OS_PTRAUTH_SIGNED_PTR_AUTH_NULL(type) __ptrauth(ptrauth_key_process_independent_data, 1, ptrauth_string_discriminator(type), "authenticates-null-values")
#define OS_PTRAUTH_DISCRIMINATOR(str) ptrauth_string_discriminator(str)
#define __ptrauth_only
#else //  __has_feature(ptrauth_calls)
#define OS_PTRAUTH_SIGNED_PTR(type)
#define OS_PTRAUTH_SIGNED_PTR_AUTH_NULL(type)
#define OS_PTRAUTH_DISCRIMINATOR(str) 0
#define __ptrauth_only __unused
#endif // __has_feature(ptrauth_calls)
#endif /* !KERNEL */
319 
#pragma mark - Debugging

// Format-string / argument-list helper pairs for the queue_debug_* macros.
// NOTE(review): QUEUE_ARGS and ENTRY_ARGS reference fields (guard,
// fixed.latestIndex, writingIndex, position) that do not exist on the structs
// declared in this header, and CURSOR_FORMAT prints the uint32_t
// generation/position fields with PRIu64.  This only compiles because the
// logging branch below is preprocessed out (#if 1); fix before enabling.
#define QUEUE_FORMAT "Queue(%" PRIu64 " gen:%" PRIu64 " pos:%" PRIu64 " next:%" PRIu64 ")"
#define QUEUE_ARGS(q) q->guard, q->generation, q->fixed.latestIndex, q->fixed.writingIndex

#define CURSOR_FORMAT "Cursor(%p gen:%" PRIu64 " pos:%" PRIu64 ")"
#define CURSOR_ARGS(c) c, c->generation, c->position

#define ENTRY_FORMAT "Entry(%" PRIu64 " gen:%" PRIu64 " pos:%" PRIu64 ")"
#define ENTRY_ARGS(e) e->guard, e->generation, e->position

// Debug logging is compiled out by default (#if 1).  Flip to the #else
// branch to route through os_log_debug with file/line/function context.
#if 1
#define queue_debug_error(fmt, ...)
#define queue_debug_note(fmt, ...)
#define queue_debug_trace(fmt, ...)
#else
#define queue_debug_error(fmt, ...)                                                                                    \
    {                                                                                                                  \
	os_log_debug(LOG_QUEUE, "#ERROR %s:%d %s " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__);                  \
    }
#define queue_debug_note(fmt, ...)                                                                                     \
    {                                                                                                                  \
	os_log_debug(LOG_QUEUE, "#NOTE %s:%d %s " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__);                   \
    }
#define queue_debug_trace(fmt, ...)                                                                                    \
    {                                                                                                                  \
	os_log_debug(LOG_QUEUE, "#TRACE %s:%d %s " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__);                  \
    }
#endif
349 
350 #if HEADER_16BYTE_ALIGNED
351 static_assert(offsetof(IOCircularDataQueueEntryHeader, data) % sizeof(__uint128_t) == 0,
352     "IOCircularDataQueueEntryHeader.data is not 16-byte aligned!");
353 #else
354 static_assert(offsetof(IOCircularDataQueueEntryHeader, data) % sizeof(uint64_t) == 0,
355     "IOCircularDataQueueEntryHeader.data is not 8-byte aligned!");
356 #endif
357 
358 static_assert(sizeof(IOCircularDataQueueState) == sizeof(__uint128_t), "Unexpected padding");
359 static_assert(offsetof(IOCircularDataQueueMemory, queueStateVal) % sizeof(__uint128_t) == 0,
360     "IOCircularDataQueueMemory.entries is not 16-byte aligned!");
361 
362 #if HEADER_16BYTE_ALIGNED
363 static_assert(offsetof(IOCircularDataQueueMemory, entries) % sizeof(__uint128_t) == 0,
364     "IOCircularDataQueueMemory.entries is not 16-byte aligned!");
365 #else
366 static_assert(offsetof(IOCircularDataQueueMemory, entries) % sizeof(uint64_t) == 0,
367     "IOCircularDataQueueMemory.entries is not 8-byte aligned!");
368 #endif
369 
/*!
 * @typedef IOCircularDataQueue
 * @abstract A fixed entry size circular queue that supports multiple concurrent readers and a single writer.
 * @discussion The queue currently supports fixed size entries. The queue memory size is configured at init when the
 * number of entries and size of each entry is specified and cannot be resized later. Since the queue is a circular
 * buffer, the writer can potentially overwrite an entry while a reader is still reading it. The queue provides facility
 * to check for data integrity after reading the entry is complete. There is no support for sending notifications to
 * readers when data is enqueued into an empty queue by the writer. The queue supports a "pull model" for reading data
 * from the queue. The queue can be used for passing data from user space to kernel and vice-versa.
 * @field queueHeaderShadow    The queue header shadow (trusted local copy used for corruption checks)
 * @field queueCursor    The queue cursor (this client's read position)
 * @field queueMemory    Pointer to the queue shared memory region
 * @field iomd    (kernel only) buffer memory descriptor backing the shared region
 * @field connect    (user space only) connection used to map/unmap the shared region
 * @field memoryType    (user space only) memory type passed to IOConnectUnmapMemory
 */
typedef struct IOCircularDataQueue {
	IOCircularDataQueueMemoryCursor queueCursor;
	IOCircularDataQueueMemory * OS_PTRAUTH_SIGNED_PTR("IOCircularDataQueue.queueMemory") queueMemory;
	IOCircularDataQueueDescription queueHeaderShadow;
#if KERNEL
	IOBufferMemoryDescriptor * OS_PTRAUTH_SIGNED_PTR("IOCircularDataQueue.iomd") iomd;
#else /* KERNEL */
	io_connect_t connect;
	uint32_t memoryType;
#endif /* !KERNEL */
} IOCircularDataQueue;
396 
397 
// On arm64 user space, compile the queue helpers with the LSE2 target
// feature so the 16-byte state accesses use the guarantees of FEAT_LSE2.
// NOTE(review): assumes user-space deployment targets are LSE2-capable CPUs
// -- confirm minimum supported hardware.
#if defined(__arm64__) && !KERNEL
#define ATTR_LSE2 __attribute__((target("lse2")))
#else
#define ATTR_LSE2
#endif /* defined(__arm64__) && !KERNEL */
403 
404 #pragma mark - Queue
405 
406 static bool ATTR_LSE2
_isQueueMemoryCorrupted(IOCircularDataQueue * queue)407 _isQueueMemoryCorrupted(IOCircularDataQueue *queue)
408 {
409 	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
410 	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
411 
412 	const size_t queueSentinel = queueMemory->sentinel;
413 	if (os_unlikely(queueSentinel != queueHeaderShadow->sentinel)) {
414 		return true;
415 	}
416 	return false;
417 }
418 
419 inline static bool ATTR_LSE2
_isCursorPositionInvalid(IOCircularDataQueue * queue)420 _isCursorPositionInvalid(IOCircularDataQueue *queue)
421 {
422 //	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
423 	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
424 	IOCircularDataQueueMemoryCursor const *cursor = &queue->queueCursor;
425 
426 	if (os_unlikely(cursor->position >= queueHeaderShadow->numEntries)) {
427 		return true;
428 	}
429 
430 	return false;
431 }
432 
433 inline __unused static bool ATTR_LSE2
_isEntryOutOfBounds(IOCircularDataQueue * queue,IOCircularDataQueueEntryHeader * entry)434 _isEntryOutOfBounds(IOCircularDataQueue *queue, IOCircularDataQueueEntryHeader *entry)
435 {
436 	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
437 	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
438 //	IOCircularDataQueueMemoryCursor const *cursor = &queue->queueCursor;
439 
440 	bool ret = false;
441 	IOCircularDataQueueEntryHeader *firstEntry = (IOCircularDataQueueEntryHeader *)(&queueMemory->entries[0]);
442 	IOCircularDataQueueEntryHeader *lastEntry
443 	        = (IOCircularDataQueueEntryHeader *)(uintptr_t)((uint8_t *)&queueMemory->entries[0]
444 	    + ((queueHeaderShadow->numEntries - 1) * queueHeaderShadow->entryDataSize));
445 
446 	// SANITY CHECK - Final check to ensure the 'entry' pointer is
447 	// within the queueMemory allocation before we begin writing.
448 	if (os_unlikely(entry < firstEntry || entry > lastEntry)) {
449 		ret = true;
450 	}
451 
452 	return ret;
453 }
454 
455 
#if !KERNEL
/*!
 * @function isQueueMemoryValid
 * Verify if the queue header shadow matches the queue header in shared memory.
 * @param queue Handle to the queue.
 * @return `true` if the queue header shadow matches the queue header in shared memory, else `false`.
 *
 */

// Thin positive-sense wrapper around _isQueueMemoryCorrupted().
static bool ATTR_LSE2
isQueueMemoryValid(IOCircularDataQueue *queue)
{
	return _isQueueMemoryCorrupted(queue) == false;
}
#endif /* !KERNEL */
471 
472 /*!
473  * @function destroyQueueMem
474  * @abstract Function that destroys a previously created IOCircularDataQueueMemory instance.
475  * @param queue Handle to the queue.
476  *  @return
477  *  - `kIOReturnSuccess` if the queue was succesfully destroyed.
478  *  - `kIOReturnBadArgument` if an invalid queue was provided.
479  */
480 
481 static IOReturn ATTR_LSE2
destroyQueueMem(IOCircularDataQueue * queue)482 destroyQueueMem(IOCircularDataQueue *queue)
483 {
484 	IOReturn ret = kIOReturnBadArgument;
485 	if (queue != NULL) {
486 #if KERNEL
487 		OSSafeReleaseNULL(queue->iomd);
488 #else /* !KERNEL */
489 		IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
490 		IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
491 		if (queueMemory) {
492 			ret = IOConnectUnmapMemory(queue->connect, queue->memoryType,
493 			    mach_task_self(), (mach_vm_address_t) queueMemory);
494 //			assert(KERN_SUCCESS == ret);
495 			queue->queueMemory = NULL;
496 		}
497 #endif
498 		ret = kIOReturnSuccess;
499 	}
500 
501 	return ret;
502 }
503 
504 static IOReturn ATTR_LSE2
_reset(IOCircularDataQueue * queue)505 _reset(IOCircularDataQueue *queue)
506 {
507 	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
508 	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
509 
510 	if (queueMemory == NULL || queueHeaderShadow == NULL) {
511 		return kIOReturnBadArgument;
512 	}
513 
514 	const size_t queueEntryDataSize = queueHeaderShadow->entryDataSize;
515 	if (!queueEntryDataSize) {
516 		return kIOReturnUnsupported;
517 	}
518 
519 	IOCircularDataQueueState currState;
520 	currState.val = atomic_load_explicit(&queueMemory->queueStateVal, memory_order_acquire);
521 
522 	if (os_unlikely(currState.fields.wrStatus & IOCIRCULARDATAQUEUE_STATE_WRITE_INPROGRESS)) {
523 		// Another thread is modifying the queue
524 		return kIOReturnBusy;
525 	}
526 
527 	uint32_t currGeneration = currState.fields.generation;
528 	uint32_t newGen = (currGeneration + 1) % IOCIRCULARDATAQUEUE_STATE_GENERATION_MAX;
529 
530 	IOCircularDataQueueState newState;
531 	newState.fields.generation = newGen;
532 	newState.fields.wrIndex = 0;
533 	newState.fields.seqNum = UINT64_MAX; // since we first increment the seq num on an enqueue.
534 
535 	if (!atomic_compare_exchange_strong(&queueMemory->queueStateVal, &currState.val, newState.val)) {
536 		return kIOReturnBusy;
537 	}
538 
539 	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
540 		return kIOReturnBadMedia;
541 	}
542 
543 	queue_debug_trace("Reset " QUEUE_FORMAT, QUEUE_ARGS(queueMemory));
544 	return kIOReturnSuccess;
545 }
546 
/*!
 * @function _enqueueInternal
 * @abstract Internal function for enqueuing a new entry on the queue.
 * @discussion This method adds a new data entry of dataSize to the queue.  It sets the size parameter of the entry
 * pointed to by the tail value and copies the memory pointed to by the data parameter in place in the queue.  Once that
 * is done, it moves the tail to the next available location.  When attempting to add a new entry towards the end of the
 * queue and there isn't enough space at the end, it wraps back to the beginning.<br>
 * @param queue Handle to the queue.
 * @param data Pointer to the data to be added to the queue.
 * @param dataSize Size of the data pointed to by data.
 * @param earlyExitForTesting early exit flag used for testing only.
 *  @return
 *  - `kIOReturnSuccess` on success.
 *  - Other values indicate an error.
 */

static IOReturn ATTR_LSE2
_enqueueInternal(IOCircularDataQueue *queue,
    const void *data,
    size_t dataSize,
    int earlyExitForTesting)
{
	// NOTE(review): earlyExitForTesting is not referenced in this body --
	// confirm whether the test hook was removed intentionally.
	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
//	IOCircularDataQueueMemoryCursor const *cursor = &queue->queueCursor;

	// Argument validation: mapped memory, non-empty payload.
	if (queueMemory == NULL || data == NULL || dataSize == 0 || queueHeaderShadow == NULL) {
		return kIOReturnBadArgument;
	}

	// Reject a queue whose shared header no longer matches the trusted shadow.
	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	// Payload must fit in the fixed per-entry data area.
	if (os_unlikely(dataSize > queueHeaderShadow->dataSize)) {
		return kIOReturnBadArgument;
	}

	const size_t queueEntryDataSize = queueHeaderShadow->entryDataSize;

	// A zero entry stride means the queue was never configured for fixed
	// size entries (the only mode currently supported).
	if (!queueEntryDataSize) {
		return kIOReturnUnsupported;
	}

	const size_t queueAllocMemSize = queueHeaderShadow->allocMemSize;
	const uint32_t queueNumEntries = queueHeaderShadow->numEntries;

	// Do not allow instruction re-ordering prior to the header check.
	os_compiler_barrier();

	// Snapshot the 128-bit queue state with acquire ordering so subsequent
	// reads of entry memory observe writes published before this state.
	IOCircularDataQueueState currState;
	currState.val = atomic_load_explicit(&queueMemory->queueStateVal, memory_order_acquire);

	if (os_unlikely(currState.fields.wrStatus & IOCIRCULARDATAQUEUE_STATE_WRITE_INPROGRESS)) {
		// Another thread is modifying the queue
		return kIOReturnBusy;
	}

	//            size_t queueEntriesBufferSize = queueMemory->allocMemSize - CIRCULAR_DATA_QUEUE_MEMORY_HEADER_SIZE;
	// Next slot wraps around the fixed-size ring; next sequence number is
	// only published after the payload is written.
	uint32_t writeIndex = currState.fields.wrIndex;
	uint64_t nextWriteIndex = (writeIndex + 1) % queueNumEntries;
	uint64_t nextSeqNum = currState.fields.seqNum + 1;
	if (os_unlikely(nextSeqNum == UINT64_MAX)) {
		// End of the world. How many enqueues are you trying to do !!!
		// (UINT64_MAX is reserved as the post-reset sentinel value.)
//        abort();
		return kIOReturnOverrun;
	}

	// Entry address = base of the entry region + slot index * entry stride.
	__auto_type entry
	        = (IOCircularDataQueueEntryHeader *)(uintptr_t)((uint8_t *)&queueMemory->entries[0] + (writeIndex * queueEntryDataSize));
	//                printf("entry=%p\n", (void *)entry);

	// SANITY CHECK - Final check to ensure the 'entry' pointer is
	// within the queueMemory allocation before we begin writing.
	if (os_unlikely((uint8_t *)entry < (uint8_t *)(&queueMemory->entries[0])
	    || (uint8_t *)entry >= (uint8_t *)queueMemory + queueAllocMemSize)) {
		return kIOReturnBadArgument;
	}

	//            if (os_unlikely(_isEntryOutOfBounds(queueHeaderShadow, queueMemory, entry) )) {
	//                ret = kIOReturnBadArgument;
	//                break;
	//            }

	os_compiler_barrier();

	// All checks passed. Set the write bit.

	// Claim the writer slot via CAS: fails (busy) if any other writer
	// changed the state since our snapshot above.
	IOCircularDataQueueState newState = currState;
	newState.fields.wrStatus = IOCIRCULARDATAQUEUE_STATE_WRITE_INPROGRESS;
	// lets not change the writeIndex and seq num here.
	//            newState.fields.wrIndex = nextWriteIndex;
	//    newState.fields.seqNum = currState.fields.seqNum + 1; // its ok even if we ever rollover UINT64_MAX!!

	if (!atomic_compare_exchange_strong(&queueMemory->queueStateVal, &currState.val, newState.val)) {
		// someone else is modifying the queue
		return kIOReturnBusy;
	}

	// Update the entry header info
	// Mark the entry in-progress first so readers racing with the copy
	// below can detect a torn read of this slot.
	IOCircularDataQueueEntryHeaderInfo enHeaderInfo;
	enHeaderInfo.val = 0;
	enHeaderInfo.fields.wrStatus = IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_INPROGRESS;
	enHeaderInfo.fields.generation = currState.fields.generation;
	//    enHeaderInfo.fields.seqNum = newState.fields.seqNum;
	enHeaderInfo.fields.seqNum = nextSeqNum;
	enHeaderInfo.fields.dataSize = dataSize;
	atomic_store_explicit(&entry->headerInfoVal, enHeaderInfo.val, memory_order_release);

	// Copy the payload, then publish the entry as complete (release so
	// the payload bytes are visible before the completed header).
	entry->sentinel = queueHeaderShadow->sentinel;
	memcpy(entry->data, data, dataSize);
	enHeaderInfo.fields.wrStatus = IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_COMPLETE;
	atomic_store_explicit(&entry->headerInfoVal, enHeaderInfo.val, memory_order_release);

	// Finally publish the new queue state: clear the write bit and advance
	// the write index and sequence number in one atomic store.
	IOCircularDataQueueState finalState = newState;
	finalState.fields.wrStatus = IOCIRCULARDATAQUEUE_STATE_WRITE_COMPLETE;
	// Lets actually update the write index and seq num
	finalState.fields.wrIndex = nextWriteIndex;
	finalState.fields.seqNum = nextSeqNum;
	atomic_store_explicit(&queueMemory->queueStateVal, finalState.val, memory_order_release);

	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	return kIOReturnSuccess;
}
674 
675 /*!
676  * @function enqueueQueueMem
677  * @abstract Enqueues a new entry on the queue.
678  * @discussion This method adds a new data entry of dataSize to the queue.  It sets the size parameter of the entry
679  * pointed to by the write index  and copies the memory pointed to by the data parameter in place in the queue.  Once
680  * that is done, it moves the write index to the next index.
681  * @param queue Handle to the queue.
682  * @param data Pointer to the data to be added to the queue.
683  * @param dataSize Size of the data pointed to by data.
684  *  @return
685  *  - `kIOReturnSuccess` on success.
686  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
687  *  - `kIOReturnBadArgument` if an invalid queue was provided.
 *  - `kIOReturnBusy` if another thread is enqueueing concurrently
689  *  - `kIOReturnUnsupported` if the queue has not been configured to support fixed size entries. Variable size is
690  * currently not supported
691  *  - Other values indicate an error.
692  */
693 
694 static IOReturn ATTR_LSE2
enqueueQueueMem(IOCircularDataQueue * queue,const void * data,size_t dataSize)695 enqueueQueueMem(IOCircularDataQueue *queue,
696     const void *data,
697     size_t dataSize)
698 {
699 	return _enqueueInternal(queue, data, dataSize, 0);
700 }
701 
702 /*!
703  * @function isDataEntryValidInQueueMem
704  * Verify if the data at the cursor position is still valid. Call this function after having read the data from the
705  * queue, since the buffer could potentially have been overwritten while being read. <br>
706  * @param queue Handle to the queue.
707  *  @return
708  *  - `kIOReturnSuccess` if the data at the cursor position was valid.
709  *  - `kIOReturnOverrun` if the entry at the cursor position is no longer valid and is
710  *     potentially overwritten. Call getLatestInQueueMem to get the latest data and cursor position.
711  *  - `kIOReturnAborted` if the cursor has become invalid, possibly due to a reset of the queue.
712  *  - `kIOReturnBadArgument` if an invalid param was passed.
713  *  - `kIOReturnBadMedia` if the queueMemory is corrupted.
714  *
715  */
716 
static IOReturn ATTR_LSE2
isDataEntryValidInQueueMem(IOCircularDataQueue *queue)
{
	/*
	 * Consumer-side re-validation: after the caller has read entry data in
	 * place (zero-copy getLatest/getNext/getPrev), confirm that the entry at
	 * the cursor position was not overwritten or invalidated by the producer
	 * while it was being read.
	 */
	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
	IOCircularDataQueueMemoryCursor const *cursor = &queue->queueCursor;

	if (os_unlikely(queueMemory == NULL || queueHeaderShadow == NULL)) {
		return kIOReturnBadArgument;
	}

	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	if (os_unlikely(_isCursorPositionInvalid(queue))) {
		return kIOReturnBadArgument;
	}

	// Snapshot the producer-maintained queue state. The acquire load pairs
	// with the producer's release stores in the enqueue path.
	IOCircularDataQueueState currState;
	currState.val = atomic_load_explicit(&queueMemory->queueStateVal, memory_order_acquire);

	// NOTE: this rstStatus field may be removable since it is not otherwise
	// used here; the generation check below also catches resets.
	if (os_unlikely(currState.fields.rstStatus & IOCIRCULARDATAQUEUE_STATE_RESET_INPROGRESS)) {
		// Another thread is resetting the queue
		return kIOReturnBusy;
	}

	// Generation mismatch means the queue was reset since the cursor was
	// positioned; the cursor can no longer be trusted.
	uint32_t queueGeneration = currState.fields.generation;
	if (queueGeneration != cursor->generation) {
		//        return kIOReturnOverrun;
		return kIOReturnAborted;
	}

	// Recompute the entry address from the cursor position (fixed-size slots).
	const size_t queueAllocMemSize = queueHeaderShadow->allocMemSize;
	const size_t queueEntryDataSize = queueHeaderShadow->entryDataSize;
	__auto_type entry = (IOCircularDataQueueEntryHeader *)(uintptr_t)((uint8_t *)&queueMemory->entries[0]
	    + (cursor->position * queueEntryDataSize));

	// SANITY CHECK - Final check to ensure the 'entry' pointer is
	// within the queueMemory entries buffer before we begin writing.
	if (os_unlikely((uint8_t *)entry < (uint8_t *)(&queueMemory->entries[0])
	    || (uint8_t *)entry >= (uint8_t *)queueMemory + queueAllocMemSize)) {
		queue_debug_error("Out of Bounds! " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT, QUEUE_ARGS(queueMemory),
		    CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadArgument;
	}

	os_compiler_barrier();

	// Per-entry sentinel mismatch: the shared memory was corrupted or
	// tampered with.
	if (os_unlikely(entry->sentinel != queueHeaderShadow->sentinel)) {
		queue_debug_error("entry->sentinel != queueMemory->sentinel " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadMedia;
	}

	// Entry written under an older generation: overwritten after a reset.
	IOCircularDataQueueEntryHeaderInfo enHeaderInfo;
	enHeaderInfo.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
	uint32_t entryGeneration = enHeaderInfo.fields.generation;
	if (os_unlikely(entryGeneration != queueGeneration)) {
		queue_debug_note("entryGeneration != queueGeneration " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnOverrun;
	}

	// The producer is rewriting this slot, or the slot now carries a
	// different sequence number than the cursor recorded: data was
	// overwritten while the caller was reading it.
	if (os_unlikely(enHeaderInfo.fields.wrStatus == IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_INPROGRESS
	    || enHeaderInfo.fields.seqNum != cursor->sequenceNum)) {
		return kIOReturnOverrun;
	}

	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	return kIOReturnSuccess;
}
793 
794 /*!
795  * @function setCursorLatestInQueueMem
796  * Set the current cursor position to the latest entry in the queue. This only updates the cursor and does not read the
797  * data from the queue. If nothing has been enqueued into the queue yet, this returns an error.
798  * @param queue Handle to the queue.
799  *  @return
800  *  - `kIOReturnSuccess` if the cursor position was updated to the latest.
801  *  - `kIOReturnUnderrun` if nothing has ever been enqueued into the queue since there is no latest entry.
802  *  - `kIOReturnAborted` if the queue is in an irrecoverable state.
 *  - `kIOReturnBadArgument` if an invalid argument is passed.
804  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
805  *  - Other values indicate an error.
806  *
807  */
808 
static IOReturn ATTR_LSE2
setCursorLatestInQueueMem(IOCircularDataQueue *queue)
{
	/*
	 * Move the consumer cursor to the most recently completed entry (the
	 * slot just behind the producer's write index). Only the cursor is
	 * updated; no entry data is read out or copied.
	 */
	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
	IOCircularDataQueueMemoryCursor *cursor = &queue->queueCursor;

	if (queueMemory == NULL || queueHeaderShadow == NULL) {
		return kIOReturnBadArgument;
	}

	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	const size_t queueAllocMemSize = queueHeaderShadow->allocMemSize;
	const size_t queueEntryDataSize = queueHeaderShadow->entryDataSize;

	// Snapshot the queue state (acquire pairs with the producer's release
	// stores in the enqueue path).
	IOCircularDataQueueState currState;
	currState.val = atomic_load_explicit(&queueMemory->queueStateVal, memory_order_acquire);

	if (os_unlikely(currState.fields.rstStatus & IOCIRCULARDATAQUEUE_STATE_RESET_INPROGRESS)) {
		// Another thread is resetting the queue
		return kIOReturnBusy;
	}

	// seqNum == UINT64_MAX is the initial sentinel value: no entry exists yet.
	if (os_unlikely(currState.fields.seqNum == UINT64_MAX)) {
		// Nothing has ever been written to the queue yet.
		return kIOReturnUnderrun;
	}

	// The latest completed entry is one slot behind the write index,
	// wrapping to the last slot when wrIndex is 0.
	uint32_t queueGeneration = currState.fields.generation;
	uint32_t readIndex
	        = (currState.fields.wrIndex > 0) ? (currState.fields.wrIndex - 1) : (queueHeaderShadow->numEntries - 1);

	__auto_type entry
	        = (IOCircularDataQueueEntryHeader *)(uintptr_t)((uint8_t *)&queueMemory->entries[0] + (readIndex * queueEntryDataSize));

	// SANITY CHECK - Final check to ensure the 'entry' pointer is
	// within the queueMemory entries buffer before we begin writing.
	if (os_unlikely((uint8_t *)entry < (uint8_t *)(&queueMemory->entries[0])
	    || (uint8_t *)entry >= (uint8_t *)queueMemory + queueAllocMemSize)) {
		queue_debug_error("Out of Bounds! " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT, QUEUE_ARGS(queueMemory),
		    CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnAborted;
	}

	os_compiler_barrier();

	// Per-entry sentinel mismatch: shared memory corrupted or tampered with.
	if (os_unlikely(entry->sentinel != queueHeaderShadow->sentinel)) {
		queue_debug_error("entry->sentinel != queueMemory->sentinel " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadMedia;
	}

	// The entry's generation must match the queue generation read above;
	// otherwise the queue was reset in between.
	IOCircularDataQueueEntryHeaderInfo enHeaderInfo;
	enHeaderInfo.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
	uint32_t entryGeneration = enHeaderInfo.fields.generation;
	if (os_unlikely(entryGeneration != queueGeneration)) {
		queue_debug_note("entryGeneration != queueGeneration " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnAborted;
	}

	// Commit the new cursor position (position, generation, sequence number).
	cursor->position = readIndex;
	cursor->generation = entryGeneration;
	cursor->sequenceNum = enHeaderInfo.fields.seqNum;

	return kIOReturnSuccess;
}
879 
static IOReturn ATTR_LSE2
_getLatestInQueueMemInternal(IOCircularDataQueue *queue,
    void **data,
    size_t *size,
    bool copyMem)
{
	/*
	 * Shared implementation behind getLatestInQueueMem / copyLatestInQueueMem.
	 * Positions the cursor on the most recently completed entry and either
	 * returns a pointer into the shared queue memory (copyMem == false) or
	 * copies the entry data into the caller's buffer (copyMem == true).
	 * Retries a bounded number of times if the producer overwrites the entry
	 * mid-read; returns kIOReturnTimeout when all retries are exhausted.
	 */
	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
	IOCircularDataQueueMemoryCursor *cursor = &queue->queueCursor;

	// Default result if the retry loop never completes a stable read.
	IOReturn ret = kIOReturnTimeout;
	if (queueMemory == NULL || data == NULL || size == NULL || queueHeaderShadow == NULL) {
		return kIOReturnBadArgument;
	}

	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	const size_t kNumRetries = 5; // Number of retries if the latest index data gets overwritten by a writer.
	size_t retry = kNumRetries;
	const size_t queueAllocMemSize = queueHeaderShadow->allocMemSize;
	const size_t queueEntryDataSize = queueHeaderShadow->entryDataSize;
	size_t inSize;

	// Remember the caller's buffer capacity; *size is reused as the output size.
	inSize = *size;
	do {
		*size = 0;
		retry--;
		IOCircularDataQueueState currState;
		// NOTE(review): this load uses memory_order_consume while the other
		// readers in this file use memory_order_acquire - confirm the
		// difference is intentional.
		currState.val = atomic_load_explicit(&queueMemory->queueStateVal, memory_order_consume);

		if (os_unlikely(currState.fields.rstStatus & IOCIRCULARDATAQUEUE_STATE_RESET_INPROGRESS)) {
			// Another thread is resetting the queue
			return kIOReturnBusy;
		}

		// seqNum == UINT64_MAX is the initial sentinel value: no entry yet.
		if (os_unlikely(currState.fields.seqNum == UINT64_MAX)) {
			// Nothing has ever been written to the queue yet.
			return kIOReturnUnderrun;
		}

		// The latest completed entry is one slot behind the write index,
		// wrapping to the last slot when wrIndex is 0.
		uint32_t queueGeneration = currState.fields.generation;
		uint32_t readIndex
		        = (currState.fields.wrIndex > 0) ? (currState.fields.wrIndex - 1) : (queueHeaderShadow->numEntries - 1);

		__auto_type entry = (IOCircularDataQueueEntryHeader *)(uintptr_t)((uint8_t *)&queueMemory->entries[0]
		    + (readIndex * queueEntryDataSize));

		// SANITY CHECK - Final check to ensure the 'entry' pointer is
		// within the queueMemory entries buffer before we begin writing.
		if (os_unlikely((uint8_t *)entry < (uint8_t *)(&queueMemory->entries[0])
		    || (uint8_t *)entry >= (uint8_t *)queueMemory + queueAllocMemSize)) {
			queue_debug_error("Out of Bounds! " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
			    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
			return kIOReturnBadArgument;
		}

		os_compiler_barrier();

		// Per-entry sentinel mismatch: shared memory corrupted/tampered.
		if (os_unlikely(entry->sentinel != queueHeaderShadow->sentinel)) {
			queue_debug_error("entry->sentinel != queueMemory->sentinel " QUEUE_FORMAT " " CURSOR_FORMAT
			    " " ENTRY_FORMAT,
			    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
			return kIOReturnBadMedia;
		}

		IOCircularDataQueueEntryHeaderInfo enHeaderInfo;
		enHeaderInfo.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
		uint32_t entryGeneration = enHeaderInfo.fields.generation;
		/* Since the time we read the queue header, was the queue
		 *   - reset
		 *   - the entry is being overwritten
		 *   - the entry was overwritten and hence the seq numbers don't match anymore.
		 *
		 *  Lets retry in such a case
		 */
		if (os_unlikely(entryGeneration != queueGeneration
		    || enHeaderInfo.fields.wrStatus == IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_INPROGRESS
		    || currState.fields.seqNum != enHeaderInfo.fields.seqNum)) {
			continue;
		}

		// The entry looked stable; commit the cursor to it.
		cursor->position = readIndex;
		cursor->generation = entryGeneration;
		cursor->sequenceNum = enHeaderInfo.fields.seqNum;

		// A recorded size larger than the per-entry capacity means the
		// header is corrupt or was overrun.
		if (os_unlikely(enHeaderInfo.fields.dataSize > queueHeaderShadow->entryDataSize)) {
			ret = kIOReturnOverrun;
			break;
		}
		*size = enHeaderInfo.fields.dataSize;

		if (!copyMem) {
			// Zero-copy: point the caller directly at the shared entry data.
			// Caller must re-validate with isDataEntryValidInQueueMem().
			*data = entry->data;
			ret = kIOReturnSuccess;
			break; // break out, we're done
		} else {
			// Copy path: the caller's buffer must be large enough.
			if (os_unlikely(enHeaderInfo.fields.dataSize > inSize)) {
				return kIOReturnOverrun;
			}
			memcpy(*data, entry->data, enHeaderInfo.fields.dataSize);
			// Lets re-verify after the memcpy if the buffer is/has been overwritten.

			IOCircularDataQueueEntryHeaderInfo enHeaderInfoAfter;
			enHeaderInfoAfter.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
			// Did something change ?
			if (enHeaderInfo.val == enHeaderInfoAfter.val) {
				ret = kIOReturnSuccess;
				break;
			} else {
				// we failed so we'll retry.
				*size = 0;
			}
		}
	} while (retry);

	// Final integrity check; corruption trumps an otherwise successful read.
	if ((kIOReturnSuccess == ret) && os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	return ret;
}
1003 
1004 /*!
1005  * @function getLatestInQueueMem
1006  * Access the latest entry data, also update the cursor position to the latest. No copy is made of the data. <br> Caller
1007  * is supposed to call isDataEntryValidInQueueMem() to check data integrity after reading the data is complete.
1008  * @param queue Handle to the queue.
1009  * @param data A pointer to the data memory region for the latest entry data in the queue.
1010  * @param size A pointer to the size of the data parameter.  On return, this contains the actual size of the data
1011  * pointed to by data param.
1012  *  @return
1013  *  - `kIOReturnSuccess` if the cursor position was updated.
1014  *  - `kIOReturnUnderrun` if nothing has ever been enqueued into the queue
1015  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
1016  *  - `kIOReturnBadArgument` if an invalid queue was provided.
1017  *  - `kIOReturnTimeout` if the reader timed out when trying to read. This is possible if the writer overwrites the
1018  * latest index a reader is about to read. The function times out if the read is unsuccessful after multiple retries.
1019  *  - Other values indicate an error.
1020  *
1021  */
1022 
1023 static IOReturn ATTR_LSE2
getLatestInQueueMem(IOCircularDataQueue * queue,void ** data,size_t * size)1024 getLatestInQueueMem(IOCircularDataQueue *queue,
1025     void **data,
1026     size_t *size)
1027 {
1028 	return _getLatestInQueueMemInternal(queue, data, size, false);
1029 }
1030 
1031 /*!
1032  * @function copyLatestInQueueMem
1033  *  Access the latest entry data and copy into the provided buffer. Also update the cursor position to the latest.
 * Function guarantees that the new data returned is always valid hence no need to call isDataEntryValidInQueueMem().
1035  * @param queue Handle to the queue.
1036  * @param data Pointer to memory into which the latest data from the queue is copied. Lifetime of this memory is
1037  * controlled by the caller.
1038  * @param size Size of the data buffer provided for copying. On return, this contains the actual size of the data
1039  * pointed to by data param.
1040  *  @return
1041  *  - `kIOReturnSuccess` if the cursor position was updated.
1042  *  - `kIOReturnUnderrun` if nothing has ever been enqueued into the queue
1043  *  - `kIOReturnBadArgument` if the buffer provided to copy the data is NULL or  if an invalid queue was provided..
1044  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
1045  *  - `kIOReturnTimeout` if the reader timed out when trying to copy the latest data. This is possible if the writer
1046  * overwrites the latest index a reader is about to copy. The function times out if the copy is unsuccessful after
1047  * multiple retries.
1048  *  - Other values indicate an error.
1049  *
1050  */
1051 
1052 static IOReturn ATTR_LSE2
copyLatestInQueueMem(IOCircularDataQueue * queue,void * data,size_t * size)1053 copyLatestInQueueMem(IOCircularDataQueue *queue,
1054     void *data,
1055     size_t *size)
1056 {
1057 	return _getLatestInQueueMemInternal(queue, &data, size, true);
1058 }
1059 
static IOReturn ATTR_LSE2
_getNextInQueueMemInternal(IOCircularDataQueue *queue,
    void **data,
    size_t *size,
    bool copyMem)
{
	/*
	 * Shared implementation behind getNextInQueueMem / copyNextInQueueMem.
	 * Advances the cursor one entry forward (wrapping at numEntries) and
	 * either returns a pointer into shared memory (copyMem == false) or
	 * copies the entry data into the caller's buffer (copyMem == true).
	 */
	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
	IOCircularDataQueueMemoryCursor *cursor = &queue->queueCursor;

	IOReturn ret = kIOReturnError;
	size_t inSize;

	if (queueMemory == NULL || data == NULL || size == NULL || queueHeaderShadow == NULL) {
		return kIOReturnBadArgument;
	}

	// Remember the caller's buffer capacity; *size doubles as the output size.
	inSize = *size;
	*size = 0;

	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	if (os_unlikely(_isCursorPositionInvalid(queue))) {
		return kIOReturnAborted;
	}

	const size_t queueAllocMemSize = queueHeaderShadow->allocMemSize;
	const size_t queueEntryDataSize = queueHeaderShadow->entryDataSize;

	// Snapshot the queue state (acquire pairs with the producer's release
	// stores in the enqueue path).
	IOCircularDataQueueState currState;
	currState.val = atomic_load_explicit(&queueMemory->queueStateVal, memory_order_acquire);

	if (os_unlikely(currState.fields.rstStatus & IOCIRCULARDATAQUEUE_STATE_RESET_INPROGRESS)) {
		// Another thread is resetting the queue
		return kIOReturnBusy;
	}

	uint32_t queueGeneration = currState.fields.generation;

	// was the queue reset ?
	if (os_unlikely(cursor->generation != queueGeneration || cursor->sequenceNum > currState.fields.seqNum)) {
		return kIOReturnAborted;
	}

	// seqNum == UINT64_MAX is the initial sentinel value: no entry exists yet.
	if (os_unlikely(currState.fields.seqNum == UINT64_MAX)) {
		// Nothing has ever been written to the queue yet.
		return kIOReturnUnderrun;
	}

	// nothing new written or an active write is in progress for the next entry.
	if (os_unlikely(cursor->sequenceNum == currState.fields.seqNum
	    || ((cursor->sequenceNum + 1) == currState.fields.seqNum
	    && currState.fields.wrStatus == IOCIRCULARDATAQUEUE_STATE_WRITE_INPROGRESS))) {
		return kIOReturnUnderrun;
	}

	// Next slot, wrapping at numEntries.
	uint32_t nextIndex = (cursor->position + 1) % queueHeaderShadow->numEntries;
	__auto_type entry
	        = (IOCircularDataQueueEntryHeader *)(uintptr_t)((uint8_t *)&queueMemory->entries[0] + (nextIndex * queueEntryDataSize));

	// SANITY CHECK - Final check to ensure the 'entry' pointer is
	// within the queueMemory entries buffer before we begin writing.
	if (os_unlikely((uint8_t *)entry < (uint8_t *)(&queueMemory->entries[0])
	    || (uint8_t *)entry >= (uint8_t *)queueMemory + queueAllocMemSize)) {
		queue_debug_error("Out of Bounds! " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT, QUEUE_ARGS(queueMemory),
		    CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadArgument;
	}

	os_compiler_barrier();

	// Per-entry sentinel mismatch: shared memory corrupted/tampered.
	if (os_unlikely(entry->sentinel != queueHeaderShadow->sentinel)) {
		queue_debug_error("entry->sentinel != queueMemory->sentinel " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadMedia;
	}

	// The entry's generation must still match; otherwise the queue was reset
	// between the state snapshot and this read.
	IOCircularDataQueueEntryHeaderInfo enHeaderInfo;
	enHeaderInfo.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
	uint32_t entryGeneration = enHeaderInfo.fields.generation;
	if (os_unlikely(entryGeneration != queueGeneration)) {
		queue_debug_note("entryGeneration != queueGeneration " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnAborted;
	}

	// is the entry currently being written to or has the cursor fallen too far behind and the cursor is no longer
	// valid.
	if (os_unlikely(enHeaderInfo.fields.wrStatus == IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_INPROGRESS
	    || enHeaderInfo.fields.seqNum != cursor->sequenceNum + 1)) {
		return kIOReturnOverrun;
	}

	// Commit the cursor to the next entry.
	cursor->position = nextIndex;
	cursor->generation = entryGeneration;
	cursor->sequenceNum = enHeaderInfo.fields.seqNum;

	// A recorded size larger than the per-entry capacity means the header is
	// corrupt or was overrun.
	if (os_unlikely(enHeaderInfo.fields.dataSize > queueHeaderShadow->entryDataSize)) {
		return kIOReturnOverrun;
	}
	*size = enHeaderInfo.fields.dataSize;

	if (!copyMem) {
		// Zero-copy: point the caller directly at the shared entry data.
		// Caller must re-validate with isDataEntryValidInQueueMem().
		*data = entry->data;
		ret = kIOReturnSuccess;
	} else {
		// Copy path: the caller's buffer must be large enough.
		if (os_unlikely(enHeaderInfo.fields.dataSize > inSize)) {
			return kIOReturnOverrun;
		}
		memcpy(*data, entry->data, enHeaderInfo.fields.dataSize);
		// Lets re-verify after the memcpy if the buffer is/has been overwritten.

		IOCircularDataQueueEntryHeaderInfo enHeaderInfoAfter;
		enHeaderInfoAfter.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
		// Did something change, while we were memcopying ?
		if (enHeaderInfo.val == enHeaderInfoAfter.val) {
			ret = kIOReturnSuccess;
		} else {
			// while we were memcopying, the writer wrapped around and is writing into our index. or the queue got reset
			*size = 0;
			ret = kIOReturnOverrun;
		}
	}

	// Final integrity check; corruption trumps an otherwise successful read.
	if ((kIOReturnSuccess == ret) && os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	return ret;
}
1192 
1193 /*!
1194  * @function getNextInQueueMem
1195  * Access the data at the next cursor position and updates the cursor position to the next. No copy is made of the data.
1196  * <br> Caller is supposed to call isDataEntryValidInQueueMem() to check data integrity after reading the data is
1197  * complete.
1198  * @param queue Handle to the queue.
1199  * @param data A pointer to the data memory region for the next entry data in the queue.
1200  * @param size A pointer to the size of the data parameter.  On return, this contains the actual size of the data
1201  * pointed to by data param.
1202  *  @return
1203  *  - `kIOReturnSuccess` if the cursor position was updated.
1204  *  - `kIOReturnAborted` if the cursor has become invalid, possibly due to a reset of the queue.
1205  *  - `kIOReturnUnderrun` if the cursor has reached the latest available data.
1206  *  - `kIOReturnOverrun` if the entry at the cursor position is no longer in
1207  *     the queue's buffer. Call getLatestInQueueMem to get the latest data and cursor position.
 *  - `kIOReturnBadArgument` if an invalid argument is passed.
1209  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
1210  *  - Other values indicate an error.
1211  *
1212  */
1213 
1214 static IOReturn ATTR_LSE2
getNextInQueueMem(IOCircularDataQueue * queue,void ** data,size_t * size)1215 getNextInQueueMem(IOCircularDataQueue *queue,
1216     void **data,
1217     size_t *size)
1218 {
1219 	return _getNextInQueueMemInternal(queue, data, size, false);
1220 }
1221 
1222 /*!
1223  * @function copyNextInQueueMem
1224  * Access the data at the next cursor position and copy into the provided buffer. Also update the cursor position to the
 * next. If successful, function guarantees that the data returned is always valid hence no need to call
1226  * isDataEntryValidInQueueMem().
1227  * @param queue Handle to the queue.
1228  * @param data Pointer to memory into which the next data from the queue is copied. Lifetime of this memory is
1229  * controlled by the caller.
1230  * @param size Size of the data buffer provided for copying. On return, this contains the actual size of the data
1231  * pointed to by data param.
1232  *  @return
1233  *  - `kIOReturnSuccess` if the cursor position was updated.
1234  *  - `kIOReturnAborted` if the cursor has become invalid, possibly due to a reset of the queue.
1235  *  - `kIOReturnUnderrun` if the cursor has reached the latest available data.
1236  *  - `kIOReturnOverrun` if the entry at the cursor position is no longer in
1237  *     the queue's buffer. Call getLatestInQueueMem to get the latest data and cursor position.
 *  - `kIOReturnBadArgument` if an invalid argument is passed.
1239  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
1240  *  - Other values indicate an error.
1241  *
1242  */
1243 
1244 static IOReturn ATTR_LSE2
copyNextInQueueMem(IOCircularDataQueue * queue,void * data,size_t * size)1245 copyNextInQueueMem(IOCircularDataQueue *queue,
1246     void *data,
1247     size_t *size)
1248 {
1249 	return _getNextInQueueMemInternal(queue, &data, size, true);
1250 }
1251 
1252 /*!
1253  * @function getPrevInQueueMem
1254  * Access the data at the previous cursor position and updates the cursor position to the previous. No copy is made of
1255  * the data. <br> Caller is supposed to call isDataEntryValidInQueueMem() to check data integrity after reading the data
1256  * is complete.
1257  * @param queue Handle to the queue.
1258  * @param data A pointer to the data memory region for the previous entry data in the queue.
1259  * @param size A pointer to the size of the data parameter.  On return, this contains the actual size of the data
1260  * pointed to by data param.
1261  *  @return
1262  *  - `kIOReturnSuccess` if the cursor position was updated to the previous.
1263  *  - `kIOReturnAborted` if the cursor has become invalid, possibly due to a reset of the queue.
1264  *  - `kIOReturnOverrun` if the entry at the cursor position is no longer in
1265  *     the queue's buffer. Call getLatestInQueueMem to get the latest data and cursor position.
 *  - `kIOReturnBadArgument` if an invalid argument is passed.
1267  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
1268  *  - Other values indicate an error.
1269  *
1270  */
1271 
static IOReturn ATTR_LSE2
_getPrevInQueueMemInternal(IOCircularDataQueue *queue,
    void **data,
    size_t *size,
    bool copyMem)
{
	/*
	 * Shared implementation behind getPrevInQueueMem / copyPrevInQueueMem.
	 * Moves the cursor one entry backwards (wrapping at slot 0) and either
	 * returns a pointer into shared memory (copyMem == false) or copies the
	 * entry data into the caller's buffer (copyMem == true).
	 */
	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
	IOCircularDataQueueMemoryCursor *cursor = &queue->queueCursor;
	size_t inSize;

	IOReturn ret = kIOReturnError;
	if (queueMemory == NULL || data == NULL || size == NULL || queueHeaderShadow == NULL) {
		return kIOReturnBadArgument;
	}

	// Remember the caller's buffer capacity; *size doubles as the output size.
	inSize = *size;
	*size = 0;

	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	if (os_unlikely(_isCursorPositionInvalid(queue))) {
		return kIOReturnAborted;
	}

	const size_t queueAllocMemSize = queueHeaderShadow->allocMemSize;
	const size_t queueEntryDataSize = queueHeaderShadow->entryDataSize;

	// Snapshot the queue state (acquire pairs with the producer's release
	// stores in the enqueue path).
	IOCircularDataQueueState currState;
	currState.val = atomic_load_explicit(&queueMemory->queueStateVal, memory_order_acquire);

	if (os_unlikely(currState.fields.rstStatus & IOCIRCULARDATAQUEUE_STATE_RESET_INPROGRESS)) {
		// Another thread is resetting the queue
		return kIOReturnBusy;
	}

	uint32_t queueGeneration = currState.fields.generation;

	// was the queue reset ?
	if (os_unlikely(cursor->generation != queueGeneration || cursor->sequenceNum > currState.fields.seqNum)) {
		return kIOReturnAborted;
	}

	// seqNum == UINT64_MAX is the initial sentinel value: no entry exists yet.
	if (os_unlikely(currState.fields.seqNum == UINT64_MAX)) {
		// Nothing has ever been written to the queue yet.
		return kIOReturnUnderrun;
	}

	// Previous slot, wrapping to the last slot when position is 0.
	uint32_t prevIndex = (cursor->position == 0) ? (queueHeaderShadow->numEntries - 1) : (cursor->position - 1);
	__auto_type entry
	        = (IOCircularDataQueueEntryHeader *)(uintptr_t)((uint8_t *)&queueMemory->entries[0] + (prevIndex * queueEntryDataSize));

	// SANITY CHECK - Final check to ensure the 'entry' pointer is
	// within the queueMemory entries buffer before we begin writing.
	if (os_unlikely((uint8_t *)entry < (uint8_t *)(&queueMemory->entries[0])
	    || (uint8_t *)entry >= (uint8_t *)queueMemory + queueAllocMemSize)) {
		queue_debug_error("Out of Bounds! " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT, QUEUE_ARGS(queueMemory),
		    CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadArgument;
	}

	os_compiler_barrier();

	// NOTE(review): unlike the next/latest paths, the header info is read
	// here before the sentinel check - confirm the ordering is intentional.
	IOCircularDataQueueEntryHeaderInfo enHeaderInfo;
	enHeaderInfo.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
	// is the entry currently being written to or this is the newest entry that was just written.
	if (os_unlikely(enHeaderInfo.fields.wrStatus == IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_INPROGRESS
	    || enHeaderInfo.fields.seqNum > cursor->sequenceNum)) {
		return kIOReturnOverrun;
	}

	// The entry's generation must match; an older generation means the slot
	// has not been rewritten since a reset.
	uint32_t entryGeneration = enHeaderInfo.fields.generation;
	if (os_unlikely(entryGeneration != queueGeneration)) {
		queue_debug_note("entryGeneration != queueGeneration " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnOverrun;
	}

	// the sentinel has been corrupted.
	if (os_unlikely(entry->sentinel != queueHeaderShadow->sentinel)) {
		queue_debug_error("entry->sentinel != queueMemory->sentinel " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadMedia;
	}

	// Commit the cursor to the previous entry.
	cursor->position = prevIndex;
	cursor->generation = entryGeneration;
	cursor->sequenceNum = enHeaderInfo.fields.seqNum;

	// A recorded size larger than the per-entry capacity means the header is
	// corrupt or was overrun.
	if (os_unlikely(enHeaderInfo.fields.dataSize > queueHeaderShadow->entryDataSize)) {
		return kIOReturnOverrun;
	}
	*size = enHeaderInfo.fields.dataSize;
	ret = kIOReturnSuccess;

	if (!copyMem) {
		// Zero-copy: point the caller directly at the shared entry data.
		// Caller must re-validate with isDataEntryValidInQueueMem().
		*data = entry->data;
	} else {
		// Copy path: the caller's buffer must be large enough.
		if (os_unlikely(enHeaderInfo.fields.dataSize > inSize)) {
			return kIOReturnOverrun;
		}
		memcpy(*data, entry->data, enHeaderInfo.fields.dataSize);
		// Lets re-verify after the memcpy if the buffer is/has been overwritten.

		IOCircularDataQueueEntryHeaderInfo enHeaderInfoAfter;
		enHeaderInfoAfter.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
		// Did something change, while we were memcopying ?
		if (enHeaderInfo.val != enHeaderInfoAfter.val) {
			// while we were memcopying, the writer wrapped around and is writing into our index. or the queue got reset
			*size = 0;
			ret = kIOReturnOverrun;
		}
	}

	// Final integrity check; corruption trumps an otherwise successful read.
	if ((kIOReturnSuccess == ret) && os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	return ret;
}
1394 
1395 static IOReturn ATTR_LSE2
getPrevInQueueMem(IOCircularDataQueue * queue,void ** data,size_t * size)1396 getPrevInQueueMem(IOCircularDataQueue *queue,
1397     void **data,
1398     size_t *size)
1399 {
1400 	return _getPrevInQueueMemInternal(queue, data, size, false);
1401 }
1402 
1403 /*!
1404  * @function copyPrevInQueueMem
1405  * Access the data at the previous cursor position and copy into the provided buffer. Also update the cursor position to
 * the previous. If successful, the function guarantees that the data returned is always valid, hence no need to call
1407  * isDataEntryValidInQueueMem().
1408  * @param queue Handle to the queue.
1409  * @param data Pointer to memory into which the previous data is copied. Lifetime of this memory is controlled by the
1410  * caller.
1411  * @param size Size of the data buffer provided for copying. On return, this contains the actual size of the data
1412  * pointed to by data param.
1413  *  @return
1414  *  - `kIOReturnSuccess` if the cursor position was updated.
1415  *  - `kIOReturnAborted` if the cursor has become invalid, possibly due to a reset of the queue.
1416  *  - `kIOReturnOverrun` if the entry at the cursor position is no longer in
1417  *     the queue's buffer. Call getLatestInQueueMem to get the latest data and cursor position.
 *  - `kIOReturnBadArgument` if an invalid argument is passed.
1419  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
1420  *  - Other values indicate an error.
1421  *
1422  */
1423 
1424 static IOReturn ATTR_LSE2
copyPrevInQueueMem(IOCircularDataQueue * queue,void * data,size_t * size)1425 copyPrevInQueueMem(IOCircularDataQueue *queue,
1426     void *data,
1427     size_t *size)
1428 {
1429 	return _getPrevInQueueMemInternal(queue, &data, size, true);
1430 }
1431 
/*
 * _getCurrentInQueueMemInternal
 *
 * Read the entry at the cursor's current position without moving the
 * cursor.  With copyMem == false, *data is pointed directly at the shared
 * entry buffer (zero-copy); with copyMem == true, the entry is copied into
 * the caller's buffer (*data, capacity passed in via *size) and the entry
 * header is re-read afterwards to detect a concurrent overwrite.
 *
 * On entry *size is the caller's buffer capacity (copy mode only); on
 * return it is the entry's data size, or 0 on any failure path.
 * NOTE(review): queue itself is dereferenced before any NULL check — all
 * visible callers guard queue != NULL first.
 */
static IOReturn ATTR_LSE2
_getCurrentInQueueMemInternal(IOCircularDataQueue *queue,
    void **data,
    size_t *size,
    bool copyMem)
{
	IOCircularDataQueueMemory *queueMemory = queue->queueMemory;
	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
	IOCircularDataQueueMemoryCursor const *cursor = &queue->queueCursor;

	size_t inSize;

	// NOTE(review): queueHeaderShadow is the address of an embedded member
	// and can never be NULL; that clause of the check is dead but harmless.
	if (queueMemory == NULL || data == NULL || size == NULL || queueHeaderShadow == NULL) {
		return kIOReturnBadArgument;
	}

	// Remember the caller's capacity, then default the out-size to 0 so
	// every early return below reports "no data".
	inSize = *size;
	*size = 0;

	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	if (os_unlikely(_isCursorPositionInvalid(queue))) {
		return kIOReturnAborted;
	}

	// Shadow copies of trusted sizes; never re-read from shared memory.
	const size_t queueAllocMemSize = queueHeaderShadow->allocMemSize;
	const size_t queueEntryDataSize = queueHeaderShadow->entryDataSize;

	// Acquire-load the queue state so the entry contents we read below are
	// at least as new as this snapshot.
	IOCircularDataQueueState currState;
	currState.val = atomic_load_explicit(&queueMemory->queueStateVal, memory_order_acquire);

	if (os_unlikely(currState.fields.rstStatus & IOCIRCULARDATAQUEUE_STATE_RESET_INPROGRESS)) {
		// Another thread is resetting the queue
		return kIOReturnBusy;
	}

	uint32_t queueGeneration = currState.fields.generation;

	// was the queue reset ?
	if (os_unlikely(cursor->generation != queueGeneration || cursor->sequenceNum > currState.fields.seqNum)) {
		return kIOReturnAborted;
	}

	// UINT64_MAX is the initial seqNum sentinel (see queue creation).
	if (os_unlikely(currState.fields.seqNum == UINT64_MAX)) {
		// Nothing has ever been written to the queue yet.
		return kIOReturnUnderrun;
	}

	// Entry slots are laid out contiguously, queueEntryDataSize bytes each
	// (header + rounded data area), starting at entries[0].
	__auto_type entry = (IOCircularDataQueueEntryHeader *)(uintptr_t)((uint8_t *)&queueMemory->entries[0]
	    + (cursor->position * queueEntryDataSize));

	// SANITY CHECK - Final check to ensure the 'entry' pointer is
	// within the queueMemory entries buffer before we begin writing.
	if (os_unlikely((uint8_t *)entry < (uint8_t *)(&queueMemory->entries[0])
	    || (uint8_t *)entry >= (uint8_t *)queueMemory + queueAllocMemSize)) {
		queue_debug_error("Out of Bounds! " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT, QUEUE_ARGS(queueMemory),
		    CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadArgument;
	}

	// Keep the bounds check above from being reordered past the loads below.
	os_compiler_barrier();

	// Per-entry sentinel mismatch means the shared memory was stomped on.
	if (os_unlikely(entry->sentinel != queueHeaderShadow->sentinel)) {
		queue_debug_error("entry->sentinel != queueMemory->sentinel " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnBadMedia;
	}

	// Acquire-load the entry header; fields are validated against the
	// queue-state snapshot taken above.
	IOCircularDataQueueEntryHeaderInfo enHeaderInfo;
	enHeaderInfo.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
	uint32_t entryGeneration = enHeaderInfo.fields.generation;
	if (os_unlikely(entryGeneration != queueGeneration)) {
		queue_debug_note("entryGeneration != queueGeneration " QUEUE_FORMAT " " CURSOR_FORMAT " " ENTRY_FORMAT,
		    QUEUE_ARGS(queueMemory), CURSOR_ARGS(cursor), ENTRY_ARGS(entry));
		return kIOReturnAborted;
	}

	// is the entry currently being written to or has the cursor fallen too far behind and the cursor is no longer
	// valid.
	if (os_unlikely(enHeaderInfo.fields.wrStatus == IOCIRCULARDATAQUEUE_ENTRY_STATE_WRITE_INPROGRESS
	    || enHeaderInfo.fields.seqNum != cursor->sequenceNum)) {
		return kIOReturnOverrun;
	}

	// Untrusted size from shared memory: clamp against the shadow copy.
	if (os_unlikely(enHeaderInfo.fields.dataSize > queueHeaderShadow->entryDataSize)) {
		return kIOReturnOverrun;
	}
	*size = enHeaderInfo.fields.dataSize;

	if (!copyMem) {
		// Zero-copy: hand out a pointer into shared memory.  Caller must
		// re-validate with isDataEntryValidInQueueMem() after reading.
		*data = entry->data;
	} else {
		if (os_unlikely(enHeaderInfo.fields.dataSize > inSize)) {
			return kIOReturnOverrun;
		}
		memcpy(*data, entry->data, enHeaderInfo.fields.dataSize);
		// Lets re-verify after the memcpy if the buffer is/has been overwritten.

		IOCircularDataQueueEntryHeaderInfo enHeaderInfoAfter;
		enHeaderInfoAfter.val = atomic_load_explicit(&entry->headerInfoVal, memory_order_acquire);
		// Did something change, while we were memcopying ?
		if (enHeaderInfo.val != enHeaderInfoAfter.val) {
			// while we were memcopying, the writer wrapped around and is writing into our index. or the queue got reset
			// NOTE(review): the analogous "prev" path reports this race as
			// kIOReturnOverrun; here it is kIOReturnBusy — confirm the
			// asymmetry is intentional.
			*size = 0;
			return kIOReturnBusy;
		}
	}

	// Final integrity sweep so a success return never hides corruption
	// that happened while we were reading.
	if (os_unlikely(_isQueueMemoryCorrupted(queue))) {
		return kIOReturnBadMedia;
	}

	return kIOReturnSuccess;
}
1548 
1549 /*!
1550  * @function getCurrentInQueueMem
1551  * Access the data at the current cursor position. The cursor position is unchanged. No copy is made of the data. <br>
1552  * Caller is supposed to call isDataEntryValidInQueueMem() to check data integrity after reading the data is complete.
1553  * @param queue Handle to the queue.
 * @param data A pointer to the data memory region for the current entry data in the queue.
1555  * @param size A pointer to the size of the data parameter.  On return, this contains the actual size of the data
1556  * pointed to by data param.
1557  *  @return
 *  - `kIOReturnSuccess` if the data at the current cursor position was returned.
1559  *  - `kIOReturnAborted` if the cursor has become invalid, possibly due to a reset of the queue.
1560  *  - `kIOReturnOverrun` if the entry at the cursor position is no longer in
1561  *     the queue's buffer. Call getLatestInQueueMem to get the latest data and cursor position.
 *  - `kIOReturnBadArgument` if an invalid argument is passed.
1563  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
1564  *  - Other values indicate an error.
1565  *
1566  */
1567 
1568 static IOReturn ATTR_LSE2
getCurrentInQueueMem(IOCircularDataQueue * queue,void ** data,size_t * size)1569 getCurrentInQueueMem(IOCircularDataQueue *queue,
1570     void **data,
1571     size_t *size)
1572 {
1573 	return _getCurrentInQueueMemInternal(queue, data, size, false);
1574 }
1575 
1576 /*!
1577  * @function copyCurrentInQueueMem
1578  * Access the data at the current cursor position and copy into the provided buffer. The cursor position is unchanged.
 * If successful, the function guarantees that the data returned is always valid, hence no need to call
1580  * isDataEntryValidInQueueMem().
1581  * @param queue Handle to the queue.
 * @param data Pointer to memory into which the current data is copied. Lifetime of this memory is controlled by the
1583  * caller.
1584  * @param size Size of the data buffer provided for copying. On return, this contains the actual size of the data
1585  * pointed to by data param.
1586  *  @return
 *  - `kIOReturnSuccess` if the data at the current cursor position was copied.
1588  *  - `kIOReturnAborted` if the cursor has become invalid.
1589  *  - `kIOReturnOverrun` if the entry at the cursor position is no longer in
1590  *     the queue's buffer. Call getLatestInQueueMem to get the latest data and cursor position.
 *  - `kIOReturnBadArgument` if an invalid argument is passed.
1592  *  - `kIOReturnBadMedia` if the queue shared memory has been compromised.
1593  *  - Other values indicate an error.
1594  *
1595  */
1596 
1597 static IOReturn ATTR_LSE2
copyCurrentInQueueMem(IOCircularDataQueue * queue,void * data,size_t * size)1598 copyCurrentInQueueMem(IOCircularDataQueue *queue,
1599     void *data,
1600     size_t *size)
1601 {
1602 	return _getCurrentInQueueMemInternal(queue, &data, size, true);
1603 }
1604 
1605 
1606 /* API */
1607 
1608 static void ATTR_LSE2
_initCursor(IOCircularDataQueue * queue)1609 _initCursor(IOCircularDataQueue *queue)
1610 {
1611 	// Invalidate the cursor
1612 	IOCircularDataQueueMemoryCursor *cursor = &queue->queueCursor;
1613 	cursor->generation = UINT32_MAX;
1614 	cursor->position = UINT32_MAX;
1615 	cursor->sequenceNum = UINT64_MAX;
1616 }
1617 
1618 #if KERNEL
1619 
1620 IOReturn ATTR_LSE2
IOCircularDataQueueCreateWithEntries(IOCircularDataQueueCreateOptions options,uint32_t numEntries,uint32_t entrySize,IOCircularDataQueue ** pQueue)1621 IOCircularDataQueueCreateWithEntries(IOCircularDataQueueCreateOptions options, uint32_t numEntries, uint32_t entrySize, IOCircularDataQueue **pQueue)
1622 {
1623 	IOCircularDataQueueMemory *queueMemory;
1624 	IOReturn ret;
1625 
1626 	if (!pQueue) {
1627 		return kIOReturnBadArgument;
1628 	}
1629 	*pQueue = NULL;
1630 	if (!numEntries || !entrySize) {
1631 		return kIOReturnBadArgument;
1632 	}
1633 
1634 	uint64_t sentinel = 0xA5A5A5A5A5A5A5A5;
1635 
1636 #if HEADER_16BYTE_ALIGNED
1637 	size_t entryRoundedDataSize = IORound(entrySize, sizeof(__uint128_t));
1638 #else
1639 	size_t entryRoundedDataSize = IORound(entrySize, sizeof(UInt64));
1640 #endif
1641 	size_t entryDataSize = entryRoundedDataSize + CIRCULAR_DATA_QUEUE_ENTRY_HEADER_SIZE;
1642 	size_t entriesSize = numEntries * (entryDataSize);
1643 	size_t totalSize = entriesSize + CIRCULAR_DATA_QUEUE_MEMORY_HEADER_SIZE;
1644 
1645 	if (os_unlikely(numEntries > UINT32_MAX - 1
1646 	    || entryRoundedDataSize > (UINT32_MAX - sizeof(IOCircularDataQueueEntryHeader))
1647 	    || entryDataSize > UINT32_MAX || totalSize > UINT32_MAX)) {
1648 		return kIOReturnBadArgument;
1649 	}
1650 
1651 	IOCircularDataQueue *queue = IONew(IOCircularDataQueue, 1);
1652 	if (!queue) {
1653 		return kIOReturnNoMemory;
1654 	}
1655 	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
1656 
1657 	OSData * desc;
1658 	queue->iomd = IOBufferMemoryDescriptor::inTaskWithOptions(
1659 		kernel_task, kIOMemoryDirectionOutIn | kIOMemoryKernelUserShared, totalSize, page_size);
1660 	if (os_unlikely(queue->iomd == NULL)) {
1661 		ret = kIOReturnNoMemory;
1662 		goto error;
1663 	}
1664 	queueMemory = (IOCircularDataQueueMemory *)queue->iomd->getBytesNoCopy();
1665 	queue->queueMemory = queueMemory;
1666 	queueMemory->sentinel = queueHeaderShadow->sentinel = sentinel;
1667 
1668 	queueHeaderShadow->allocMemSize = (uint32_t)totalSize;
1669 	queueHeaderShadow->entryDataSize
1670 	        = (uint32_t)entryDataSize; // totalSize check above gaurantess this will not overflow UINT32_MAX.
1671 	queueHeaderShadow->numEntries = numEntries;
1672 	queueHeaderShadow->dataSize = entrySize; // the client requested fixed entry size.
1673 	queueHeaderShadow->memorySize = (uint32_t)entriesSize;
1674 
1675 	desc = OSData::withBytes(queueHeaderShadow, sizeof(*queueHeaderShadow));
1676 	queue->iomd->setSharingContext(kIOCircularQueueDescriptionKey, desc);
1677 
1678 	IOCircularDataQueueState newState;
1679 	newState.val = 0;
1680 	newState.fields.seqNum = UINT64_MAX;
1681 	atomic_store_explicit(&queueMemory->queueStateVal, newState.val, memory_order_release);
1682 
1683 	ret = _reset(queue);
1684 	if (ret != kIOReturnSuccess) {
1685 		goto error;
1686 	}
1687 
1688 	_initCursor(queue);
1689 	*pQueue = queue;
1690 	return kIOReturnSuccess;
1691 
1692 
1693 error:
1694 	IOCircularDataQueueDestroy(&queue);
1695 	return ret;
1696 }
1697 
1698 IOMemoryDescriptor * ATTR_LSE2
IOCircularDataQueueCopyMemoryDescriptor(IOCircularDataQueue * queue)1699 IOCircularDataQueueCopyMemoryDescriptor(IOCircularDataQueue  *queue)
1700 {
1701 	IOMemoryDescriptor * md;
1702 	md = queue->iomd;
1703 	if (md) {
1704 		md->retain();
1705 	}
1706 	return md;
1707 }
1708 
1709 #else /* KERNEL */
1710 
1711 #if defined(__arm64__) && defined(__LP64__)
1712 #include <System/arm/cpu_capabilities.h>
1713 #endif /* defined(__arm64__) */
1714 
1715 IOReturn ATTR_LSE2
IOCircularDataQueueCreateWithConnection(IOCircularDataQueueCreateOptions options,io_connect_t connect,uint32_t memoryType,IOCircularDataQueue ** pQueue)1716 IOCircularDataQueueCreateWithConnection(IOCircularDataQueueCreateOptions options, io_connect_t connect, uint32_t memoryType, IOCircularDataQueue **pQueue)
1717 {
1718 	if (!pQueue) {
1719 		return kIOReturnBadArgument;
1720 	}
1721 	*pQueue = NULL;
1722 
1723 #if defined(__arm64__) && defined(__LP64__)
1724 	if (0 == (kHasFeatLSE2 & _get_cpu_capabilities())) {
1725 		return kIOReturnUnsupported;
1726 	}
1727 #else
1728 	return kIOReturnUnsupported;
1729 #endif /* defined(__arm64__) */
1730 
1731 	uint64_t sentinel = 0xA5A5A5A5A5A5A5A5;
1732 
1733 	IOCircularDataQueue *queue = IONew(IOCircularDataQueue, 1);
1734 	if (!queue) {
1735 		return kIOReturnNoMemory;
1736 	}
1737 	IOCircularDataQueueDescription *queueHeaderShadow = &queue->queueHeaderShadow;
1738 
1739 	queue->connect = connect;
1740 	queue->memoryType = memoryType;
1741 
1742 	io_struct_inband_t inband_output;
1743 	mach_msg_type_number_t inband_outputCnt;
1744 	mach_vm_address_t map_address;
1745 	mach_vm_size_t map_size;
1746 	IOReturn ret;
1747 
1748 	inband_outputCnt = sizeof(inband_output);
1749 
1750 	ret = io_connect_map_shared_memory(connect, memoryType, mach_task_self(),
1751 	    &map_address, &map_size,
1752 	    /* flags */ 0,
1753 	    (char *) kIOCircularQueueDescriptionKey,
1754 	    inband_output,
1755 	    &inband_outputCnt);
1756 
1757 	printf("%x, %lx, 0x%llx, 0x%llx\n", inband_outputCnt, sizeof(IOCircularDataQueueDescription), map_address, map_size);
1758 
1759 	assert(sizeof(IOCircularDataQueueDescription) == inband_outputCnt);
1760 	memcpy(queueHeaderShadow, inband_output, sizeof(IOCircularDataQueueDescription));
1761 	printf("sentinel %qx\n", queueHeaderShadow->sentinel);
1762 	assert(queueHeaderShadow->allocMemSize == map_size);
1763 	queue->queueMemory = (IOCircularDataQueueMemory *) map_address;
1764 
1765 	if (!isQueueMemoryValid(queue)) {
1766 		IOCircularDataQueueDestroy(&queue);
1767 		return kIOReturnBadArgument;
1768 	}
1769 
1770 	_initCursor(queue);
1771 	*pQueue = queue;
1772 
1773 	return ret;
1774 }
1775 
1776 #endif /* !KERNEL */
1777 
1778 IOReturn ATTR_LSE2
IOCircularDataQueueDestroy(IOCircularDataQueue ** pQueue)1779 IOCircularDataQueueDestroy(IOCircularDataQueue **pQueue)
1780 {
1781 	IOCircularDataQueue * queue;
1782 	IOReturn ret = kIOReturnSuccess;
1783 
1784 	if (!pQueue) {
1785 		return kIOReturnBadArgument;
1786 	}
1787 	queue = *pQueue;
1788 	if (queue) {
1789 		ret = destroyQueueMem(queue);
1790 		IODelete(queue, IOCircularDataQueue, 1);
1791 		*pQueue = NULL;
1792 	}
1793 	return ret;
1794 }
1795 
1796 IOReturn ATTR_LSE2
IOCircularDataQueueEnqueue(IOCircularDataQueue * queue,const void * data,size_t dataSize)1797 IOCircularDataQueueEnqueue(IOCircularDataQueue *queue, const void *data, size_t dataSize)
1798 {
1799 	if (!queue) {
1800 		return kIOReturnBadArgument;
1801 	}
1802 
1803 	return enqueueQueueMem(queue, data, dataSize);
1804 }
1805 
1806 IOReturn ATTR_LSE2
IOCircularDataQueueGetLatest(IOCircularDataQueue * queue,void ** data,size_t * size)1807 IOCircularDataQueueGetLatest(IOCircularDataQueue *queue, void **data, size_t *size)
1808 {
1809 	if (!queue) {
1810 		return kIOReturnBadArgument;
1811 	}
1812 
1813 	return getLatestInQueueMem(queue, data, size);
1814 }
1815 
1816 IOReturn ATTR_LSE2
IOCircularDataQueueCopyLatest(IOCircularDataQueue * queue,void * data,size_t * size)1817 IOCircularDataQueueCopyLatest(IOCircularDataQueue *queue, void *data, size_t *size)
1818 {
1819 	if (!queue) {
1820 		return kIOReturnBadArgument;
1821 	}
1822 
1823 	return copyLatestInQueueMem(queue, data, size);
1824 }
1825 
1826 IOReturn ATTR_LSE2
IOCircularDataQueueGetNext(IOCircularDataQueue * queue,void ** data,size_t * size)1827 IOCircularDataQueueGetNext(IOCircularDataQueue *queue, void **data, size_t *size)
1828 {
1829 	if (!queue) {
1830 		return kIOReturnBadArgument;
1831 	}
1832 
1833 	return getNextInQueueMem(queue, data, size);
1834 }
1835 
1836 IOReturn ATTR_LSE2
IOCircularDataQueueCopyNext(IOCircularDataQueue * queue,void * data,size_t * size)1837 IOCircularDataQueueCopyNext(IOCircularDataQueue *queue, void *data, size_t *size)
1838 {
1839 	if (!queue) {
1840 		return kIOReturnBadArgument;
1841 	}
1842 
1843 	return copyNextInQueueMem(queue, data, size);
1844 }
1845 
1846 IOReturn ATTR_LSE2
IOCircularDataQueueGetPrevious(IOCircularDataQueue * queue,void ** data,size_t * size)1847 IOCircularDataQueueGetPrevious(IOCircularDataQueue *queue, void **data, size_t *size)
1848 {
1849 	if (!queue) {
1850 		return kIOReturnBadArgument;
1851 	}
1852 
1853 	return getPrevInQueueMem(queue, data, size);
1854 }
1855 
1856 IOReturn ATTR_LSE2
IOCircularDataQueueCopyPrevious(IOCircularDataQueue * queue,void * data,size_t * size)1857 IOCircularDataQueueCopyPrevious(IOCircularDataQueue *queue, void *data, size_t *size)
1858 {
1859 	if (!queue) {
1860 		return kIOReturnBadArgument;
1861 	}
1862 
1863 	return copyPrevInQueueMem(queue, data, size);
1864 }
1865 
1866 // IOReturn
1867 //IOCircularDataQueueGetLatestWithBlock(IOCircularDataQueue *queue, void (^handler)(void * data, size_t size))
1868 //{
1869 //     if (!queue) {
1870 //         return kIOReturnBadArgument;
1871 //     }
1872 //
1873 ////    return getPrevInQueueMem(queue->queueMemory, (IOCircularDataQueueDescription *)
1874 ///&queue->queueHeaderShadow, (IOCircularDataQueueMemoryCursor *) &queue->queueCursor, data, size);
1875 //}
1876 //
1877 
1878 IOReturn ATTR_LSE2
IOCircularDataQueueIsCurrentDataValid(IOCircularDataQueue * queue)1879 IOCircularDataQueueIsCurrentDataValid(IOCircularDataQueue *queue)
1880 {
1881 	if (!queue) {
1882 		return kIOReturnBadArgument;
1883 	}
1884 
1885 	return isDataEntryValidInQueueMem(queue);
1886 }
1887 
1888 IOReturn ATTR_LSE2
IOCircularDataQueueSetCursorLatest(IOCircularDataQueue * queue)1889 IOCircularDataQueueSetCursorLatest(IOCircularDataQueue *queue)
1890 {
1891 	if (!queue) {
1892 		return kIOReturnBadArgument;
1893 	}
1894 
1895 	return setCursorLatestInQueueMem(queue);
1896 }
1897 
1898 IOReturn ATTR_LSE2
IOCircularDataQueueGetCurrent(IOCircularDataQueue * queue,void ** data,size_t * size)1899 IOCircularDataQueueGetCurrent(IOCircularDataQueue *queue, void **data, size_t *size)
1900 {
1901 	if (!queue) {
1902 		return kIOReturnBadArgument;
1903 	}
1904 
1905 	return getCurrentInQueueMem(queue, data, size);
1906 }
1907 
1908 IOReturn ATTR_LSE2
IOCircularDataQueueCopyCurrent(IOCircularDataQueue * queue,void * data,size_t * size)1909 IOCircularDataQueueCopyCurrent(IOCircularDataQueue *queue, void *data, size_t *size)
1910 {
1911 	if (!queue) {
1912 		return kIOReturnBadArgument;
1913 	}
1914 
1915 	return copyCurrentInQueueMem(queue, data, size);
1916 }
1917 
1918 __END_DECLS
1919