1 /* compile: xcrun -sdk macosx.internal clang -arch arm64e -arch x86_64 -ldarwintest -o test_aio aio.c */
2
3 #include <darwintest.h>
4 #include <darwintest_utils.h>
5 #include <darwintest_multiprocess.h>
6 #include <aio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <stdlib.h>
10 #include <unistd.h>
11 #include <sys/event.h>
12 #include <sys/resource.h>
13 #include <sys/sysctl.h>
14
15 #include "test_utils.h"
16
17
18 #ifndef SIGEV_KEVENT
19 #define SIGEV_KEVENT 4
20 #endif
21
22 T_GLOBAL_META(
23 T_META_NAMESPACE("xnu.file_descriptors.aio"),
24 T_META_RADAR_COMPONENT_NAME("xnu"),
25 T_META_RADAR_COMPONENT_VERSION("file descriptors"),
26 T_META_CHECK_LEAKS(false),
27 T_META_TAG_VM_PREFERRED);
28
29
30 #define AIO_TESTFILE "aio_testfile"
31 #define AIO_BUFFER_SIZE (1024 * 1024)
32 #define AIO_BUFFER_PATTERN 0x20190912
33 #define AIO_LIST_MAX 4
34
35 static char *g_testfiles[AIO_LIST_MAX];
36 static int g_fds[AIO_LIST_MAX];
37 static struct aiocb g_aiocbs[AIO_LIST_MAX];
38 static char *g_buffers[AIO_LIST_MAX];
39
40 /*
41 * This unit-test tests AIO (Asynchronous I/O) facility.
42 */
43
44
45 static void
exit_cleanup(void)46 exit_cleanup(void)
47 {
48 for (int i = 0; i < AIO_LIST_MAX; i++) {
49 if (g_fds[i] > 0) {
50 close(g_fds[i]);
51 }
52 if (g_testfiles[i]) {
53 (void)remove(g_testfiles[i]);
54 }
55 if (g_buffers[i]) {
56 free(g_buffers[i]);
57 }
58 }
59 }
60
61 static void
do_init(int num_files,bool enable_nocache)62 do_init(int num_files, bool enable_nocache)
63 {
64 const char *tmpdir = dt_tmpdir();
65 int i, err;
66
67 T_SETUPBEGIN;
68
69 atexit(exit_cleanup);
70
71 T_QUIET;
72 T_ASSERT_LE(num_files, AIO_LIST_MAX, "too many files");
73
74 for (i = 0; i < AIO_LIST_MAX; i++) {
75 g_fds[i] = -1;
76 g_testfiles[i] = NULL;
77 g_buffers[i] = NULL;
78 }
79
80 for (i = 0; i < num_files; i++) {
81 T_WITH_ERRNO;
82 g_testfiles[i] = malloc(MAXPATHLEN);
83 T_QUIET;
84 T_ASSERT_NE(g_testfiles[i], NULL, "Allocate path buffer %d size %d",
85 i, MAXPATHLEN);
86
87 snprintf(g_testfiles[i], MAXPATHLEN, "%s/%s.%d",
88 tmpdir, AIO_TESTFILE, i);
89
90 T_WITH_ERRNO;
91 g_fds[i] = open(g_testfiles[i], O_CREAT | O_RDWR, 0666);
92 T_ASSERT_NE(g_fds[i], -1, "Create test fi1e: %s", g_testfiles[i]);
93
94 T_WITH_ERRNO;
95 g_buffers[i] = malloc(AIO_BUFFER_SIZE);
96 T_QUIET;
97 T_ASSERT_NE(g_buffers[i], NULL, "Allocate data buffer %d size %d",
98 i, AIO_BUFFER_SIZE);
99 memset(g_buffers[i], AIO_BUFFER_PATTERN, AIO_BUFFER_SIZE);
100
101 if (enable_nocache) {
102 T_WITH_ERRNO;
103 err = fcntl(g_fds[i], F_NOCACHE, 1);
104 T_ASSERT_NE(err, -1, "Set F_NOCACHE: %s", g_testfiles[i]);
105 }
106 }
107
108 T_SETUPEND;
109 }
110
111 static struct aiocb *
init_aiocb(int idx,off_t offset,int lio_opcode)112 init_aiocb(int idx, off_t offset, int lio_opcode)
113 {
114 struct aiocb *aiocbp;
115
116 aiocbp = &g_aiocbs[idx];
117 memset(aiocbp, 0, sizeof(struct aiocb));
118 aiocbp->aio_fildes = g_fds[idx];
119 aiocbp->aio_offset = offset;
120 aiocbp->aio_buf = g_buffers[idx];
121 aiocbp->aio_nbytes = AIO_BUFFER_SIZE;
122 aiocbp->aio_lio_opcode = lio_opcode;
123
124 return aiocbp;
125 }
126
/*
 * Poll aio_error() every 10 ms until the operation tracked by aiocbp leaves
 * the EINPROGRESS state, then return the final aio_error() result.
 */
static int
poll_aio_error(struct aiocb *aiocbp)
{
	int status;

	for (status = aio_error(aiocbp); status == EINPROGRESS;
	    status = aio_error(aiocbp)) {
		usleep(10000);
	}

	return status;
}
142
/*
 * Wait up to 10 seconds for a single event on kq, storing it in *kevent.
 * Returns kevent64()'s result: number of events received, 0 on timeout,
 * or -1 on error.
 */
static int
wait_for_kevent(int kq, struct kevent64_s *kevent)
{
	struct timespec timeout;

	timeout.tv_sec = 10;
	timeout.tv_nsec = 0;

	return kevent64(kq, NULL, 0, kevent, 1, 0, &timeout);
}
150
/*
 * Compare aiocbp's data buffer against an expected buffer filled with
 * 'pattern' (only the low byte is significant to memset(), matching how
 * the source buffers are filled in do_init()).  Returns 0 on match, the
 * nonzero memcmp() difference on mismatch, or ENOMEM on allocation failure.
 */
static int
verify_buffer_data(struct aiocb *aiocbp, uint32_t pattern)
{
	size_t len = aiocbp->aio_nbytes;
	char *expected = malloc(len);
	int result;

	if (expected == NULL) {
		return ENOMEM;
	}

	memset(expected, pattern, len);
	result = memcmp((const void *)aiocbp->aio_buf, (const void *)expected, len);
	free(expected);

	return result;
}
171
172 /*
173 * Test aio_write() and aio_read().
174 * Poll with aio_error() for AIO completion and call aio_return() to retrieve
175 * return status of AIO operation.
176 */
T_DECL(write_read, "Test aio_write() and aio_read(). Poll for AIO completion")
{
	struct aiocb *aiocbp;
	ssize_t retval;
	int err;

	/* One test file, with F_NOCACHE set (uncached I/O). */
	do_init(1, true);

	/* Setup aiocb for aio_write() at offset 0. */
	aiocbp = init_aiocb(0, 0, 0);

	T_WITH_ERRNO;
	err = aio_write(aiocbp);
	T_ASSERT_NE(err, -1, "aio_write() for fd %d offset 0x%llx length 0x%zx",
	    aiocbp->aio_fildes, aiocbp->aio_offset, aiocbp->aio_nbytes);

	/* Poll until the write leaves EINPROGRESS. */
	T_WITH_ERRNO;
	err = poll_aio_error(aiocbp);
	T_ASSERT_NE(err, -1, "aio_error() for aiocbp %p", aiocbp);

	/* The whole buffer must have been written. */
	T_WITH_ERRNO;
	retval = aio_return(aiocbp);
	T_ASSERT_EQ((int)retval, AIO_BUFFER_SIZE,
	    "aio_return() for aiocbp %p bytes_written 0x%zx", aiocbp, retval);

	/* Clear the buffer so the read below must repopulate it from the file. */
	memset((void *)aiocbp->aio_buf, 0, AIO_BUFFER_SIZE);

	T_WITH_ERRNO;
	err = aio_read(aiocbp);
	T_ASSERT_NE(err, -1, "aio_read() for fd %d offset 0x%llx length 0x%zx",
	    aiocbp->aio_fildes, aiocbp->aio_offset, aiocbp->aio_nbytes);

	T_WITH_ERRNO;
	err = poll_aio_error(aiocbp);
	T_ASSERT_NE(err, -1, "aio_error() for aiocbp %p", aiocbp);

	T_WITH_ERRNO;
	retval = aio_return(aiocbp);
	T_ASSERT_EQ((int)retval, AIO_BUFFER_SIZE,
	    "aio_return() for aiocbp %p bytes_read 0x%zx", aiocbp, retval);

	/* Read-back data must match the pattern written above. */
	err = verify_buffer_data(aiocbp, AIO_BUFFER_PATTERN);
	T_ASSERT_EQ(err, 0, "verify data returned from aio_read()");
}
221
222 /*
223 * Test aio_write() and aio_fsync().
224 * Poll with aio_error() for AIO completion and call aio_return() to retrieve
225 * return status of AIO operation.
226 */
T_DECL(write_fsync, "Test aio_write() and aio_fsync(). Poll for AIO completion.")
{
	struct aiocb *aiocbp;
	ssize_t retval;
	int err;

	/* One test file; caching left enabled (enable_nocache == false). */
	do_init(1, false);

	/* Setup aiocb for aio_write() at a 1 MiB offset. */
	aiocbp = init_aiocb(0, (1024 * 1024), 0);

	T_WITH_ERRNO;
	err = aio_write(aiocbp);
	T_ASSERT_NE(err, -1, "aio_write() for fd %d offset 0x%llx length 0x%zx",
	    aiocbp->aio_fildes, aiocbp->aio_offset, aiocbp->aio_nbytes);

	/* Poll until the write leaves EINPROGRESS. */
	T_WITH_ERRNO;
	err = poll_aio_error(aiocbp);
	T_ASSERT_NE(err, -1, "aio_error() for aiocbp %p", aiocbp);

	T_WITH_ERRNO;
	retval = aio_return(aiocbp);
	T_ASSERT_EQ((int)retval, AIO_BUFFER_SIZE,
	    "aio_return() for aiocbp %p bytes_written 0x%zx", aiocbp, retval);

	/* Queue an O_SYNC fsync on the same aiocb and wait for it to finish. */
	T_WITH_ERRNO;
	err = aio_fsync(O_SYNC, aiocbp);
	T_ASSERT_NE(err, -1, "aio_fsync() for aiocbp %p", aiocbp);

	T_WITH_ERRNO;
	err = poll_aio_error(aiocbp);
	T_ASSERT_NE(err, -1, "aio_error() for aiocbp %p", aiocbp);

	/* For aio_fsync(), success is a return status of 0 (not a byte count). */
	T_WITH_ERRNO;
	err = aio_return(aiocbp);
	T_ASSERT_EQ(err, 0, "aio_return() for aiocbp %p", aiocbp);
}
264
265 /*
266 * Test aio_write() and aio_suspend().
267 * Suspend with aio_suspend() until AIO completion and call aio_return() to
268 * retrieve return status of AIO operation.
269 */
T_DECL(write_suspend, "Test aio_write() and aio_suspend(). Suspend until AIO completion.")
{
	struct aiocb *aiocbp, *aiocb_list[AIO_LIST_MAX];
	struct timespec timeout;
	ssize_t retval;
	int err;

	do_init(1, false);

	/* Setup aiocb for aio_write(); aio_suspend() takes a list, so wrap it. */
	aiocbp = init_aiocb(0, (128 * 1024), 0);
	aiocb_list[0] = aiocbp;

	T_WITH_ERRNO;
	err = aio_write(aiocbp);
	T_ASSERT_NE(err, -1, "aio_write() for fd %d offset 0x%llx length 0x%zx",
	    aiocbp->aio_fildes, aiocbp->aio_offset, aiocbp->aio_nbytes);

	/* Block until the write completes, failing after a 1 second timeout. */
	T_WITH_ERRNO;
	timeout.tv_sec = 1;
	timeout.tv_nsec = 0;
	err = aio_suspend((const struct aiocb *const *)aiocb_list, 1, &timeout);
	T_ASSERT_NE(err, -1, "aio_suspend() with 1 sec timeout");

	/* The whole buffer must have been written. */
	T_WITH_ERRNO;
	retval = aio_return(aiocbp);
	T_ASSERT_EQ((int)retval, AIO_BUFFER_SIZE,
	    "aio_return() for aiocbp %p bytes_written 0x%zx", aiocbp, retval);
}
299
300 /*
301 * Test lio_listio() with LIO_WAIT.
302 * Initiate a list of AIO operations and wait for their completions.
303 */
T_DECL(lio_listio_wait, "Test lio_listio() with LIO_WAIT.")
{
	struct aiocb *aiocbp, *aiocb_list[AIO_LIST_MAX];
	ssize_t retval;
	int i, err;

	do_init(AIO_LIST_MAX, true);

	/* Setup aiocbs for lio_listio(): one LIO_WRITE per file, 1 MiB-staggered offsets. */
	for (i = 0; i < AIO_LIST_MAX; i++) {
		aiocbp = init_aiocb(i, (i * 1024 * 1024), LIO_WRITE);
		aiocb_list[i] = aiocbp;
	}

	/* LIO_WAIT: lio_listio() returns only after all operations complete. */
	T_WITH_ERRNO;
	err = lio_listio(LIO_WAIT, aiocb_list, AIO_LIST_MAX, NULL);
	T_ASSERT_NE(err, -1, "lio_listio(LIO_WAIT) for %d AIO operations",
	    AIO_LIST_MAX);

	/* Every operation must have written its full buffer. */
	for (i = 0; i < AIO_LIST_MAX; i++) {
		aiocbp = aiocb_list[i];

		T_WITH_ERRNO;
		retval = aio_return(aiocbp);
		T_ASSERT_EQ((int)retval, AIO_BUFFER_SIZE,
		    "aio_return() for aiocbp(%d) %p bytes_written 0x%zx",
		    i, aiocbp, retval);
	}
}
333
334 /*
335 * Test lio_listio() with LIO_NOWAIT.
336 * Initiate a list of AIO operations and poll for their completions.
337 */
T_DECL(lio_listio_nowait, "Test lio_listio() with LIO_NOWAIT.")
{
	struct aiocb *aiocbp, *aiocb_list[AIO_LIST_MAX];
	ssize_t retval;
	int i, err;

	do_init(AIO_LIST_MAX, true);

	/* Setup aiocbs for lio_listio(): one LIO_WRITE per file, 1 MiB-staggered offsets. */
	for (i = 0; i < AIO_LIST_MAX; i++) {
		aiocbp = init_aiocb(i, (i * 1024 * 1024), LIO_WRITE);
		aiocb_list[i] = aiocbp;
	}

	/* LIO_NOWAIT: lio_listio() returns once the operations are queued. */
	T_WITH_ERRNO;
	err = lio_listio(LIO_NOWAIT, aiocb_list, AIO_LIST_MAX, NULL);
	T_ASSERT_NE(err, -1, "lio_listio(LIO_NOWAIT) for %d AIO operations",
	    AIO_LIST_MAX);

	/* Poll each operation to completion, then check the full buffer was written. */
	for (i = 0; i < AIO_LIST_MAX; i++) {
		aiocbp = aiocb_list[i];

		T_WITH_ERRNO;
		err = poll_aio_error(aiocbp);
		T_ASSERT_NE(err, -1, "aio_error() for aiocbp %p", aiocbp);

		T_WITH_ERRNO;
		retval = aio_return(aiocbp);
		T_ASSERT_EQ((int)retval, AIO_BUFFER_SIZE,
		    "aio_return() for aiocbp(%d) %p bytes_written 0x%zx",
		    i, aiocbp, retval);
	}
}
371
372 /*
373 * Test lio_listio() and aio_cancel().
374 * Initiate a list of AIO operations and attempt to cancel them with
375 * aio_cancel().
376 */
377 T_DECL(lio_listio_cancel, "Test lio_listio() and aio_cancel().")
378 {
379 struct aiocb *aiocbp, *aiocb_list[AIO_LIST_MAX];
380 char *buffer;
381 ssize_t retval;
382 int i, err;
383
384 do_init(AIO_LIST_MAX, true);
385
386 /* Setup aiocbs for lio_listio(). */
387 for (i = 0; i < AIO_LIST_MAX; i++) {
388 aiocbp = init_aiocb(i, (i * 1024 * 1024), LIO_WRITE);
389 aiocb_list[i] = aiocbp;
390 }
391
392 T_WITH_ERRNO;
393 err = lio_listio(LIO_NOWAIT, aiocb_list, AIO_LIST_MAX, NULL);
394 T_ASSERT_NE(err, -1, "lio_listio() for %d AIO operations", AIO_LIST_MAX);
395
396 for (i = 0; i < AIO_LIST_MAX; i++) {
397 aiocbp = aiocb_list[i];
398
399 T_WITH_ERRNO;
400 err = aio_cancel(g_fds[i], aiocbp);
401 T_ASSERT_TRUE(((err & (AIO_ALLDONE | AIO_CANCELED | AIO_NOTCANCELED)) != 0),
402 "aio_cancel() for aiocbp(%d) %p err %d", i, aiocbp, err);
403
404 if (err == AIO_NOTCANCELED || err == AIO_ALLDONE) {
405 if (err == AIO_NOTCANCELED) {
406 T_WITH_ERRNO;
407 err = poll_aio_error(aiocbp);
408 T_ASSERT_NE(err, -1, "aio_error() for aiocbp %p", aiocbp);
409 }
410 T_WITH_ERRNO;
411 retval = aio_return(aiocbp);
412 T_ASSERT_EQ((int)retval, AIO_BUFFER_SIZE,
413 "aio_return() for aiocbp(%d) %p bytes_written 0x%zx",
414 i, aiocbp, retval);
415 } else if (err == AIO_CANCELED) {
416 T_WITH_ERRNO;
417 retval = aio_return(aiocbp);
418 T_ASSERT_EQ((int)retval, -1,
419 "aio_return() for aiocbp(%d) %p", i, aiocbp);
420 }
421 }
422 }
423
424 /*
425 * Test aio_write() and aio_read().
426 * Use kevent for AIO completion and return status.
427 */
T_DECL(write_read_kevent, "Test aio_write() and aio_read(). Use kevent for AIO completion and return status.")
{
	struct aiocb *aiocbp;
	struct kevent64_s kevent;
	void *udata1, *udata2;
	int err, kq;

	do_init(1, true);

	kq = kqueue();
	T_ASSERT_NE(kq, -1, "Create kqueue");

	/*
	 * Setup aiocb for aio_write().  With SIGEV_KEVENT, sigev_signo carries
	 * the kqueue fd and sigev_value becomes the delivered kevent's udata.
	 */
	aiocbp = init_aiocb(0, 0, 0);
	aiocbp->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	aiocbp->aio_sigevent.sigev_signo = kq;
	aiocbp->aio_sigevent.sigev_value.sival_ptr = (void *)&udata1;

	T_WITH_ERRNO;
	err = aio_write(aiocbp);
	T_ASSERT_NE(err, -1, "aio_write() for fd %d offset 0x%llx length 0x%zx",
	    aiocbp->aio_fildes, aiocbp->aio_offset, aiocbp->aio_nbytes);

	memset(&kevent, 0, sizeof(kevent));
	err = wait_for_kevent(kq, &kevent);
	T_ASSERT_NE(err, -1, "Listen for AIO completion event on kqueue %d", kq);

	if (err > 0) {
		/* ext[0] carries the operation's error, ext[1] the byte count. */
		T_ASSERT_EQ(err, 1, "num event returned %d", err);
		T_ASSERT_EQ((struct aiocb *)kevent.ident, aiocbp, "kevent.ident %p",
		    (struct aiocb *)kevent.ident);
		T_ASSERT_EQ(kevent.filter, EVFILT_AIO, "kevent.filter %d",
		    kevent.filter);
		T_ASSERT_EQ((void **)kevent.udata, &udata1, "kevent.udata %p",
		    (char *)kevent.udata);
		T_ASSERT_EQ((int)kevent.ext[0], 0, "kevent.ext[0] (err %d)",
		    (int)kevent.ext[0]);
		T_ASSERT_EQ((int)kevent.ext[1], AIO_BUFFER_SIZE,
		    "kevent.ext[1] (bytes_written 0x%x)", (int)kevent.ext[1]);
	} else {
		T_FAIL("Timedout listening for AIO completion event on kqueue %d", kq);
	}

	/* Reuse the same aiocb for the read, with a distinct udata cookie. */
	aiocbp->aio_sigevent.sigev_value.sival_ptr = (void *)&udata2;

	T_WITH_ERRNO;
	err = aio_read(aiocbp);
	T_ASSERT_NE(err, -1, "aio_read() for fd %d offset 0x%llx length 0x%zx",
	    aiocbp->aio_fildes, aiocbp->aio_offset, aiocbp->aio_nbytes);

	memset(&kevent, 0, sizeof(kevent));
	err = wait_for_kevent(kq, &kevent);
	T_ASSERT_NE(err, -1, "Listen for AIO completion event on kqueue %d", kq);

	if (err > 0) {
		T_ASSERT_EQ(err, 1, "num event returned %d", err);
		T_ASSERT_EQ((struct aiocb *)kevent.ident, aiocbp, "kevent.ident %p",
		    (struct aiocb *)kevent.ident);
		T_ASSERT_EQ(kevent.filter, EVFILT_AIO, "kevent.filter %d",
		    kevent.filter);
		T_ASSERT_EQ((void **)kevent.udata, &udata2, "kevent.udata %p",
		    (char *)kevent.udata);
		T_ASSERT_EQ((int)kevent.ext[0], 0, "kevent.ext[0] (err %d)",
		    (int)kevent.ext[0]);
		T_ASSERT_EQ((int)kevent.ext[1], AIO_BUFFER_SIZE,
		    "kevent.ext[1] (bytes_read 0x%x)", (int)kevent.ext[1]);
	} else {
		T_FAIL("Timedout listening for AIO completion event on kqueue %d", kq);
	}
}
499
500 /*
501 * Test lio_listio() with LIO_NOWAIT.
502 * Initiate a list of AIO operations and use kevent for their completion
503 * notification and status.
504 */
T_DECL(lio_listio_kevent, "Test lio_listio() with kevent.")
{
	struct aiocb *aiocbp, *aiocb_list[AIO_LIST_MAX];
	struct kevent64_s kevent;
	int i, err, kq;

	do_init(AIO_LIST_MAX, true);

	kq = kqueue();
	T_ASSERT_NE(kq, -1, "Create kqueue");

	/*
	 * Setup aiocbs for lio_listio().  Each aiocb notifies the kqueue on
	 * completion and carries its test-file path as the udata cookie.
	 */
	for (i = 0; i < AIO_LIST_MAX; i++) {
		aiocbp = init_aiocb(i, (i * 1024 * 1024), LIO_WRITE);
		aiocbp->aio_sigevent.sigev_notify = SIGEV_KEVENT;
		aiocbp->aio_sigevent.sigev_signo = kq;
		aiocbp->aio_sigevent.sigev_value.sival_ptr = (void *)g_testfiles[i];
		aiocb_list[i] = aiocbp;
	}

	T_WITH_ERRNO;
	err = lio_listio(LIO_NOWAIT, aiocb_list, AIO_LIST_MAX, NULL);
	T_ASSERT_NE(err, -1, "lio_listio(LIO_NOWAIT) for %d AIO operations",
	    AIO_LIST_MAX);

	/* Completion events may arrive in any order; match each by ident. */
	for (i = 0; i < AIO_LIST_MAX; i++) {
		memset(&kevent, 0, sizeof(kevent));
		err = wait_for_kevent(kq, &kevent);
		T_ASSERT_NE(err, -1, "Listen for AIO completion event on kqueue %d", kq);
		if (err > 0) {
			int idx;

			aiocbp = NULL;
			T_ASSERT_EQ(err, 1, "num event returned %d", err);

			for (idx = 0; idx < AIO_LIST_MAX; idx++) {
				if (aiocb_list[idx] == (struct aiocb *)kevent.ident) {
					aiocbp = (struct aiocb *)kevent.ident;
					break;
				}
			}

			/*
			 * Stop if the event's ident matches none of our aiocbs:
			 * 'idx' would otherwise index g_testfiles[] out of
			 * bounds below.
			 */
			T_QUIET;
			T_ASSERT_NOTNULL(aiocbp, "kevent.ident %p is a submitted aiocb",
			    (struct aiocb *)kevent.ident);

			T_ASSERT_EQ((struct aiocb *)kevent.ident, aiocbp, "kevent.ident %p",
			    (struct aiocb *)kevent.ident);
			T_ASSERT_EQ(kevent.filter, EVFILT_AIO, "kevent.filter %d",
			    kevent.filter);
			T_ASSERT_EQ((void *)kevent.udata, (void *)g_testfiles[idx],
			    "kevent.udata %p", (char *)kevent.udata);
			T_ASSERT_EQ((int)kevent.ext[0], 0, "kevent.ext[0] (err %d)",
			    (int)kevent.ext[0]);
			T_ASSERT_EQ((int)kevent.ext[1], AIO_BUFFER_SIZE,
			    "kevent.ext[1] (bytes_read 0x%x)", (int)kevent.ext[1]);
		} else {
			T_FAIL("Timedout listening for AIO completion event on kqueue %d", kq);
		}
	}
}
565