1 /*
2 * Copyright (c) 2014-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/cdefs.h>
30
31 #include <IOKit/assert.h>
32 #include <IOKit/system.h>
33 #include <IOKit/IOLib.h>
34 #include <IOKit/IOMemoryDescriptor.h>
35 #include <IOKit/IOMapper.h>
36 #include <IOKit/IODMACommand.h>
37 #include <IOKit/IOKitKeysPrivate.h>
38 #include <Kernel/IOKitKernelInternal.h>
39 #include <IOKit/IOUserClient.h>
40 #include <IOKit/IOService.h>
41 #include "Tests.h"
42
43 #ifndef __LP64__
44 #include <IOKit/IOSubMemoryDescriptor.h>
45 #endif /* !__LP64__ */
46 #include <IOKit/IOSubMemoryDescriptor.h>
47 #include <IOKit/IOMultiMemoryDescriptor.h>
48 #include <IOKit/IOBufferMemoryDescriptor.h>
49 #include <IOKit/IOGuardPageMemoryDescriptor.h>
50
51 #include <IOKit/IOKitDebug.h>
52 #include <libkern/OSDebug.h>
53 #include <sys/uio.h>
54 #include <libkern/sysctl.h>
55 #include <sys/sysctl.h>
56
57 __BEGIN_DECLS
58 #include <vm/pmap.h>
59 #include <vm/vm_pageout.h>
60 #include <mach/memory_object_types.h>
61 #include <device/device_port.h>
62
63 #include <mach/vm_prot.h>
64 #include <mach/mach_vm.h>
65 #include <mach/vm_param.h>
66 #include <vm/vm_fault.h>
67 #include <vm/vm_protos.h>
68 #include <vm/vm_map_xnu.h>
69 #include <vm/vm_kern_xnu.h>
70 __END_DECLS
71
72
73 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
74
75 #if DEVELOPMENT || DEBUG
76
77 extern SInt32 gIOMemoryReferenceCount;
78
79 static int
IOMultMemoryDescriptorTest(int newValue)80 IOMultMemoryDescriptorTest(int newValue)
81 {
82 IOMemoryDescriptor * mds[3];
83 IOMultiMemoryDescriptor * mmd;
84 IOMemoryMap * map;
85 void * addr;
86 uint8_t * data;
87 uint32_t i;
88 IOAddressRange ranges[2];
89
90 data = (typeof(data))IOMallocAligned(ptoa(8), page_size);
91 for (i = 0; i < ptoa(8); i++) {
92 data[i] = ((uint8_t) atop(i)) | 0xD0;
93 }
94
95 ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
96 ranges[0].length = ptoa(4);
97 ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
98 ranges[1].length = ptoa(4);
99
100 mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, 2, kIODirectionOutIn, kernel_task);
101 assert(mds[0]);
102 {
103 uint64_t dmaLen, dmaOffset;
104 dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
105 assert(0 == dmaOffset);
106 assert(ptoa(1) == dmaLen);
107 }
108 mds[0]->release();
109 mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) (data + page_size - 2), 4, kIODirectionOutIn, kernel_task);
110 assert(mds[0]);
111 {
112 uint64_t dmaLen, dmaOffset;
113 dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
114 assert((page_size - 2) == dmaOffset);
115 assert(ptoa(2) == dmaLen);
116 }
117 mds[0]->release();
118
119 mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
120 {
121 uint64_t dmaLen, dmaOffset;
122 dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
123 assert(0 == dmaOffset);
124 assert(ptoa(8) == dmaLen);
125 }
126 mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
127 {
128 uint64_t dmaLen, dmaOffset;
129 dmaLen = mds[1]->getDMAMapLength(&dmaOffset);
130 assert(0 == dmaOffset);
131 assert(ptoa(2) == dmaLen);
132 }
133 mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);
134
135 mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
136 {
137 uint64_t dmaLen, dmaOffset;
138 dmaLen = mmd->getDMAMapLength(&dmaOffset);
139 assert(0 == dmaOffset);
140 assert(ptoa(11) == dmaLen);
141 }
142 mds[2]->release();
143 mds[1]->release();
144 mds[0]->release();
145 map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall, ptoa(7), mmd->getLength() - ptoa(7));
146 mmd->release();
147 assert(map);
148
149 addr = (void *) map->getVirtualAddress();
150 assert(ptoa(4) == map->getLength());
151 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
152 assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
153 assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
154 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
155 map->release();
156 IOFreeAligned(data, ptoa(8));
157
158 return 0;
159 }
160
161
162
// <rdar://problem/30102458>
static int
IODMACommandForceDoubleBufferTest(int newValue)
{
	IOReturn ret;
	IOBufferMemoryDescriptor * bmd;
	IODMACommand * dma;
	uint32_t dir, data;
	IODMACommand::SegmentOptions segOptions =
	{
		.fStructSize = sizeof(segOptions),
		.fNumAddressBits = 64,
		.fMaxSegmentSize = 0x2000,
		.fMaxTransferSize = 128 * 1024,
		.fAlignment = 1,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = 1
	};
	IODMACommand::Segment64 segments[1];
	UInt32 numSegments;
	UInt64 dmaOffset;


	// Run once per direction: kIODirectionIn, kIODirectionOut,
	// kIODirectionInOut (the loop exits after kIODirectionInOut below).
	for (dir = kIODirectionIn;; dir++) {
		bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
		    dir | kIOMemoryPageable, ptoa(8));
		assert(bmd);
		{
			uint64_t dmaLen, dmaOffset;
			dmaLen = bmd->getDMAMapLength(&dmaOffset);
			assert(0 == dmaOffset);
			assert(ptoa(8) == dmaLen);
		}

		// Tag word 0 so we can tell later whether the bounce copy preserved it.
		((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir;

		ret = bmd->prepare((IODirection) dir);
		assert(kIOReturnSuccess == ret);

		dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
		    kIODMAMapOptionMapped,
		    NULL, NULL);
		assert(dma);
		ret = dma->setMemoryDescriptor(bmd, true);
		assert(kIOReturnSuccess == ret);

		// Force use of a double (bounce) buffer regardless of need.
		ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut);
		assert(kIOReturnSuccess == ret);

		dmaOffset = 0;
		numSegments = 1;
		ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
		assert(kIOReturnSuccess == ret);
		assert(1 == numSegments);

		if (kIODirectionOut & dir) {
			// Out transfers: the client buffer must still hold the tag.
			data = ((uint32_t*) bmd->getBytesNoCopy())[0];
			assertf((0x53535300 | dir) == data, "mismatch 0x%x", data);
		}
		if (kIODirectionIn & dir) {
			// In transfers: write through the DMA-visible address...
			IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir);
		}

		ret = dma->clearMemoryDescriptor(true);
		assert(kIOReturnSuccess == ret);
		dma->release();

		bmd->complete((IODirection) dir);

		if (kIODirectionIn & dir) {
			// ...and verify complete() copied it back into the client buffer.
			data = ((uint32_t*) bmd->getBytesNoCopy())[0];
			assertf((0x11223300 | dir) == data, "mismatch 0x%x", data);
		}

		bmd->release();

		if (dir == kIODirectionInOut) {
			break;
		}
	}

	return 0;
}
246
// <rdar://problem/34322778>
static int __unused
IODMACommandLocalMappedNonContig(int newValue)
{
	// Verify that, when a device IOMapper is in play, a DMA mapping of a
	// buffer built from fragmented VM entries yields a remapped IOVA, i.e.
	// one that differs from the raw physical address.
	IOReturn kr;
	IOMemoryDescriptor * md;
	IODMACommand * dma;
	OSDictionary * matching;
	IOService * device;
	IOMapper * mapper;
	IODMACommand::SegmentOptions segOptions =
	{
		.fStructSize = sizeof(segOptions),
		.fNumAddressBits = 64,
		.fMaxSegmentSize = 128 * 1024,
		.fMaxTransferSize = 128 * 1024,
		.fAlignment = 1,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = 1
	};
	IODMACommand::Segment64 segments[1];
	UInt32 numSegments;
	UInt64 dmaOffset;
	UInt64 segPhys;
	mach_vm_address_t buffer;
	vm_size_t bufSize = ptoa(4);

	// Nothing to test without a system mapper (e.g. no IOMMU present).
	if (!IOMapper::gSystem) {
		return 0;
	}

	buffer = 0;
	kr = mach_vm_allocate_kernel(kernel_map, &buffer, bufSize,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IOKIT));
	assert(KERN_SUCCESS == kr);

	// fragment the vmentries
	kr = mach_vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE);
	assert(KERN_SUCCESS == kr);

	// Deliberately unaligned sub-range crossing the fragmented entries.
	md = IOMemoryDescriptor::withAddressRange(
		buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task);
	assert(md);
	kr = md->prepare(kIODirectionOutIn);
	assert(kIOReturnSuccess == kr);

	// Physical address of the first segment, for comparison with the IOVA.
	segPhys = md->getPhysicalSegment(0, NULL, 0);

	// Borrow the XHC1 (USB host controller) mapper when available; if the
	// service is absent, mapper stays NULL and the comparison is skipped.
	matching = IOService::nameMatching("XHC1");
	assert(matching);
	device = IOService::copyMatchingService(matching);
	matching->release();
	mapper = device ? IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL;
	OSSafeReleaseNULL(device);

	dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
	    kIODMAMapOptionMapped,
	    mapper, NULL);
	assert(dma);
	kr = dma->setMemoryDescriptor(md, true);
	assert(kIOReturnSuccess == kr);

	dmaOffset = 0;
	numSegments = 1;
	kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
	assert(kIOReturnSuccess == kr);
	assert(1 == numSegments);

	if (mapper) {
		// The mapped IOVA must not equal the physical address.
		assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma);
	}

	kr = dma->clearMemoryDescriptor(true);
	assert(kIOReturnSuccess == kr);
	dma->release();

	kr = md->complete(kIODirectionOutIn);
	assert(kIOReturnSuccess == kr);
	md->release();

	kr = mach_vm_deallocate(kernel_map, buffer, bufSize);
	assert(KERN_SUCCESS == kr);
	OSSafeReleaseNULL(mapper);

	return 0;
}
333
// <rdar://problem/30102458>
static int
IOMemoryRemoteTest(int newValue)
{
	// A kIOMemoryRemote descriptor (task TASK_NULL) describes addresses not
	// backed by local memory. getPhysicalSegment() and an
	// unmapped/iterate-only IODMACommand must pass the range addresses
	// through verbatim; nothing here is ever dereferenced.
	IOReturn ret;
	IOMemoryDescriptor * md;
	IOByteCount offset, length;
	addr64_t addr;
	uint32_t idx;

	IODMACommand * dma;
	IODMACommand::SegmentOptions segOptions =
	{
		.fStructSize = sizeof(segOptions),
		.fNumAddressBits = 64,
		.fMaxSegmentSize = 0x2000,
		.fMaxTransferSize = 128 * 1024,
		.fAlignment = 1,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = 1
	};
	IODMACommand::Segment64 segments[1];
	UInt32 numSegments;
	UInt64 dmaOffset;

	// Arbitrary "remote" bus addresses.
	IOAddressRange ranges[2] = {
		{ 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210, 0x2000 },
	};

	md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn | kIOMemoryRemote, TASK_NULL);
	assert(md);

	// Remote memory cannot be mapped or read locally (kept for reference):
	// md->map();
	// md->readBytes(0, &idx, sizeof(idx));

	ret = md->prepare(kIODirectionOutIn);
	assert(kIOReturnSuccess == ret);

	printf("remote md flags 0x%qx, r %d\n",
	    md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags())));

	// Physical-segment walk must echo the input ranges unchanged.
	for (offset = 0, idx = 0; true; offset += length, idx++) {
		addr = md->getPhysicalSegment(offset, &length, 0);
		if (!length) {
			break;
		}
		assert(idx < 2);
		assert(addr == ranges[idx].address);
		assert(length == ranges[idx].length);
	}
	assert(offset == md->getLength());

	dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
	    kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly,
	    NULL, NULL);
	assert(dma);
	ret = dma->setMemoryDescriptor(md, true);
	assert(kIOReturnSuccess == ret);

	// DMA segment iteration must echo the input ranges unchanged too.
	for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++) {
		numSegments = 1;
		ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
		assert(kIOReturnSuccess == ret);
		assert(1 == numSegments);
		assert(idx < 2);
		assert(segments[0].fIOVMAddr == ranges[idx].address);
		assert(segments[0].fLength == ranges[idx].length);
	}
	assert(dmaOffset == md->getLength());

	ret = dma->clearMemoryDescriptor(true);
	assert(kIOReturnSuccess == ret);
	dma->release();
	md->complete(kIODirectionOutIn);
	md->release();

	return 0;
}
412
413 static IOReturn
IOMemoryPrefaultTest(uint32_t options)414 IOMemoryPrefaultTest(uint32_t options)
415 {
416 IOBufferMemoryDescriptor * bmd;
417 IOMemoryMap * map;
418 IOReturn kr;
419 uint32_t data;
420 uint32_t * p;
421 IOSimpleLock * lock;
422
423 lock = IOSimpleLockAlloc();
424 assert(lock);
425
426 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
427 kIODirectionOutIn | kIOMemoryPageable, ptoa(8));
428 assert(bmd);
429 kr = bmd->prepare();
430 assert(KERN_SUCCESS == kr);
431
432 map = bmd->map(kIOMapPrefault);
433 assert(map);
434
435 p = (typeof(p))map->getVirtualAddress();
436 IOSimpleLockLock(lock);
437 data = p[0];
438 IOSimpleLockUnlock(lock);
439
440 IOLog("IOMemoryPrefaultTest %d\n", data);
441
442 map->release();
443 bmd->release();
444 IOSimpleLockFree(lock);
445
446 return kIOReturnSuccess;
447 }
448
449 static IOReturn
IOBMDOverflowTest(uint32_t options)450 IOBMDOverflowTest(uint32_t options)
451 {
452 IOBufferMemoryDescriptor * bmd;
453
454 bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, kIOMemoryPageable | kIODirectionOut,
455 0xffffffffffffffff, 0);
456 assert(NULL == bmd);
457
458 return kIOReturnSuccess;
459 }
460
461 static IOReturn
IOBMDSetLengthMapTest(uint32_t options)462 IOBMDSetLengthMapTest(uint32_t options)
463 {
464 IOBufferMemoryDescriptor * bmd;
465 IOMemoryMap * map;
466
467 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(
468 kernel_task, kIOMemoryDirectionOutIn | kIOMemoryKernelUserShared, 0x4000, 0x4000);
469 assert(bmd);
470
471 bmd->setLength(0x100);
472 map = bmd->createMappingInTask(current_task(), 0, kIOMapAnywhere, 0, 0);
473 assert(map);
474 OSSafeReleaseNULL(map);
475
476 bmd->setLength(0x200);
477 map = bmd->createMappingInTask(current_task(), 0, kIOMapAnywhere, 0, 0);
478 assert(map);
479 OSSafeReleaseNULL(map);
480
481 bmd->release();
482
483 return kIOReturnSuccess;
484 }
485
486 // <rdar://problem/26375234>
487 static IOReturn
ZeroLengthTest(int newValue)488 ZeroLengthTest(int newValue)
489 {
490 IOMemoryDescriptor * md;
491
492 md = IOMemoryDescriptor::withAddressRange(
493 0, 0, kIODirectionNone, current_task());
494 assert(md);
495 md->prepare();
496 md->complete();
497 md->release();
498 return 0;
499 }
500
501 // <rdar://problem/27002624>
502 static IOReturn
BadFixedAllocTest(int newValue)503 BadFixedAllocTest(int newValue)
504 {
505 IOBufferMemoryDescriptor * bmd;
506 IOMemoryMap * map;
507
508 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
509 kIODirectionIn | kIOMemoryPageable, ptoa(1));
510 assert(bmd);
511 map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
512 assert(!map);
513
514 bmd->release();
515 return 0;
516 }
517
518 // <rdar://problem/26466423>
519 static IOReturn
IODirectionPrepareNoZeroFillTest(int newValue)520 IODirectionPrepareNoZeroFillTest(int newValue)
521 {
522 IOBufferMemoryDescriptor * bmd;
523
524 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
525 kIODirectionIn | kIOMemoryPageable, ptoa(24));
526 assert(bmd);
527 bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill));
528 bmd->prepare(kIODirectionIn);
529 bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid));
530 bmd->complete(kIODirectionIn);
531 bmd->release();
532 return 0;
533 }
534
// <rdar://problem/28190483>
static IOReturn
IOMemoryMapTest(uint32_t options)
{
	// Map a user-task range read-only, then have the owner overwrite part
	// of it. With kIOMemoryMapCopyOnWrite the mapping must keep the data
	// snapshotted at map time; without it the mapping must see the change.
	IOBufferMemoryDescriptor * bmd;
	IOMemoryDescriptor * md;
	IOMemoryMap * map;
	uint32_t data;
	user_addr_t p;
	uint8_t * p2;
	int r;
	uint64_t time, nano;

	// Pageable buffer in the current task; +0x800 makes the tested range
	// deliberately not page aligned.
	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
	    kIODirectionOutIn | kIOMemoryPageable, 0x4018 + 0x800);
	assert(bmd);
	p = (typeof(p))bmd->getBytesNoCopy();
	p += 0x800;
	// Tag the first word of each of four pages via copyout.
	data = 0x11111111;
	r = copyout(&data, p, sizeof(data));
	assert(r == 0);
	data = 0x22222222;
	r = copyout(&data, p + 0x1000, sizeof(data));
	assert(r == 0);
	data = 0x33333333;
	r = copyout(&data, p + 0x2000, sizeof(data));
	assert(r == 0);
	data = 0x44444444;
	r = copyout(&data, p + 0x3000, sizeof(data));
	assert(r == 0);

	md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
	    kIODirectionOut | options,
	    current_task());
	assert(md);
	// Time the map() call for the log line below.
	time = mach_absolute_time();
	map = md->map(kIOMapReadOnly);
	time = mach_absolute_time() - time;
	assert(map);
	absolutetime_to_nanoseconds(time, &nano);

	// The fresh mapping must show all four tags.
	p2 = (typeof(p2))map->getVirtualAddress();
	assert(0x11 == p2[0]);
	assert(0x22 == p2[0x1000]);
	assert(0x33 == p2[0x2000]);
	assert(0x44 == p2[0x3000]);

	// Overwrite page 2 in the source task after the mapping exists.
	data = 0x99999999;
	r = copyout(&data, p + 0x2000, sizeof(data));
	assert(r == 0);

	assert(0x11 == p2[0]);
	assert(0x22 == p2[0x1000]);
	assert(0x44 == p2[0x3000]);
	if (kIOMemoryMapCopyOnWrite & options) {
		// COW mapping keeps the snapshot taken at map time.
		assert(0x33 == p2[0x2000]);
	} else {
		// Shared mapping observes the new value.
		assert(0x99 == p2[0x2000]);
	}

	IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
	    kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
	    nano);

	map->release();
	md->release();
	bmd->release();

	return kIOReturnSuccess;
}
605
606 static int
IOMemoryMapCopyOnWriteTest(int newValue)607 IOMemoryMapCopyOnWriteTest(int newValue)
608 {
609 IOMemoryMapTest(0);
610 IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
611 return 0;
612 }
613
static int
AllocationNameTest(int newValue)
{
	// Verify thread allocation-name tagging: memory wired while a
	// kern_allocation_name is set on the current thread is accounted to
	// that name.
	IOMemoryDescriptor * bmd;
	kern_allocation_name_t name, prior;

	name = kern_allocation_name_allocate("com.apple.iokit.test", 0);
	assert(name);

	// Tag subsequent allocations on this thread; restored below.
	prior = thread_set_allocation_name(name);

	bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
	    kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
	    ptoa(13));
	assert(bmd);
	// prepare() on the pageable buffer while the name is active; note the
	// prepare is intentionally left unbalanced here.
	bmd->prepare();

	thread_set_allocation_name(prior);
	kern_allocation_name_release(name);

	// newValue == 7 deliberately leaks the prepared descriptor so the
	// tagged allocation remains observable in accounting tools.
	if (newValue != 7) {
		bmd->release();
	}

	return 0;
}
640
641 static IOReturn
IOGuardPageMDTest(int newValue)642 IOGuardPageMDTest(int newValue)
643 {
644 constexpr size_t MAX_LEFT_GUARD_PAGES = 5;
645 constexpr size_t MAX_RIGHT_GUARD_PAGES = 5;
646
647 IOMemoryDescriptor * mds[3];
648 IOMemoryDescriptor * dataMD;
649 IOMultiMemoryDescriptor * mmd;
650 IOBufferMemoryDescriptor * iobmd;
651 IOMemoryMap * map;
652 void * addr;
653 uint8_t * data;
654 uint32_t i;
655
656 data = (typeof(data))IOMallocAligned(page_size, page_size);
657 for (i = 0; i < page_size; i++) {
658 data[i] = (uint8_t)(i & 0xFF);
659 }
660
661 dataMD = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, page_size, kIODirectionOutIn, kernel_task);
662 assert(dataMD);
663
664
665 for (size_t leftGuardSize = 1; leftGuardSize < MAX_LEFT_GUARD_PAGES; leftGuardSize++) {
666 for (size_t rightGuardSize = 1; rightGuardSize < MAX_RIGHT_GUARD_PAGES; rightGuardSize++) {
667 mds[0] = IOGuardPageMemoryDescriptor::withSize(page_size * leftGuardSize);
668 assert(mds[0]);
669
670 mds[1] = dataMD;
671 mds[1]->retain();
672
673 mds[2] = IOGuardPageMemoryDescriptor::withSize(page_size * rightGuardSize);
674 assert(mds[2]);
675
676 mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
677
678 OSSafeReleaseNULL(mds[2]);
679 OSSafeReleaseNULL(mds[1]);
680 OSSafeReleaseNULL(mds[0]);
681
682 map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, mmd->getLength());
683
684 OSSafeReleaseNULL(mmd);
685 assert(map);
686 addr = (void *)map->getAddress();
687
688 // check data
689 for (i = 0; i < page_size; i++) {
690 assert(*(uint8_t *)((uintptr_t)addr + page_size * leftGuardSize + i) == (uint8_t)(i & 0xFF));
691 }
692
693 // check map length
694 assert(page_size * leftGuardSize + page_size + page_size * rightGuardSize == map->getLength());
695
696 // check page protections
697 for (i = 0; i < leftGuardSize + 1 + rightGuardSize; i++) {
698 mach_vm_address_t regionAddr = (vm_address_t)addr + i * page_size;
699 mach_vm_size_t regionSize;
700 vm_region_extended_info regionInfo;
701 mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
702 mach_port_t unused;
703 kern_return_t kr = mach_vm_region(kernel_map, ®ionAddr, ®ionSize, VM_REGION_EXTENDED_INFO, (vm_region_info_t)®ionInfo, &count, &unused);
704 assert(kr == KERN_SUCCESS);
705 if (i < leftGuardSize || i > leftGuardSize + 1) {
706 assert(regionInfo.protection == VM_PROT_NONE);
707 }
708 }
709 OSSafeReleaseNULL(map);
710 }
711 }
712
713 OSSafeReleaseNULL(dataMD);
714 IOFreeAligned(data, page_size);
715
716 for (size_t iobmdCapacity = page_size / 8; iobmdCapacity < page_size * 10; iobmdCapacity += page_size / 8) {
717 iobmd = IOBufferMemoryDescriptor::inTaskWithGuardPages(kernel_task, kIODirectionOutIn, iobmdCapacity);
718
719 // Capacity should be rounded up to page size
720 assert(iobmd->getLength() == round_page(iobmdCapacity));
721
722 // Buffer should be page aligned
723 addr = iobmd->getBytesNoCopy();
724 assert((vm_offset_t)addr == round_page((vm_offset_t)addr));
725
726 // fill buffer
727 for (size_t i = 0; i < iobmdCapacity; i++) {
728 *((char *)addr + i) = (char)(i & 0xFF);
729 }
730
731 map = iobmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique, 0, iobmd->getLength());
732 assert(map->getLength() == iobmd->getLength());
733
734 // check buffer
735 for (size_t i = 0; i < iobmdCapacity; i++) {
736 assert(*((char *)map->getAddress() + i) == (char)(i & 0xFF));
737 }
738
739 OSSafeReleaseNULL(map);
740 OSSafeReleaseNULL(iobmd);
741 }
742
743 return kIOReturnSuccess;
744 }
745
746 static IOReturn
IOMDContextTest(int newValue)747 IOMDContextTest(int newValue)
748 {
749 IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
750 kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
751 ptoa(13));
752
753 OSObject * current = NULL;
754 OSString * firstString = OSString::withCStringNoCopy("firstString");
755 OSString * secondString = OSString::withCStringNoCopy("secondString");
756
757 assert(bmd->copyContext() == NULL);
758
759 bmd->setContext(NULL);
760 assert(bmd->copyContext() == NULL);
761
762 bmd->setContext(firstString);
763 current = bmd->copyContext();
764 assert(current == firstString);
765 OSSafeReleaseNULL(current);
766
767 bmd->setContext(NULL);
768 assert(bmd->copyContext() == NULL);
769
770 bmd->setContext(secondString);
771 current = bmd->copyContext();
772 assert(current == secondString);
773 OSSafeReleaseNULL(current);
774
775 bmd->release();
776
777 assert(firstString->getRetainCount() == 1);
778 assert(secondString->getRetainCount() == 1);
779
780 firstString->release();
781 secondString->release();
782
783 return kIOReturnSuccess;
784 }
785
786 int
IOMemoryDescriptorTest(int newValue)787 IOMemoryDescriptorTest(int newValue)
788 {
789 int result;
790
791 IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);
792
793 #if 0
794 if (6 == newValue) {
795 IOMemoryDescriptor * sbmds[3];
796 IOMultiMemoryDescriptor * smmd;
797 IOMemoryDescriptor * mds[2];
798 IOMultiMemoryDescriptor * mmd;
799 IOMemoryMap * map;
800
801 sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
802 sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2));
803 sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3));
804 smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds) / sizeof(sbmds[0]), kIODirectionOutIn, false);
805
806 mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
807 mds[1] = smmd;
808 mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
809 map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall);
810 assert(map);
811 map->release();
812 mmd->release();
813 mds[0]->release();
814 mds[1]->release();
815 sbmds[0]->release();
816 sbmds[1]->release();
817 sbmds[2]->release();
818
819 return 0;
820 } else if (5 == newValue) {
821 IOReturn ret;
822 IOMemoryDescriptor * md;
823 IODMACommand * dma;
824 IODMACommand::SegmentOptions segOptions =
825 {
826 .fStructSize = sizeof(segOptions),
827 .fNumAddressBits = 64,
828 .fMaxSegmentSize = 4096,
829 .fMaxTransferSize = 128 * 1024,
830 .fAlignment = 4,
831 .fAlignmentLength = 4,
832 .fAlignmentInternalSegments = 0x1000
833 };
834
835 IOAddressRange ranges[3][2] =
836 {
837 {
838 { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
839 { 0, 0 },
840 },
841 {
842 { ranges[0][0].address, 0x10 },
843 { 0x3000 + ranges[0][0].address, 0xff0 },
844 },
845 {
846 { ranges[0][0].address, 0x2ffc },
847 { trunc_page(ranges[0][0].address), 0x800 },
848 },
849 };
850 static const uint32_t rangesCount[3] = { 1, 2, 2 };
851 uint32_t test;
852
853 for (test = 0; test < 3; test++) {
854 kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
855 ranges[test][0].address, ranges[test][0].length,
856 ranges[test][1].address, ranges[test][1].length);
857
858 md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
859 assert(md);
860 ret = md->prepare();
861 assert(kIOReturnSuccess == ret);
862 dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
863 IODMACommand::kMapped, NULL, NULL);
864 assert(dma);
865 ret = dma->setMemoryDescriptor(md, true);
866 if (kIOReturnSuccess == ret) {
867 IODMACommand::Segment64 segments[1];
868 UInt32 numSegments;
869 UInt64 offset;
870
871 offset = 0;
872 do{
873 numSegments = 1;
874 ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
875 assert(kIOReturnSuccess == ret);
876 assert(1 == numSegments);
877 kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
878 }while (offset < md->getLength());
879
880 ret = dma->clearMemoryDescriptor(true);
881 assert(kIOReturnSuccess == ret);
882 dma->release();
883 }
884 md->release();
885 }
886
887 return kIOReturnSuccess;
888 } else if (4 == newValue) {
889 IOService * isp;
890 IOMapper * mapper;
891 IOBufferMemoryDescriptor * md1;
892 IODMACommand * dma;
893 IOReturn ret;
894 size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
895 uint64_t start, time, nano;
896
897 isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
898 assert(isp);
899 mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
900 assert(mapper);
901
902 md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
903 kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
904 bufSize, page_size);
905
906 ret = md1->prepare();
907 assert(kIOReturnSuccess == ret);
908
909 IODMAMapSpecification mapSpec;
910 bzero(&mapSpec, sizeof(mapSpec));
911 uint64_t mapped;
912 uint64_t mappedLength;
913
914 start = mach_absolute_time();
915
916 ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
917 assert(kIOReturnSuccess == ret);
918
919 time = mach_absolute_time() - start;
920
921 absolutetime_to_nanoseconds(time, &nano);
922 kprintf("time %lld us\n", nano / 1000ULL);
923 kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);
924
925 assert(md1);
926
927 dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
928 32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);
929
930 assert(dma);
931
932 start = mach_absolute_time();
933 ret = dma->setMemoryDescriptor(md1, true);
934 assert(kIOReturnSuccess == ret);
935 time = mach_absolute_time() - start;
936
937 absolutetime_to_nanoseconds(time, &nano);
938 kprintf("time %lld us\n", nano / 1000ULL);
939
940
941 IODMACommand::Segment32 segments[1];
942 UInt32 numSegments = 1;
943 UInt64 offset;
944
945 offset = 0;
946 ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
947 assert(kIOReturnSuccess == ret);
948 assert(1 == numSegments);
949 kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);
950
951 ret = dma->clearMemoryDescriptor(true);
952 assert(kIOReturnSuccess == ret);
953
954 md1->release();
955
956 return kIOReturnSuccess;
957 }
958
959 if (3 == newValue) {
960 IOBufferMemoryDescriptor * md1;
961 IOBufferMemoryDescriptor * md2;
962 IOMemoryMap * map1;
963 IOMemoryMap * map2;
964 uint32_t * buf1;
965 uint32_t * buf2;
966 IOReturn err;
967
968 md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
969 kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
970 64 * 1024, page_size);
971 assert(md1);
972 map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
973 assert(map1);
974 buf1 = (uint32_t *) map1->getVirtualAddress();
975
976 md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
977 kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
978 64 * 1024, page_size);
979 assert(md2);
980 map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
981 assert(map2);
982 buf2 = (uint32_t *) map2->getVirtualAddress();
983
984 memset(buf1, 0x11, 64 * 1024L);
985 memset(buf2, 0x22, 64 * 1024L);
986
987 kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);
988
989 kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
990 assert(0x11111111 == buf1[0]);
991 assert(0x22222222 == buf2[0]);
992 err = map1->redirect(md2, 0, 0ULL);
993 kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
994 assert(0x11111111 == buf2[0]);
995 assert(0x22222222 == buf1[0]);
996 err = map1->redirect(md1, 0, 0ULL);
997 kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
998 assert(0x11111111 == buf1[0]);
999 assert(0x22222222 == buf2[0]);
1000 map1->release();
1001 map2->release();
1002 md1->release();
1003 md2->release();
1004 }
1005 #endif
1006
//  result = IODMACommandLocalMappedNonContig(newValue);
//  if (result) return (result);

	/*
	 * Run each focused memory-descriptor sub-test in sequence; the first
	 * non-zero result aborts the run and is returned to the caller
	 * unchanged, so the failing sub-test is identifiable from the code.
	 */
	result = IODMACommandForceDoubleBufferTest(newValue);
	if (result) {
		return result;
	}

	result = AllocationNameTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryMapCopyOnWriteTest(newValue);
	if (result) {
		return result;
	}

	result = IOMultMemoryDescriptorTest(newValue);
	if (result) {
		return result;
	}

	result = IOBMDOverflowTest(newValue);
	if (result) {
		return result;
	}

	result = IOBMDSetLengthMapTest(newValue);
	if (result) {
		return result;
	}

	result = ZeroLengthTest(newValue);
	if (result) {
		return result;
	}

	result = IODirectionPrepareNoZeroFillTest(newValue);
	if (result) {
		return result;
	}

	result = BadFixedAllocTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryRemoteTest(newValue);
	if (result) {
		return result;
	}

	result = IOMemoryPrefaultTest(newValue);
	if (result) {
		return result;
	}

	result = IOGuardPageMDTest(newValue);
	if (result) {
		return result;
	}

	result = IOMDContextTest(newValue);
	if (result) {
		return result;
	}
1074
	/*
	 * Exhaustive stress test of IOMemoryDescriptor::withAddressRanges and
	 * createMappingInTask: sweep descriptors over varying (offset, size)
	 * windows of a 16 MB pattern buffer, map sub-ranges of each, and verify
	 * the mapped contents (virtual path) and readBytes() (physical path)
	 * against the known pattern.
	 */
	IOGeneralMemoryDescriptor * md;
	mach_vm_offset_t data[2];
	vm_size_t bsize = 16 * 1024 * 1024;
	vm_size_t srcsize, srcoffset, mapoffset, size;
	kern_return_t kr;

	data[0] = data[1] = 0;
	kr = mach_vm_allocate_kernel(kernel_map, &data[0], bsize,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IOKIT));
	assert(KERN_SUCCESS == kr);

	// Mark two small sub-ranges VM_INHERIT_NONE so the swept windows also
	// cross VM entries with differing attributes (return values are
	// deliberately ignored — best-effort setup).
	mach_vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
	mach_vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

	IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

	// Fill the buffer with its own 32-bit word index so every word's
	// expected value is computable from its offset alone.
	uint32_t idx, offidx;
	for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) {
		((uint32_t*)data[0])[idx] = idx;
	}

	// Stride formulas ((x << 2) + const) sweep a scattered but
	// deterministic set of offsets/sizes, mixing page-aligned and
	// unaligned cases without visiting every combination.
	for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c)) {
		for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc)) {
			IOAddressRange ranges[3];
			uint32_t rangeCount = 1;

			// Default: one contiguous range covering the window.
			bzero(&ranges[0], sizeof(ranges));
			ranges[0].address = data[0] + srcoffset;
			ranges[0].length = srcsize;
			ranges[1].address = ranges[2].address = data[0];

			if (srcsize > ptoa(5)) {
				// Large window: split into three contiguous ranges of
				// unequal, page-misaligned lengths.
				ranges[0].length = 7634;
				ranges[1].length = 9870;
				ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
				ranges[1].address = ranges[0].address + ranges[0].length;
				ranges[2].address = ranges[1].address + ranges[1].length;
				rangeCount = 3;
			} else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
				// Page-aligned medium window: present the first two pages
				// in swapped order, so the descriptor's logical layout
				// differs from the buffer's physical order (the
				// verification loop below compensates with an XOR).
				ranges[0].length = ptoa(1);
				ranges[1].length = ptoa(1);
				ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
				ranges[0].address = data[0] + srcoffset + ptoa(1);
				ranges[1].address = data[0] + srcoffset;
				ranges[2].address = ranges[0].address + ranges[0].length;
				rangeCount = 3;
			}

			md = OSDynamicCast(IOGeneralMemoryDescriptor,
			    IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
			assert(md);

			IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
			    (long) srcsize, (long) srcoffset,
			    (long long) ranges[0].address - data[0], (long long) ranges[0].length,
			    (long long) ranges[1].address - data[0], (long long) ranges[1].length,
			    (long long) ranges[2].address - data[0], (long long) ranges[2].length);

			// kr still holds the status from the allocation above (first
			// pass) or from the previous iteration; only proceed while
			// everything so far has succeeded.
			if (kIOReturnSuccess == kr) {
				// Sweep sub-range mappings of this descriptor.
				for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) {
					for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200)) {
						IOMemoryMap * map;
						mach_vm_address_t addr = 0;
						uint32_t data;  // shadows outer data[]; holds one word read back via readBytes()

						// IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

						map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall, mapoffset, size);
						if (map) {
							addr = map->getAddress();
						} else {
							kr = kIOReturnError;
						}

						// IOLog(">mapRef 0x%x %llx\n", kr, addr);

						if (kIOReturnSuccess != kr) {
							break;
						}
						kr = md->prepare();
						if (kIOReturnSuccess != kr) {
							panic("prepare() fail 0x%x", kr);
							break;
						}

						// Wired (prepared) memory must report zero swapped pages.
						IOByteCount resident, dirty, swapped;
						kr = md->getPageCounts(&resident, &dirty, &swapped);
						if (kIOReturnSuccess != kr) {
							panic("unable to getExtendedPageCounts");
							break;
						}
						IOLog("Page Counts: %llu resident, %llu dirty, %llu swapped\n",
						    resident, dirty, swapped);
						if (swapped != 0) {
							panic("Swapped page count is not 0 for prepared descriptor %llu", swapped);
						}

						// Verify every word of the mapping against the
						// index pattern, through both the mapping and
						// readBytes().
						for (idx = 0; idx < size; idx += sizeof(uint32_t)) {
							// Absolute byte offset of this word in the pattern buffer.
							offidx = (typeof(offidx))(idx + mapoffset + srcoffset);
							if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
								// Swapped-pages descriptor layout: XOR with the
								// page size flips an offset between page 0 and
								// page 1, matching the reordered ranges above.
								if (offidx < ptoa(2)) {
									offidx ^= ptoa(1);
								}
							}
							offidx /= sizeof(uint32_t);  // byte offset -> expected word value

							if (offidx != ((uint32_t*)addr)[idx / sizeof(uint32_t)]) {
								// Mapped (virtual) contents disagree with the pattern.
								panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx,", md, map, idx, (long) srcoffset, (long) mapoffset);
								kr = kIOReturnBadMedia;
							} else {
								if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) {
									data = 0;  // force the mismatch path on a short read
								}
								if (offidx != data) {
									// readBytes (physical) path disagrees with the pattern.
									panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx,", md, map, idx, (long) srcoffset, (long) mapoffset);
									kr = kIOReturnBadMedia;
								}
							}
						}
						md->complete();
						map->release();
						// IOLog("unmapRef %llx\n", addr);
					}
					if (kIOReturnSuccess != kr) {
						break;
					}
				}
			}
			md->release();
			if (kIOReturnSuccess != kr) {
				break;
			}
		}
		if (kIOReturnSuccess != kr) {
			break;
		}
	}

	// On failure, log the exact (source, mapping) combination that broke.
	if (kIOReturnSuccess != kr) {
		IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
		    (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
	}

	assert(kr == kIOReturnSuccess);

	mach_vm_deallocate(kernel_map, data[0], bsize);
	//mach_vm_deallocate(kernel_map, data[1], size);

	// Reference-count telemetry; nonzero growth across runs would suggest
	// a descriptor reference leak.
	IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);

	return 0;
}
1227
1228
1229 #endif /* DEVELOPMENT || DEBUG */
1230