import struct

from core import (
    caching,
    gettype,
    lldbwrap,
    xnu_format,
)
from .kmem import KMem, MemoryRange
from .vm import Pmap
from .btlog import BTLog, BTLibrary
from .whatis import *

# FIXME: should not import this from xnu / utils
from xnu import (
    GetSourceInformationForAddress,
    print_hex_data,
)

class ZoneBitsMemoryObject(MemoryObject):
    """ Memory Object for pointers in the Zone Bitmaps range """

    MO_KIND = "zone bitmap"

    @property
    def object_range(self):
        # The whole bitmap range: we do not resolve which chunk owns
        # a given bitmap word (see describe() below for why).
        return self.kmem.bits_range

    def describe(self, verbose=False):
        #
        # Printing something more useful would require crawling
        # all zone chunks with non inline bitmaps until we find
        # the one.
        #
        # This is very expensive and really unlikely to ever
        # be needed for debugging.
        #
        # Moreover, bitmap pointers do not leak outside
        # of the bowels of zalloc, dangling pointers to
        # this region is very unexpected.
        #
        print("Zone Bitmap Info")
        print(" N/A")
        print()


class ZonePageMetadata(MemoryObject):
    """ Memory Object for Zone Page Metadata """

    MO_KIND = "zone metadata"

    def __init__(self, kmem, address):
        """
        @param kmem (KMem)
            The kmem this metadata belongs to

        @param address (int)
            An address inside kmem's metadata range

        @raises IndexError
            If @c address is outside the metadata range
        """
        super().__init__(kmem, address)

        if not kmem.meta_range.contains(address):
            raise IndexError("{:#x} is not inside the meta range {}".format(
                address, kmem.meta_range))

        #
        # Resolve the ZPM we fall into
        #
        size = kmem.zpm_type.GetByteSize()
        idx = (address - kmem.meta_range.start) // size
        sbv = kmem.target.xCreateValueFromAddress(None,
            kmem.meta_range.start + idx * size, kmem.zpm_type)
        chunk_len = sbv.xGetIntegerByName('zm_chunk_len')

        # mo_sbv is the metadata entry the address resolves to directly;
        # sbv (below) is the canonical/primary entry for the chunk.
        self.mo_sbv = sbv
        self.kmem = kmem

        #
        # Compute the canonical ZPM
        #
        # 0xe = ZM_SECONDARY_PAGE
        # 0xf = ZM_SECONDARY_PCPU_PAGE
        #
        # TODO use a nice package to index enums by name,
        # can't use GetEnumName() because it uses kern.*
        #
        if chunk_len in (0xe, 0xf):
            # Secondary metadata: rewind to the chunk's primary entry.
            pg_idx = sbv.xGetIntegerByName('zm_page_index')
            idx -= pg_idx
            sbv = sbv.xGetSiblingValueAtIndex(-pg_idx)
            chunk_len = sbv.xGetIntegerByName('zm_chunk_len')

        self.sbv = sbv
        self._idx = idx
        self._chunk_len = chunk_len

    @classmethod
    def _create_with_zone_address(cls, kmem, address):
        """
        Make a ZonePageMetadata for an address in the zone map.

        @raises IndexError
            If @c address is outside the zone map
        """
        zone_range = kmem.zone_range
        if not zone_range.contains(address):
            raise IndexError("{:#x} is not inside the zone map {}".format(
                address, zone_range))

        index = (address - zone_range.start) >> kmem.page_shift
        meta_addr = kmem.meta_range.start + index * kmem.zpm_type.GetByteSize()

        return ZonePageMetadata(kmem, meta_addr)

    @classmethod
    def _create_with_pva(cls, kmem, pva):
        """ Make a ZonePageMetadata from a packed zone_pva_t value """

        # sign-extend the 32-bit packed page index, then shift into
        # an address, truncated to 64 bits
        address = ((pva | 0xffffffff00000000) << kmem.page_shift) & 0xffffffffffffffff
        return ZonePageMetadata._create_with_zone_address(kmem, address)

    @property
    def object_range(self):
        addr = self.sbv.GetLoadAddress()
        clen = self._chunk_len
        if clen == 1 and self.zone.percpu:
            clen = self.kmem.ncpus
        # BUG FIX: size must use the (possibly per-cpu adjusted) clen;
        # the original used self._chunk_len which made the per-cpu
        # adjustment above dead code.
        size = clen * self.kmem.zpm_type.GetByteSize()

        return MemoryRange(addr, addr + size)

    @property
    def zone(self):
        """ The Zone owning this chunk """

        sbv = self.sbv
        return Zone(sbv.xGetIntegerByName('zm_index'))

    def describe(self, verbose=False):
        kmem = self.kmem
        sbv = self.sbv
        zone = self.zone

        chunk_len = self._chunk_len
        if zone.percpu:
            chunk_len = kmem.ncpus

        zone.describe()

        print("Zone Metadata Info")
        print(" chunk length : {}".format(chunk_len))
        print(" metadata : {:#x}".format(sbv.GetLoadAddress()))
        print(" page : {:#x}".format(self.page_addr))

        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            if verbose:
                bitmap = [
                    "{:#010x}".format(sbv.xGetSiblingValueAtIndex(i).xGetIntegerByName('zm_bitmap'))
                    for i in range(self._chunk_len)
                ]
                print(" bitmap : inline [ {} ]".format(" ".join(bitmap)))
            else:
                print(" bitmap : inline")
        else:
            # out-of-line bitmap: zm_bitmap is a packed reference,
            # bits 29..31 encode log2(number of 64-bit words),
            # low 28 bits are an index (in 8-byte units) into bits_range
            bref = sbv.xGetIntegerByName('zm_bitmap')
            blen = 1 << ((bref >> 29) & 0x7)
            bsize = blen << 3
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff)
            bitmap = (
                "{:#018x}".format(word)
                for word in kmem.target.xIterAsUInt64(baddr, blen)
            )

            if bref == 0:
                print(" bitmap : None")
            elif not verbose:
                print(" bitmap : {:#x} ({} bytes)".format(baddr, bsize))
            elif blen <= 2:
                print(" bitmap : {:#x} ({} bytes) [ {} ]".format(
                    baddr, bsize, ' '.join(bitmap)))
            else:
                # blen is a power of two >= 4 here, print 4 words per line
                print(" bitmap : {:#x} ({} bytes) [".format(baddr, bsize))
                for i in range(blen // 4):
                    print(" {} {} {} {}".format(
                        next(bitmap), next(bitmap),
                        next(bitmap), next(bitmap)))
                print(" ]")

        print()

        mo_sbv = self.mo_sbv
        if sbv != mo_sbv:
            pg_idx = self.mo_sbv.xGetIntegerByName('zm_page_index')

            print("Secondary Metadata Info")
            print(" index : {}/{}".format(pg_idx + 1, chunk_len))
            print(" metadata : {:#x}".format(mo_sbv.GetLoadAddress()))
            print(" page : {:#x}".format(
                self.page_addr + (pg_idx << kmem.page_shift)))
            print()

        if verbose:
            print("-" * 80)
            print()
            print(str(self.mo_sbv))
            print()


    @property
    def next_pva(self):
        """ the next zone_pva_t queued after this Zone Page Metadata """

        return self.sbv.xGetIntegerByPath('.zm_page_next.packed_address')

    @property
    def page_addr(self):
        """ The page address corresponding to this Zone Page Metadata """

        kmem = self.kmem
        return kmem.zone_range.start + (self._idx << kmem.page_shift)

    def iter_all(self, zone):
        """ All element addresses covered by this chunk """

        base = self.page_addr
        esize = zone.elem_outer_size
        offs = zone.elem_inner_offs
        count = zone.chunk_elems
        run = self.sbv.xGetIntegerByName('zm_chunk_len')

        return range(base + offs, base + (run << self.kmem.page_shift), esize)

    def is_allocated(self, zone, addr):
        """ Whether an address has the allocated bit set """

        if not self._chunk_len:
            return False

        sbv = self.sbv
        base = self.page_addr + zone.elem_inner_offs
        esize = zone.elem_inner_size
        idx = (addr - base) // esize

        # a set bit in the bitmap means "free"
        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            w, b = divmod(idx, 32)
            mask = sbv.xGetSiblingValueAtIndex(w).xGetIntegerByName('zm_bitmap')
            return (mask & (1 << b)) == 0
        else:
            w, b = divmod(idx, 64)
            bref = sbv.xGetIntegerByName('zm_bitmap')
            kmem = self.kmem
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff) + 8 * w
            return not (kmem.target.xReadUInt64(baddr) & (1 << b))

    def iter_allocated(self, zone):
        """ All allocated addresses in this chunk """

        kmem = self.kmem
        sbv = self.sbv
        base = self.page_addr

        # cache memory, can make enumeration twice as fast for smaller objects
        sbv.target.xReadBytes(base, self._chunk_len << kmem.page_shift)

        esize = zone.elem_outer_size
        base += zone.elem_inner_offs

        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            for i in range(zone.chunk_elems):
                w, b = divmod(i, 32)
                if b == 0:
                    mask = sbv.xGetSiblingValueAtIndex(w).xGetIntegerByName('zm_bitmap')
                if not mask & (1 << b):
                    yield base + i * esize
        else:
            bref = sbv.xGetIntegerByName('zm_bitmap')
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff)
            data = kmem.target.xIterAsUInt64(baddr, 1 << ((bref >> 29) & 0x7))

            for i in range(zone.chunk_elems):
                b = i & 63
                if b == 0:
                    word = next(data)
                if not word & (1 << b):
                    yield base + i * esize


class ZoneHeapMemoryObject(MemoryObject):
    """ Memory Object for zone allocated objects """

    MO_KIND = "zone heap"

    def __init__(self, kmem, address):
        """
        @param kmem (KMem)
            The kmem this object belongs to

        @param address (int)
            An address inside kmem's zone range

        @raises IndexError
            If @c address is outside the zone range
        """
        super().__init__(kmem, address)

        if not kmem.zone_range.contains(address):
            raise IndexError("{:#x} is not inside the zone range {}".format(
                address, kmem.zone_range))

        meta = ZonePageMetadata._create_with_zone_address(kmem, address)
        zone = meta.zone
        esize = zone.elem_outer_size

        base = meta.page_addr + zone.elem_inner_offs
        # elem_idx is -1 (and elem_addr None) when the address falls
        # in the chunk header area before the first element
        elem_idx = (address - base) // esize if address >= base else -1
        elem_addr = base + elem_idx * esize if address >= base else None
        self.real_addr = elem_addr
        self.real_meta = meta

        self.kmem = kmem
        self.meta = meta
        self.zone = zone
        self.elem_idx = elem_idx
        self.elem_addr = elem_addr

    @property
    def object_range(self):
        if self.elem_idx >= 0:
            elem_addr = self.elem_addr
            elem_size = self.zone.elem_outer_size
            return MemoryRange(elem_addr, elem_addr + elem_size)

        base = self.meta.page_addr
        size = self.zone.elem_inner_offs
        return MemoryRange(base, base + size)

    @property
    def status(self):
        """ "invalid", "free", "free (cached)", "free (recirc)" or "allocated" """

        zone = self.zone
        real_addr = self.real_addr

        if self.elem_idx < 0:
            return "invalid"

        elif not self.real_meta.is_allocated(zone, real_addr):
            return "free"

        elif real_addr in zone.cached():
            return "free (cached)"

        elif real_addr in zone.recirc():
            return "free (recirc)"

        else:
            return "allocated"

    def hexdump(self):
        print("Hexdump:")

        target = self.kmem.target
        zone = self.zone
        eaddr = self.elem_addr
        eend = eaddr + zone.elem_inner_size
        delta = self.real_addr - eaddr

        rz = zone.elem_redzone
        # pad the window to 16-byte boundaries, with up to 16 bytes of
        # left redzone context and 16 bytes of trailing context
        start = (eaddr & -16) - min(rz, 16) - 16
        end = (eend + 16 + 15) & -16
        marks = { self.address: '>' }
        phex = print_hex_data


        if rz > 16:
            print(" " + "=" * 88)
            print(" {}".format("." * 18))

            try:
                data = target.xReadBytes(start + delta, eaddr - start)
                phex(data, start, "", marks)
            except:
                print(" *** unable to read redzone memory ***")
        else:
            try:
                data = target.xReadBytes(start + delta, eaddr - rz - start)
                phex(data, start, "", marks)
            except:
                pass

            print(" " + "=" * 88)

            if rz:
                try:
                    data = target.xReadBytes(eaddr - rz + delta, rz)
                    phex(data, eaddr - rz, "", marks)
                except:
                    print(" *** unable to read redzone memory ***")

        if rz:
            print(" {}".format("-" * 88))

        try:
            data = target.xReadBytes(eaddr + delta, eend - eaddr)
            phex(data, eaddr, "", marks)
        except:
            print(" *** unable to read element memory ***")

        print(" " + "=" * 88)

        try:
            data = target.xReadBytes(eend + delta, end - eend)
            phex(data, eend, "", marks)
        except:
            pass

        print()

    def describe(self, verbose=False):
        meta = self.meta
        zone = self.zone
        status = self.status
        btlog = zone.btlog

        meta.describe()

        print("Zone Heap Object Info")

        print(" element index : {}".format(self.elem_idx))
        print(" chunk offset : {}".format(self.address - meta.page_addr))
        print(" status : {}".format(status))
        if btlog and (btlog.is_log() or status == 'allocated'):
            record = next(btlog.iter_records(
                wantElement=self.elem_addr, reverse=True), None)
            if record:
                btlib = BTLibrary.get_shared()
                print(" last zlog backtrace",
                    *btlib.get_stack(record.ref).symbolicated_frames(prefix=" "), sep="\n")

        print()

        if self.elem_idx >= 0 and verbose:
            self.hexdump()


@whatis_provider
class ZoneWhatisProvider(WhatisProvider):
    """
    Whatis Provider for the zone ranges
    - metadata (bits and ZPM)
    - PGZ
    - regular heap objects
    """

    def __init__(self, kmem):
        super().__init__(kmem)

    def claims(self, address):
        kmem = self.kmem

        return any(
            r.contains(address)
            for r in (kmem.meta_range, kmem.bits_range, kmem.zone_range)
        )

    def lookup(self, address):
        kmem = self.kmem

        if kmem.meta_range.contains(address):
            return ZonePageMetadata(self.kmem, address)

        if kmem.bits_range.contains(address):
            return ZoneBitsMemoryObject(self.kmem, address)

        return ZoneHeapMemoryObject(self.kmem, address)


class ZPercpuValue(object):
    """
    Provides an enumerator for a zpercpu value
    """

    def __init__(self, sbvalue):
        """
        @param sbvalue (SBValue)
            The value to enumerate
        """
        self.sbv = sbvalue

    def __iter__(self):
        sbv = self.sbv
        kmem = KMem.get_shared()
        addr = sbv.GetValueAsAddress()
        name = sbv.GetName()
        ty = sbv.GetType().GetPointeeType()

        # each CPU's slot lives one page apart from the base address
        return (
            sbv.xCreateValueFromAddress(name, addr + (cpu << kmem.page_shift), ty)
            for cpu in kmem.zcpus
        )


class Zone(object):
    """
    the Zone class wraps XNU Zones and provides fast enumeration
    of allocated, cached, ... elements.
    """

    def __init__(self, index_name_or_addr):
        """
        @param index_name_or_addr (int or str):
            - int: a zone index within [0, num_zones)
            - int: a zone address within [zone_array, zone_array + num_zones)
            - str: a zone name

        @param kmem (KMem or None)
            The kmem this command applies to,
            or None for the current one
        """

        kmem = KMem.get_shared()
        zarr = kmem.zone_array

        if isinstance(index_name_or_addr, str):
            mangled_name = index_name_or_addr.replace(' ', '.')
            zid = self._find_zone_id_by_mangled_name(mangled_name)
        elif index_name_or_addr <= kmem.num_zones:
            # NOTE(review): docstring says the index range is
            # [0, num_zones) — `<=` accepts num_zones itself; confirm
            # whether this off-by-one is intentional
            zid = index_name_or_addr
        else:
            zid = index_name_or_addr - zarr.GetLoadAddress()
            zid = zid // zarr.GetType().GetArrayElementType().GetByteSize()

        self.kmem = kmem
        self.zid = zid
        self.sbv = zarr.chkGetChildAtIndex(zid)

    @staticmethod
    @caching.cache_dynamically
    def get_zone_name(zid, target=None):
        """
        Returns a zone name by index.

        @param zid (int)
            A zone ID

        @returns (str or None)
            Returns a string holding the zone name
            if the zone exists, or None
        """

        kmem = KMem.get_shared()
        if zid >= kmem.num_zones:
            return None

        zone = kmem.zone_array.chkGetChildAtIndex(zid)
        zsec = kmem.zsec_array.chkGetChildAtIndex(zid)

        if zone.xGetIntegerByName('z_self') == 0:
            return None

        heap_id = zsec.xGetIntegerByName('z_kheap_id')

        return KMem._HEAP_NAMES[heap_id] + zone.xGetCStringByName('z_name')

    @staticmethod
    @caching.cache_dynamically
    def _find_zone_id_by_mangled_name(name, target=None):
        """
        Lookup a zone ID by name

        @param name (str)
            The name of the zone to lookup

        @returns (int)
            The zone ID for this name

        @raises KeyError
            If no zone has this name
        """

        kmem = KMem.get_shared()
        for zid in range(kmem.num_zones):
            k = Zone.get_zone_name(zid)
            if k is not None and name == k.replace(' ', '.'):
                return zid

        raise KeyError("No zone called '{}' found".format(name))

    @property
    def initialized(self):
        """ Whether the zone is initialized (z_self is set) """

        return self.sbv.xGetIntegerByName('z_self') != 0

    @property
    def address(self):
        """ The zone address """

        return self.sbv.GetLoadAddress()

    @property
    def name(self):
        """ The zone name """

        return self.get_zone_name(self.zid)

    @property
    def mangled_name(self):
        """ The zone mangled name """

        return self.name.replace(' ', '.')

    @caching.dyn_cached_property
    def elem_redzone(self, target=None):
        """ The KASAN redzone size of elements (0 without kasan classic) """

        if self.kmem.kasan_classic:
            return self.sbv.xGetIntegerByName('z_kasan_redzone')
        return 0

    @caching.dyn_cached_property
    def elem_inner_size(self, target=None):
        """ The inner size of elements """

        return self.sbv.xGetIntegerByName('z_elem_size')

    @caching.dyn_cached_property
    def elem_outer_size(self, target=None):
        """ The size of elements, including any KASAN redzone """

        if not self.kmem.kasan_classic:
            return self.elem_inner_size
        return self.elem_inner_size + self.elem_redzone

    @caching.dyn_cached_property
    def elem_inner_offs(self, target=None):
        """ The chunk initial offset """

        return self.sbv.xGetIntegerByName('z_elem_offs')

    @caching.dyn_cached_property
    def chunk_pages(self, target=None):
        """ The number of pages per chunk """

        return self.sbv.xGetIntegerByName('z_chunk_pages')

    @caching.dyn_cached_property
    def chunk_elems(self, target=None):
        """ The number of elements per chunk """

        return self.sbv.xGetIntegerByName('z_chunk_elems')

    @property
    def percpu(self):
        """ Whether this is a per-cpu zone """

        return self.sbv.xGetIntegerByName('z_percpu')

    @property
    def btlog(self):
        """ Returns the zone's BTLog or None """

        try:
            btlog = self.sbv.xGetPointeeByName('z_btlog')
            return BTLog(btlog)
        except:
            return None

    def describe(self):
        kmem = self.kmem
        zone = self.sbv
        zsec = kmem.zsec_array.chkGetChildAtIndex(self.zid)

        submap_arr = kmem.target.chkFindFirstGlobalVariable('zone_submaps_names')
        submap_idx = zsec.xGetIntegerByName('z_submap_idx')
        submap_name = submap_arr.xGetCStringAtIndex(submap_idx)
        submap_end = zsec.xGetIntegerByName('z_submap_from_end')

        try:
            btlog = zone.xGetIntegerByName('z_btlog')
        except:
            # likely a release kernel
            btlog = None

        fmt = (
            "Zone Info\n"
            " name : {0.name} ({&z:#x})\n"
            " submap : {1} (from {2})\n"
            " element size : {0.elem_inner_size}\n"
            " element offs : {0.elem_inner_offs}\n"
        )
        if kmem.kasan_classic:
            fmt += " element redzone : {0.elem_redzone}\n"
        fmt += " chunk elems / pages : {$z.z_chunk_elems} / {$z.z_chunk_pages}\n"
        if btlog:
            fmt += " btlog : {$z.z_btlog:#x}\n"

        print(xnu_format(fmt, self, submap_name,
            "right" if submap_end else "left", z = zone))

    def iter_page_queue(self, name):
        """ Yields the ZonePageMetadata chunks on the given page queue """

        kmem = self.kmem
        zone = self.sbv

        pva = zone.xGetIntegerByPath('.{}.packed_address'.format(name))

        while pva:
            meta = ZonePageMetadata._create_with_pva(kmem, pva)
            pva = meta.next_pva
            yield meta

    def _depotElements(self, depot, into):
        """ Accumulates all element addresses of a zone depot into a set """

        last = depot.xGetPointeeByName('zd_tail').GetValueAsAddress()
        mag = depot.xGetPointeeByName('zd_head')

        kmem = self.kmem
        n = kmem.mag_size
        target = kmem.target

        while mag and mag.GetLoadAddress() != last:
            into.update(kmem.iter_addresses(target.xIterAsULong(
                mag.xGetLoadAddressByName('zm_elems'),
                n
            )))
            mag = mag.xGetPointeeByName('zm_next')

        return into

    def cached(self, into = None):
        """ all addresses in per-cpu caches or per-cpu depots """

        pcpu = self.sbv.GetChildMemberWithName('z_pcpu_cache')
        into = into if into is not None else set()

        if pcpu.GetValueAsAddress():
            target = pcpu.target
            kmem = self.kmem

            for cache in ZPercpuValue(pcpu):
                into.update(kmem.iter_addresses(target.xIterAsULong(
                    cache.xGetIntegerByName('zc_alloc_elems'),
                    cache.xGetIntegerByName('zc_alloc_cur')
                )))

                into.update(kmem.iter_addresses(target.xIterAsULong(
                    cache.xGetIntegerByName('zc_free_elems'),
                    cache.xGetIntegerByName('zc_free_cur')
                )))

                self._depotElements(
                    cache.chkGetChildMemberWithName('zc_depot'),
                    into = into
                )

        return into

    def recirc(self, into = None):
        """ all addresses in the recirculation layer """

        return self._depotElements(
            self.sbv.chkGetChildMemberWithName('z_recirc'),
            into = into if into is not None else set()
        )

    def iter_all(self, ty = None):
        """
        Returns a generator for all addresses/values that can be made

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>)     if ty is None
            - (generator<SBValue>) if ty is set
        """

        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
                'z_pageq_empty',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_all(self)
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def iter_free(self, ty = None):
        """
        Returns a generator for all free addresses/values

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>)     if ty is None
            - (generator<SBValue>) if ty is set
        """

        cached = set()
        self.cached(into = cached)
        self.recirc(into = cached)

        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_all(self)
            if addr in cached or not meta.is_allocated(self, addr)
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def iter_allocated(self, ty = None):
        """
        Returns a generator for all allocated addresses/values

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>)     if ty is None
            - (generator<SBValue>) if ty is set
        """

        cached = set()
        self.cached(into = cached)
        self.recirc(into = cached)

        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_allocated(self)
            if addr not in cached
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def __iter__(self):
        return self.iter_allocated()
832__all__ = [ 833 ZPercpuValue.__name__, 834 Zone.__name__, 835] 836