import struct

from core import (
    caching,
    gettype,
    lldbwrap,
    xnu_format,
)
from .kmem import KMem, MemoryRange
from .vm import Pmap
from .btlog import BTLog, BTLibrary
from .whatis import *

# FIXME: should not import this from xnu / utils
from xnu import (
    GetSourceInformationForAddress,
    print_hex_data,
)


class ZoneBitsMemoryObject(MemoryObject):
    """ Memory Object for pointers in the Zone Bitmaps range """

    MO_KIND = "zone bitmap"

    @property
    def object_range(self):
        return self.kmem.bits_range

    def describe(self, verbose=False):
        #
        # Printing something more useful would require crawling
        # all zone chunks with non inline bitmaps until we find
        # the one.
        #
        # This is very expensive and really unlikely to ever
        # be needed for debugging.
        #
        # Moreover, bitmap pointers do not leak outside
        # of the bowels of zalloc, dangling pointers to
        # this region is very unexpected.
        #
        print("Zone Bitmap Info")
        print(" N/A")
        print()


class ZonePageMetadata(MemoryObject):
    """ Memory Object for Zone Page Metadata """

    MO_KIND = "zone metadata"

    def __init__(self, kmem, address):
        """
        @param kmem (KMem)
            The kmem this metadata belongs to

        @param address (int)
            An address inside the zone metadata range

        @raises IndexError
            If the address is not inside the metadata range
        """
        super().__init__(kmem, address)

        if not kmem.meta_range.contains(address):
            raise IndexError("{:#x} is not inside the meta range {}".format(
                address, kmem.meta_range))

        #
        # Resolve the ZPM we fall into
        #
        size = kmem.zpm_type.GetByteSize()
        idx = (address - kmem.meta_range.start) // size
        sbv = kmem.target.xCreateValueFromAddress(None,
            kmem.meta_range.start + idx * size, kmem.zpm_type)
        chunk_len = sbv.xGetIntegerByName('zm_chunk_len')

        self.mo_sbv = sbv
        self.kmem = kmem

        #
        # Compute the canonical ZPM
        #
        # 0xe = ZM_SECONDARY_PAGE
        # 0xf = ZM_SECONDARY_PCPU_PAGE
        #
        # TODO use a nice package to index enums by name,
        #      can't use GetEnumName() because it uses kern.*
        #
        if chunk_len in (0xe, 0xf):
            # secondary page: walk back to the head metadata of the chunk
            pg_idx = sbv.xGetIntegerByName('zm_page_index')
            idx -= pg_idx
            sbv = sbv.xGetSiblingValueAtIndex(-pg_idx)
            chunk_len = sbv.xGetIntegerByName('zm_chunk_len')

        self.sbv = sbv
        self._idx = idx
        self._chunk_len = chunk_len

    @classmethod
    def _create_with_zone_address(cls, kmem, address):
        """
        Make a ZonePageMetadata for an address in the zone map

        @raises IndexError
            If the address is not inside the zone map
        """
        zone_range = kmem.zone_range
        if not zone_range.contains(address):
            raise IndexError("{:#x} is not inside the zone map {}".format(
                address, zone_range))

        index = (address - zone_range.start) >> kmem.page_shift
        meta_addr = kmem.meta_range.start + index * kmem.zpm_type.GetByteSize()

        return ZonePageMetadata(kmem, meta_addr)

    @classmethod
    def _create_with_pva(cls, kmem, pva):
        """ Make a ZonePageMetadata from a packed (zone_pva_t) address """

        # sign-extend the 32-bit packed page address, then form a page address
        address = ((pva | 0xffffffff00000000) << kmem.page_shift) & 0xffffffffffffffff
        return ZonePageMetadata._create_with_zone_address(kmem, address)

    @property
    def object_range(self):
        addr = self.sbv.GetLoadAddress()
        clen = self._chunk_len
        if clen == 1 and self.zone.percpu:
            clen = self.kmem.ncpus
        # use the per-cpu adjusted length (was: self._chunk_len, which
        # made the per-cpu adjustment above dead code)
        size = clen * self.kmem.zpm_type.GetByteSize()

        return MemoryRange(addr, addr + size)

    @property
    def zone(self):
        """ The Zone owning this chunk """

        sbv = self.sbv
        return Zone(sbv.xGetIntegerByName('zm_index'))

    def describe(self, verbose=False):
        kmem = self.kmem
        sbv = self.sbv
        zone = self.zone

        chunk_len = self._chunk_len
        if zone.percpu:
            chunk_len = kmem.ncpus

        zone.describe()

        print("Zone Metadata Info")
        print(" chunk length : {}".format(chunk_len))
        print(" metadata : {:#x}".format(sbv.GetLoadAddress()))
        print(" page : {:#x}".format(self.page_addr))

        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            if verbose:
                bitmap = [
                    "{:#010x}".format(sbv.xGetSiblingValueAtIndex(i).xGetIntegerByName('zm_bitmap'))
                    for i in range(self._chunk_len)
                ]
                print(" bitmap : inline [ {} ]".format(" ".join(bitmap)))
            else:
                print(" bitmap : inline")
        else:
            # out-of-line bitmap reference: low 28 bits are an offset in
            # 8-byte units into the bits range, bits 29..31 encode the
            # log2 of the bitmap length in words
            bref = sbv.xGetIntegerByName('zm_bitmap')
            blen = 1 << ((bref >> 29) & 0x7)
            bsize = blen << 3
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff)
            bitmap = (
                "{:#018x}".format(word)
                for word in kmem.target.xIterAsUInt64(baddr, blen)
            )

            if bref == 0:
                print(" bitmap : None")
            elif not verbose:
                print(" bitmap : {:#x} ({} bytes)".format(baddr, bsize))
            elif blen <= 2:
                print(" bitmap : {:#x} ({} bytes) [ {} ]".format(
                    baddr, bsize, ' '.join(bitmap)))
            else:
                print(" bitmap : {:#x} ({} bytes) [".format(baddr, bsize))
                for i in range(blen // 4):
                    print(" {} {} {} {}".format(
                        next(bitmap), next(bitmap),
                        next(bitmap), next(bitmap)))
                print(" ]")

        print()

        mo_sbv = self.mo_sbv
        if sbv != mo_sbv:
            pg_idx = self.mo_sbv.xGetIntegerByName('zm_page_index')

            print("Secondary Metadata Info")
            print(" index : {}/{}".format(pg_idx + 1, chunk_len))
            print(" metadata : {:#x}".format(mo_sbv.GetLoadAddress()))
            print(" page : {:#x}".format(
                self.page_addr + (pg_idx << kmem.page_shift)))
            print()

        if verbose:
            print("-" * 80)
            print()
            print(str(self.mo_sbv))
            print()

    @property
    def next_pva(self):
        """ the next zone_pva_t queued after this Zone Page Metadata """

        return self.sbv.xGetIntegerByPath('.zm_page_next.packed_address')

    @property
    def page_addr(self):
        """ The page address corresponding to this Zone Page Metadata """

        kmem = self.kmem
        return kmem.zone_range.start + (self._idx << kmem.page_shift)

    def iter_all(self, zone):
        """ All element addresses covered by this chunk """

        base = self.page_addr
        esize = zone.elem_outer_size
        offs = zone.elem_inner_offs
        run = self.sbv.xGetIntegerByName('zm_chunk_len')

        return range(base + offs, base + (run << self.kmem.page_shift), esize)

    def is_allocated(self, zone, addr):
        """ Whether an address has the allocated bit set """

        if not self._chunk_len:
            return False

        sbv = self.sbv
        base = self.page_addr + zone.elem_inner_offs
        esize = zone.elem_inner_size
        idx = (addr - base) // esize

        # a set bit in the bitmap means "free": allocated is the inverse
        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            w, b = divmod(idx, 32)
            mask = sbv.xGetSiblingValueAtIndex(w).xGetIntegerByName('zm_bitmap')
            return (mask & (1 << b)) == 0
        else:
            w, b = divmod(idx, 64)
            bref = sbv.xGetIntegerByName('zm_bitmap')
            kmem = self.kmem
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff) + 8 * w
            return not (kmem.target.xReadUInt64(baddr) & (1 << b))

    def iter_allocated(self, zone):
        """ All allocated addresses in this chunk """

        kmem = self.kmem
        sbv = self.sbv
        base = self.page_addr

        # cache memory, can make enumeration twice as fast for smaller objects
        sbv.target.xReadBytes(base, self._chunk_len << kmem.page_shift)

        esize = zone.elem_outer_size
        base += zone.elem_inner_offs

        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            for i in range(zone.chunk_elems):
                w, b = divmod(i, 32)
                if b == 0:
                    mask = sbv.xGetSiblingValueAtIndex(w).xGetIntegerByName('zm_bitmap')
                if not mask & (1 << b):
                    yield base + i * esize
        else:
            bref = sbv.xGetIntegerByName('zm_bitmap')
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff)
            data = kmem.target.xIterAsUInt64(baddr, 1 << ((bref >> 29) & 0x7))

            for i in range(zone.chunk_elems):
                b = i & 63
                if b == 0:
                    word = next(data)
                if not word & (1 << b):
                    yield base + i * esize


class ZoneHeapMemoryObject(MemoryObject):
    """ Memory Object for zone allocated objects """

    MO_KIND = "zone heap"

    def __init__(self, kmem, address):
        """
        @param kmem (KMem)
            The kmem this object belongs to

        @param address (int)
            An address inside the zone map

        @raises IndexError
            If the address is not inside the zone map
        """
        super().__init__(kmem, address)

        if not kmem.zone_range.contains(address):
            raise IndexError("{:#x} is not inside the zone range {}".format(
                address, kmem.zone_range))

        meta = ZonePageMetadata._create_with_zone_address(kmem, address)
        zone = meta.zone
        esize = zone.elem_outer_size

        # elem_idx is -1 (and elem_addr None) when the address falls in the
        # chunk header area before the first element
        base = meta.page_addr + zone.elem_inner_offs
        elem_idx = (address - base) // esize if address >= base else -1
        elem_addr = base + elem_idx * esize if address >= base else None
        self.real_addr = elem_addr
        self.real_meta = meta

        self.kmem = kmem
        self.meta = meta
        self.zone = zone
        self.elem_idx = elem_idx
        self.elem_addr = elem_addr

    @property
    def object_range(self):
        if self.elem_idx >= 0:
            elem_addr = self.elem_addr
            elem_size = self.zone.elem_outer_size
            return MemoryRange(elem_addr, elem_addr + elem_size)

        base = self.meta.page_addr
        size = self.zone.elem_inner_offs
        return MemoryRange(base, base + size)

    @property
    def status(self):
        """ 'invalid', 'free', 'free (cached)', 'free (recirc)' or 'allocated' """

        zone = self.zone
        real_addr = self.real_addr

        if self.elem_idx < 0:
            return "invalid"

        elif not self.real_meta.is_allocated(zone, real_addr):
            return "free"

        elif real_addr in zone.cached():
            return "free (cached)"

        elif real_addr in zone.recirc():
            return "free (recirc)"

        else:
            return "allocated"

    def hexdump(self):
        print("Hexdump:")

        target = self.kmem.target
        zone = self.zone
        eaddr = self.elem_addr
        eend = eaddr + zone.elem_inner_size
        delta = self.real_addr - eaddr

        rz = zone.elem_redzone
        start = (eaddr & -16) - min(rz, 16) - 16
        end = (eend + 16 + 15) & -16
        marks = { self.address: '>' }

        def append_tag(addr):
            t = Pmap.kernel_pmap().get_tag(addr)
            return None if t is None else "{:#x}".format(t)

        # was: `phex = print_hex_data` immediately shadowed by this def
        def phex(a, b, c, d):
            print_hex_data(a, b, c, d, extra=append_tag)

        if rz > 16:
            print(" " + "=" * 88)
            print(" {}".format("." * 18))

            try:
                data = target.xReadBytes(start + delta, eaddr - start)
                phex(data, start, "", marks)
            except Exception:
                print(" *** unable to read redzone memory ***")
        else:
            try:
                data = target.xReadBytes(start + delta, eaddr - rz - start)
                phex(data, start, "", marks)
            except Exception:
                pass

            print(" " + "=" * 88)

            if rz:
                try:
                    data = target.xReadBytes(eaddr - rz + delta, rz)
                    phex(data, eaddr - rz, "", marks)
                except Exception:
                    print(" *** unable to read redzone memory ***")

        if rz:
            print(" {}".format("-" * 88))

        try:
            data = target.xReadBytes(eaddr + delta, eend - eaddr)
            phex(data, eaddr, "", marks)
        except Exception:
            print(" *** unable to read element memory ***")

        print(" " + "=" * 88)

        try:
            data = target.xReadBytes(eend + delta, end - eend)
            phex(data, eend, "", marks)
        except Exception:
            pass

        print()

    def describe(self, verbose=False):
        meta = self.meta
        zone = self.zone
        status = self.status
        btlog = zone.btlog

        meta.describe()

        print("Zone Heap Object Info")
        tagged_addr = Pmap.kernel_pmap().ldg(self.address)
        if self.address != tagged_addr:
            print(" tagged address : {:#x}".format(tagged_addr))

        print(" element index : {}".format(self.elem_idx))
        print(" chunk offset : {}".format(self.address - meta.page_addr))
        print(" status : {}".format(status))
        if btlog and (btlog.is_log() or status == 'allocated'):
            record = next(btlog.iter_records(
                wantElement=self.elem_addr, reverse=True), None)
            if record:
                btlib = BTLibrary.get_shared()
                print(" last zlog backtrace",
                    *btlib.get_stack(record.ref).symbolicated_frames(prefix=" "), sep="\n")

        print()

        if self.elem_idx >= 0 and verbose:
            self.hexdump()


@whatis_provider
class ZoneWhatisProvider(WhatisProvider):
    """
    Whatis Provider for the zone ranges
    - metadata (bits and ZPM)
    - PGZ
    - regular heap objects
    """

    def __init__(self, kmem):
        super().__init__(kmem)

    def claims(self, address):
        kmem = self.kmem

        return any(
            r.contains(address)
            for r in (kmem.meta_range, kmem.bits_range, kmem.zone_range)
        )

    def lookup(self, address):
        kmem = self.kmem

        if kmem.meta_range.contains(address):
            return ZonePageMetadata(self.kmem, address)

        if kmem.bits_range.contains(address):
            return ZoneBitsMemoryObject(self.kmem, address)

        return ZoneHeapMemoryObject(self.kmem, address)


class ZPercpuValue(object):
    """
    Provides an enumerator for a zpercpu value
    """

    def __init__(self, sbvalue):
        """
        @param sbvalue (SBValue)
            The value to enumerate
        """
        self.sbv = sbvalue

    def __iter__(self):
        sbv = self.sbv
        kmem = KMem.get_shared()
        addr = sbv.GetValueAsAddress()
        name = sbv.GetName()
        ty = sbv.GetType().GetPointeeType()

        # per-cpu slots are one page apart
        return (
            sbv.xCreateValueFromAddress(name, addr + (cpu << kmem.page_shift), ty)
            for cpu in kmem.zcpus
        )


class Zone(object):
    """
    the Zone class wraps XNU Zones and provides fast enumeration
    of allocated, cached, ... elements.
    """

    def __init__(self, index_name_or_addr):
        """
        @param index_name_or_addr (int or str):
            - int: a zone index within [0, num_zones)
            - int: a zone address within [zone_array, zone_array + num_zones)
            - str: a zone name
        """

        kmem = KMem.get_shared()
        zarr = kmem.zone_array

        if isinstance(index_name_or_addr, str):
            mangled_name = index_name_or_addr.replace(' ', '.')
            zid = self._find_zone_id_by_mangled_name(mangled_name)
        elif index_name_or_addr < kmem.num_zones:
            # valid indices are [0, num_zones) (was: `<=`, off by one)
            zid = index_name_or_addr
        else:
            zid = index_name_or_addr - zarr.GetLoadAddress()
            zid = zid // zarr.GetType().GetArrayElementType().GetByteSize()

        self.kmem = kmem
        self.zid = zid
        self.sbv = zarr.chkGetChildAtIndex(zid)

    @staticmethod
    @caching.cache_dynamically
    def get_zone_name(zid, target=None):
        """
        Returns a zone name by index.

        @param zid (int)
            A zone ID

        @returns (str or None)
            Returns a string holding the zone name
            if the zone exists, or None
        """

        kmem = KMem.get_shared()
        if zid >= kmem.num_zones:
            return None

        zone = kmem.zone_array.chkGetChildAtIndex(zid)
        zsec = kmem.zsec_array.chkGetChildAtIndex(zid)

        if zone.xGetIntegerByName('z_self') == 0:
            return None

        heap_id = zsec.xGetIntegerByName('z_kheap_id')

        return KMem._HEAP_NAMES[heap_id] + zone.xGetCStringByName('z_name')

    @staticmethod
    @caching.cache_dynamically
    def _find_zone_id_by_mangled_name(name, target=None):
        """
        Lookup a zone ID by name

        @param name (str)
            The name of the zone to lookup

        @returns (int)
            The zone ID for this name

        @raises KeyError
            If no zone has this name
        """

        kmem = KMem.get_shared()
        for zid in range(kmem.num_zones):
            k = Zone.get_zone_name(zid)
            if k is not None and name == k.replace(' ', '.'):
                return zid

        raise KeyError("No zone called '{}' found".format(name))

    @property
    def initialized(self):
        """ Whether the zone is initialized """

        return self.sbv.xGetIntegerByName('z_self') != 0

    @property
    def address(self):
        """ The zone address """

        return self.sbv.GetLoadAddress()

    @property
    def name(self):
        """ The zone name """

        return self.get_zone_name(self.zid)

    @property
    def mangled_name(self):
        """ The zone mangled name """

        return self.name.replace(' ', '.')

    @caching.dyn_cached_property
    def elem_redzone(self, target=None):
        """ The KASan redzone size of elements (0 without KASan classic) """

        if self.kmem.kasan_classic:
            return self.sbv.xGetIntegerByName('z_kasan_redzone')
        return 0

    @caching.dyn_cached_property
    def elem_inner_size(self, target=None):
        """ The inner size of elements """

        return self.sbv.xGetIntegerByName('z_elem_size')

    @caching.dyn_cached_property
    def elem_outer_size(self, target=None):
        """ The outer size of elements (inner size plus redzone) """

        if not self.kmem.kasan_classic:
            return self.elem_inner_size
        return self.elem_inner_size + self.elem_redzone

    @caching.dyn_cached_property
    def elem_inner_offs(self, target=None):
        """ The chunk initial offset """

        return self.sbv.xGetIntegerByName('z_elem_offs')

    @caching.dyn_cached_property
    def chunk_pages(self, target=None):
        """ The number of pages per chunk """

        return self.sbv.xGetIntegerByName('z_chunk_pages')

    @caching.dyn_cached_property
    def chunk_elems(self, target=None):
        """ The number of elements per chunk """

        return self.sbv.xGetIntegerByName('z_chunk_elems')

    @property
    def percpu(self):
        """ Whether this is a per-cpu zone """

        return self.sbv.xGetIntegerByName('z_percpu')

    @property
    def btlog(self):
        """ Returns the zone's BTLog or None """

        try:
            btlog = self.sbv.xGetPointeeByName('z_btlog')
            return BTLog(btlog)
        except Exception:
            return None

    def describe(self):
        kmem = self.kmem
        zone = self.sbv
        zsec = kmem.zsec_array.chkGetChildAtIndex(self.zid)

        submap_arr = kmem.target.chkFindFirstGlobalVariable('zone_submaps_names')
        submap_idx = zsec.xGetIntegerByName('z_submap_idx')
        submap_name = submap_arr.xGetCStringAtIndex(submap_idx)
        submap_end = zsec.xGetIntegerByName('z_submap_from_end')

        try:
            btlog = zone.xGetIntegerByName('z_btlog')
        except Exception:
            # likely a release kernel
            btlog = None

        fmt = (
            "Zone Info\n"
            " name : {0.name} ({&z:#x})\n"
            " submap : {1} (from {2})\n"
            " element size : {0.elem_inner_size}\n"
            " element offs : {0.elem_inner_offs}\n"
        )
        if kmem.kasan_classic:
            fmt += " element redzone : {0.elem_redzone}\n"
        fmt += " chunk elems / pages : {$z.z_chunk_elems} / {$z.z_chunk_pages}\n"
        if btlog:
            fmt += " btlog : {$z.z_btlog:#x}\n"

        print(xnu_format(fmt, self, submap_name,
            "right" if submap_end else "left", z = zone))

    def iter_page_queue(self, name):
        """ Yields the ZonePageMetadata queued on the named page queue """

        kmem = self.kmem
        zone = self.sbv

        pva = zone.xGetIntegerByPath('.{}.packed_address'.format(name))

        while pva:
            meta = ZonePageMetadata._create_with_pva(kmem, pva)
            pva = meta.next_pva
            yield meta

    def _depotElements(self, depot, into):
        """ Accumulates all element addresses of a zone depot into a set """

        last = depot.xGetPointeeByName('zd_tail').GetValueAsAddress()
        mag = depot.xGetPointeeByName('zd_head')

        kmem = self.kmem
        n = kmem.mag_size
        target = kmem.target

        while mag and mag.GetLoadAddress() != last:
            into.update(kmem.iter_addresses(target.xIterAsULong(
                mag.xGetLoadAddressByName('zm_elems'),
                n
            )))
            mag = mag.xGetPointeeByName('zm_next')

        return into

    def cached(self, into = None):
        """ all addresses in per-cpu caches or per-cpu depots """

        pcpu = self.sbv.GetChildMemberWithName('z_pcpu_cache')
        into = into if into is not None else set()

        if pcpu.GetValueAsAddress():
            target = pcpu.target
            kmem = self.kmem

            for cache in ZPercpuValue(pcpu):
                into.update(kmem.iter_addresses(target.xIterAsULong(
                    cache.xGetIntegerByName('zc_alloc_elems'),
                    cache.xGetIntegerByName('zc_alloc_cur')
                )))

                into.update(kmem.iter_addresses(target.xIterAsULong(
                    cache.xGetIntegerByName('zc_free_elems'),
                    cache.xGetIntegerByName('zc_free_cur')
                )))

                self._depotElements(
                    cache.chkGetChildMemberWithName('zc_depot'),
                    into = into
                )

        return into

    def recirc(self, into = None):
        """ all addresses in the recirculation layer """

        return self._depotElements(
            self.sbv.chkGetChildMemberWithName('z_recirc'),
            into = into if into is not None else set()
        )

    def iter_all(self, ty = None):
        """
        Returns a generator for all addresses/values that can be made

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>)     if ty is None
            - (generator<SBValue>) if ty is set
        """

        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
                'z_pageq_empty',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_all(self)
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def iter_free(self, ty = None):
        """
        Returns a generator for all free addresses/values

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>)     if ty is None
            - (generator<SBValue>) if ty is set
        """

        cached = set()
        self.cached(into = cached)
        self.recirc(into = cached)

        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_all(self)
            if addr in cached or not meta.is_allocated(self, addr)
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def iter_allocated(self, ty = None):
        """
        Returns a generator for all allocated addresses/values

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>)     if ty is None
            - (generator<SBValue>) if ty is set
        """

        cached = set()
        self.cached(into = cached)
        self.recirc(into = cached)

        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_allocated(self)
            if addr not in cached
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def __iter__(self):
        return self.iter_allocated()

840__all__ = [ 841 ZPercpuValue.__name__, 842 Zone.__name__, 843] 844