# lldb macros for reading/writing physical memory and walking pmap page tables.
from xnu import *
import xnudefines
from kdp import *
from utils import *
import struct
from collections import namedtuple
import process

def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None):
    """ Read a physical memory data based on address.
        params:
            phys_addr : int - Physical address to read
            bitsize : int - defines how many bytes to read. defaults to 64 bit
            cpuval : None (optional)
        returns:
            int - int value read from memory. in case of failure 0xBAD10AD is returned.
    """
    # When attached over KDP, delegate the read to the remote kdp agent.
    if "kdp" == GetConnectionProtocol():
        return KDPReadPhysMEM(phys_addr, bitsize)

    # NO KDP. Attempt to use physical memory
    paddr_in_kva = kern.PhysToKernelVirt(int(phys_addr))
    if paddr_in_kva:
        # Dereference the physical address through its kernel-virtual alias,
        # using the pointer type that matches the requested width.
        if bitsize == 64 :
            return kern.GetValueFromAddress(paddr_in_kva, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
        if bitsize == 32 :
            return kern.GetValueFromAddress(paddr_in_kva, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
        if bitsize == 16 :
            return kern.GetValueFromAddress(paddr_in_kva, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
        if bitsize == 8 :
            return kern.GetValueFromAddress(paddr_in_kva, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()
    # Sentinel value ("bad load") documented above; also returned for an
    # unsupported bitsize.
    return 0xBAD10AD

@lldb_command('readphys')
def ReadPhys(cmd_args = None):
    """ Reads the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: readphys <nbits> <address>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError()

    else:
        nbits = ArgumentStringToInt(cmd_args[0])
        phys_addr = ArgumentStringToInt(cmd_args[1])
        print("{0: <#x}".format(ReadPhysInt(phys_addr, nbits)))
    return True

# Convenience aliases with the bit width baked into the command name.
lldb_alias('readphys8', 'readphys 8 ')
lldb_alias('readphys16', 'readphys 16 ')
lldb_alias('readphys32', 'readphys 32 ')
lldb_alias('readphys64', 'readphys 64 ')

def KDPReadPhysMEM(address, bits):
    """ Setup the state for READPHYSMEM64 commands for reading data via kdp
        params:
            address : int - address where to read the data from
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            int: read value from memory.
            0xBAD10AD: if failed to read data.
    """
    retval = 0xBAD10AD
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return retval

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # The pack/unpack round trips swap the address and byte count between
        # big- and little-endian representations for the wire format.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]
        packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            value = ret_obj.GetOutput()

            if bits == 64 :
                pack_fmt = "<Q"
                unpack_fmt = ">Q"
            if bits == 32 :
                pack_fmt = "<I"
                unpack_fmt = ">I"
            if bits == 16 :
                pack_fmt = "<H"
                unpack_fmt = ">H"
            if bits == 8 :
                pack_fmt = "<B"
                unpack_fmt = ">B"

            # Parse the trailing (bits // 4)+1 characters of the command output
            # as hex and byte-swap into host order.
            # NOTE(review): assumes the reply's hex digits are at the end of the
            # output string — confirm against the hwprobe server framing.
            retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0]

    else:
        # Non-hwprobe path: stage the request through the kernel's manual_pkt
        # buffer (continued below).
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))

        # Clear 'input' first so the kernel does not consume a half-built request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return retval

        kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize()
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return retval

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size)

        # Fill in the request fields; bail out on the first failed write.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Setting 'input' to 1 submits the staged request.
            if WriteInt32ToMemoryAddress(1, input_address):
                # now read data from the kdp packet
                data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data))
                if bits == 64 :
                    retval = kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 32 :
                    retval = kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 16 :
                    retval = kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 8 :
                    retval = kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()

    return retval


def KDPWritePhysMEM(address, intval, bits):
    """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp
        params:
            address : int - address where to save the data
            intval : int - integer value to be stored in memory
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            boolean: True if the write succeeded.
    """
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return False

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]

        if bits == 64 :
            pack_fmt = ">Q"
            unpack_fmt = "<Q"
        if bits == 32 :
            pack_fmt = ">I"
            unpack_fmt = "<I"
        if bits == 16 :
            pack_fmt = ">H"
            unpack_fmt = "<H"
        if bits == 8 :
            pack_fmt = ">B"
            unpack_fmt = "<B"

        # Byte-swap the payload value into the wire byte order.
        data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0]

        packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            return True
        else:
            return False

    else:
        # Stage the request through the kernel's manual_pkt buffer, mirroring
        # the read path in KDPReadPhysMEM.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))
        if not WriteInt32ToMemoryAddress(0, input_address):
            return False

        # Request length includes the payload bytes appended after the header.
        kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8)
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return False

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size)

        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Write the payload with the width-appropriate helper.
            if bits == 8:
                if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 16:
                if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 32:
                if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 64:
                if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            # Submit the request by flipping 'input' to 1.
            if WriteInt32ToMemoryAddress(1, input_address):
                return True
        return False


def WritePhysInt(phys_addr, int_val, bitsize = 64):
    """ Write an integer value into physical memory at the given address.
        params:
            phys_addr : int - Physical address to write to
            int_val : int - int value to write in memory
            bitsize : int - defines how many bits to write. defaults to 64 bit
        returns:
            bool - True if write was successful.
    """
    if "kdp" == GetConnectionProtocol():
        if not KDPWritePhysMEM(phys_addr, int_val, bitsize):
            print("Failed to write via KDP.")
            return False
        return True
    #We are not connected via KDP. So do manual math and savings.
    print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol())
    return False

@lldb_command('writephys')
def WritePhys(cmd_args=None):
    """ writes to the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: writephys <nbits> <address> <value>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
        value: int value to be written
        ex. (lldb)writephys 16 0x12345abcd 0x25
    """
    if cmd_args is None or len(cmd_args) < 3:
        raise ArgumentError()

    else:
        nbits = ArgumentStringToInt(cmd_args[0])
        phys_addr = ArgumentStringToInt(cmd_args[1])
        int_value = ArgumentStringToInt(cmd_args[2])
        print(WritePhysInt(phys_addr, int_value, nbits))


# Width-specific spellings of the writephys command.
lldb_alias('writephys8', 'writephys 8 ')
lldb_alias('writephys16', 'writephys 16 ')
lldb_alias('writephys32', 'writephys 32 ')
lldb_alias('writephys64', 'writephys 64 ')


def _PT_Step(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                 or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                 should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                 of a large page and not another page table entry
    """
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex dump of the entire 512-entry table.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask = ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet path: only compute the next-level address and flags.
        if entry & 0x1 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        # Decode the x86 PTE attribute bits into the verbose output string.
        if entry & 0x1:
            out_string += " valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += " invalid"
            pt_paddr = 0
            pt_valid = False
        if entry & (0x1 << 62):
            out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " read-only"

        if entry & (0x1 << 2):
            out_string += " user"
        else:
            out_string += " supervisor"

        if entry & (0x1 << 3):
            out_string += " PWT"

        if entry & (0x1 << 4):
            out_string += " PCD"

        if entry & (0x1 << 5):
            out_string += " accessed"

        if entry & (0x1 << 6):
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False

        if entry & (0x1 << 8):
            out_string += " global"

        if entry & (0x3 << 9):
            out_string += " avail:{0:x}".format((entry >> 9) & 0x3)

        if entry & (0x1 << 63):
            out_string += " noexec"
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)

def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes for EPT pmap
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                 or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                 should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                 of a large page and not another page table entry
    """
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex dump of the entire 512-entry table.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask = ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # EPT entries are valid when any of the R/W/X bits (0x7) are set,
        # unlike the regular PTE format which uses only bit 0.
        if entry & 0x7 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x7:
            out_string += "valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += "invalid"
            pt_paddr = 0
            pt_valid = False
        if entry & (0x1 << 62):
            out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & 0x1:
            out_string += " readable"
        else:
            out_string += " no read"
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " no write"

        if entry & (0x1 << 2):
            out_string += " executable"
        else:
            out_string += " no exec"

        # EPT memory type field (bits 5:3).
        ctype = entry & 0x38
        if ctype == 0x30:
            out_string += " cache-WB"
        elif ctype == 0x28:
            out_string += " cache-WP"
        elif ctype == 0x20:
            out_string += " cache-WT"
        elif ctype == 0x8:
            out_string += " cache-WC"
        else:
            out_string += " cache-NC"

        if (entry & 0x40) == 0x40:
            out_string += " Ignore-PTA"

        if (entry & 0x100) == 0x100:
            out_string += " accessed"

        if (entry & 0x200) == 0x200:
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)

def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT):
    """ Walk the l4 pmap entry.
        params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t
        vaddr : int - virtual address to walk
    """
    pt_paddr = unsigned(pmap_addr_val)
    pt_valid = (unsigned(pmap_addr_val) != 0)
    pt_large = 0
    pframe_offset = 0
    if pt_valid:
        # Lookup bits 47:39 of linear address in PML4T
        pt_index = (vaddr >> 39) & 0x1ff
        pframe_offset = vaddr & 0x7fffffffff
        if verbose_level > vHUMAN :
            print("pml4 (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid:
        # Lookup bits 38:30 of the linear address in PDPT
        pt_index = (vaddr >> 30) & 0x1ff
        pframe_offset = vaddr & 0x3fffffff
        if verbose_level > vHUMAN:
            print("pdpt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        #Lookup bits 29:21 of the linear address in PDT
        pt_index = (vaddr >> 21) & 0x1ff
        pframe_offset = vaddr & 0x1fffff
        if verbose_level > vHUMAN:
            print("pdt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        #Lookup bits 20:12 of linear address in PT
        pt_index = (vaddr >> 12) & 0x1ff
        pframe_offset = vaddr & 0xfff
        if verbose_level > vHUMAN:
            print("pt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    paddr = 0
    paddr_isvalid = False
    if pt_valid:
        # Combine the final page-frame address with the in-page offset.
        paddr = pt_paddr + pframe_offset
        paddr_isvalid = True

    if verbose_level > vHUMAN:
        if paddr_isvalid:
            pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self)
            print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue))
        else:
            print("no translation")

    return paddr

def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT):
    """
     params: pmapval - core.value representing pmap_t in kernel
     vaddr: int - int representing virtual address to walk
    """
    # NOTE(review): the walk is handed config['verbosity'] rather than this
    # function's verbose_level parameter (which only gates the prints below)
    # — confirm that is intentional.
    if pmapval.pm_cr3 != 0:
        if verbose_level > vHUMAN:
            print("Using normal Intel PMAP from pm_cr3\n")
        return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, config['verbosity'])
    else:
        if verbose_level > vHUMAN:
            print("Using EPT pmap from pm_eptp\n")
        return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, config['verbosity'])

def assert_64bit(val):
    # Sanity-check that a value fits in 64 bits.
    assert(val < 2**64)

# ARM64 translation-table geometry constants.
ARM64_TTE_SIZE = 8
ARM64_TTE_SHIFT = 3
ARM64_VMADDR_BITS = 48

def PmapBlockOffsetMaskARM64(page_size, level):
    """ Return the offset mask (low bits) covered by a block mapping at the
        given level: page_size * entries_per_table^(3 - level) - 1.
    """
    assert level >= 0 and level <= 3
    ttentries = (page_size // ARM64_TTE_SIZE)
    return page_size * (ttentries ** (3 - level)) - 1

def PmapBlockBaseMaskARM64(page_size, level):
    """ Return the base-address mask (within ARM64_VMADDR_BITS) for a block
        mapping at the given level — the complement of the offset mask.
    """
    assert level >= 0 and level <= 3
    return ((1 << ARM64_VMADDR_BITS) - 1) & ~PmapBlockOffsetMaskARM64(page_size, level)

# One visited translation-table entry recorded by PmapWalkARM64's 'extra' output.
PmapTTEARM64 = namedtuple('PmapTTEARM64', ['level', 'value', 'stage2'])

def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False):
    """ Display the bits of an ARM64 translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level. Valid values are 1, 2, or 3.
        is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise.
549 """ 550 assert(isinstance(level, numbers.Integral)) 551 assert_64bit(tte) 552 553 if tte & 0x1 == 0x0: 554 print("Invalid.") 555 return 556 557 if (tte & 0x2 == 0x2) and (level != 0x3): 558 print("Type = Table pointer.") 559 print("Table addr = {:#x}.".format(tte & 0xfffffffff000)) 560 561 if not stage2: 562 print("PXN = {:#x}.".format((tte >> 59) & 0x1)) 563 print("XN = {:#x}.".format((tte >> 60) & 0x1)) 564 print("AP = {:#x}.".format((tte >> 61) & 0x3)) 565 print("NS = {:#x}.".format(tte >> 63)) 566 else: 567 print("Type = Block.") 568 569 if stage2: 570 print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf)) 571 else: 572 attr_index = (tte >> 2) & 0x7 573 attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU', 574 3: 'CACHE DISABLE', 575 4: 'RESERVED (MTE if FEAT_MTE supported)', 576 5: 'POSTED (DISABLE_XS if FEAT_XS supported)', 577 6: 'POSTED_REORDERED (POSTED_COMBINED_REORDERED if FEAT_XS supported)', 578 7: 'POSTED_COMBINED_REORDERED (POSTED_COMBINED_REORDERED_XS if FEAT_XS supported)' } 579 580 # Only show the string version of the AttrIdx for CPU mappings since 581 # these values don't apply to IOMMU mappings. 
            if is_iommu_tte:
                print("AttrIdx = {:#x}.".format(attr_index))
            else:
                print("AttrIdx = {:#x} ({:s}).".format(attr_index, attr_string[attr_index]))
            print("NS = {:#x}.".format((tte >> 5) & 0x1))

        if stage2:
            print("S2AP = {:#x}.".format((tte >> 6) & 0x3))
        else:
            print("AP = {:#x}.".format((tte >> 6) & 0x3))

        print("SH = {:#x}.".format((tte >> 8) & 0x3))
        print("AF = {:#x}.".format((tte >> 10) & 0x1))

        if not stage2:
            print("nG = {:#x}.".format((tte >> 11) & 0x1))

        print("HINT = {:#x}.".format((tte >> 52) & 0x1))

        if stage2:
            print("S2XN = {:#x}.".format((tte >> 53) & 0x3))
        else:
            print("PXN = {:#x}.".format((tte >> 53) & 0x1))
            print("XN = {:#x}.".format((tte >> 54) & 0x1))

        print("SW Use = {:#x}.".format((tte >> 55) & 0xf))

    return

def PmapTTnIndexARM64(vaddr, pmap_pt_attr):
    """ Return the per-level translation-table indices for vaddr, computed
        from each level's index_mask/shift in pmap_pt_attr.
    """
    pta_max_level = unsigned(pmap_pt_attr.pta_max_level)

    tt_index = []
    for i in range(pta_max_level + 1):
        tt_index.append((vaddr & unsigned(pmap_pt_attr.pta_level_info[i].index_mask)) \
            >> unsigned(pmap_pt_attr.pta_level_info[i].shift))

    return tt_index

def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN, extra=None):
    """ Walk an ARM64 translation-table hierarchy rooted at root_tte and
        translate vaddr to a physical address.
        params:
            pmap_pt_attr - page-table attributes (page size, level info, root/max level)
            root_tte - indexable core.value for the root translation table
            vaddr : int - virtual address to translate
            verbose_level - vHUMAN/vSCRIPT/vDETAIL output verbosity
            extra : dict or None - optional out-parameter populated with
                    page_size, page_mask, paddr, is_valid, is_leaf and the
                    list of visited TTEs (PmapTTEARM64 tuples).
        returns:
            int or None - translated physical address, or None if the walk
            reached an invalid entry.
    """
    # NOTE(review): leftover Python 2 check — '(int, int)' was presumably '(int, long)'.
    assert(type(vaddr) in (int, int))
    assert_64bit(vaddr)
    assert_64bit(root_tte)

    # Obtain pmap attributes
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr)
    stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False)

    # The pmap starts at a page table level that is defined by register
    # values; the root level can be obtained from the attributes structure
    level = unsigned(pmap_pt_attr.pta_root_level)

    root_tt_index = tt_index[level]
    # NOTE(review): root_pgtable_num_ttes is computed but not used below.
    root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
        unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1
    tte = int(unsigned(root_tte[root_tt_index]))

    # Walk the page tables
    paddr = None
    max_level = unsigned(pmap_pt_attr.pta_max_level)
    is_valid = True
    is_leaf = False

    if extra is not None:
        extra['page_size'] = page_size
        extra['page_mask'] = page_size - 1
        extra['paddr'] = None
        extra['is_valid'] = True
        extra['is_leaf'] = False
        extra['tte'] = []

    while (level <= max_level):
        if extra is not None:
            extra['tte'].append(PmapTTEARM64(level=level, value=tte, stage2=stage2))

        if verbose_level >= vSCRIPT:
            print("L{} entry: {:#x}".format(level, tte))
        if verbose_level >= vDETAIL:
            PmapDecodeTTEARM64(tte, level, stage2)

        if tte & 0x1 == 0x0:
            if verbose_level >= vHUMAN:
                print("L{} entry invalid: {:#x}\n".format(level, tte))

            if extra is not None:
                extra['is_valid'] = False
            is_valid = False
            break

        # Handle leaf entry
        if tte & 0x2 == 0x0 or level == max_level:
            # Block mappings above the max level cover a larger region than a
            # page, so use the level-specific masks.
            base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level)
            offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level)
            paddr = tte & base_mask
            paddr = paddr | (vaddr & offset_mask)

            if level != max_level:
                # NOTE(review): this print ignores verbose_level — confirm it
                # is intentionally always shown for block mappings.
                print("phys: {:#x}".format(paddr))

            if extra is not None:
                extra['is_leaf'] = True
                extra['paddr'] = paddr
            is_leaf = True
            break
        else:
            # Handle page table entry
            next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1])
            assert(isinstance(next_phys, numbers.Integral))

            next_virt = kern.PhysToKernelVirt(next_phys)
            assert(isinstance(next_virt, numbers.Integral))

            if verbose_level >= vDETAIL:
                print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt))

            ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*")
            tte = int(unsigned(dereference(ttep)))
            assert(isinstance(tte, numbers.Integral))

            # We've parsed one level, so go to the next level
            assert(level <= 3)
            level = level + 1


    if verbose_level >= vHUMAN:
        if paddr:
            print("Translation of {:#x} is {:#x}.".format(vaddr, paddr))
        else:
            print("(no translation)")

    return paddr

def PmapWalk(pmap, vaddr, verbose_level = vHUMAN):
    """ Dispatch a page-table walk for vaddr to the architecture-specific
        implementation (x86_64 or arm64); raises NotImplementedError otherwise.
    """
    if kern.arch == 'x86_64':
        return PmapWalkX86_64(pmap, vaddr, verbose_level)
    elif kern.arch.startswith('arm64'):
        # Obtain pmap attributes from pmap structure
        pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
        return PmapWalkARM64(pmap_pt_attr, pmap.tte, vaddr, verbose_level)
    else:
        raise NotImplementedError("PmapWalk does not support {0}".format(kern.arch))

@lldb_command('pmap_walk')
def PmapWalkHelper(cmd_args=None):
    """ Perform a page-table walk in <pmap> for <virtual_address>.
        Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e]
        Multiple -v's can be specified for increased verbosity
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to pmap_walk.")

    pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t')
    addr = ArgumentStringToInt(cmd_args[1])
    PmapWalk(pmap, addr, config['verbosity'])
    return

def GetMemoryAttributesFromUser(requested_type):
    """ Map a user-supplied page-size string ('4k', '16k', '16k_s2') to the
        corresponding pmap page-table attributes global, or None if unknown.
    """
    pmap_attr_dict = {
        '4k' : kern.globals.pmap_pt_attr_4k,
        '16k' : kern.globals.pmap_pt_attr_16k,
        # The stage-2 attributes global only exists on some builds.
        '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2') else None,
    }

    requested_type = requested_type.lower()
    if requested_type not in pmap_attr_dict:
        return None

    return pmap_attr_dict[requested_type]

@lldb_command('ttep_walk')
def TTEPWalkPHelper(cmd_args=None):
    """ Perform a page-table walk in <root_ttep> for <virtual_address>.
        Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e]
        Multiple -v's can be specified for increased verbosity
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to ttep_walk.")

    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch))

    # The first argument is a physical table pointer; convert it to its
    # kernel-virtual alias before walking.
    tte = kern.GetValueFromAddress(kern.PhysToKernelVirt(ArgumentStringToInt(cmd_args[0])), 'unsigned long *')
    addr = ArgumentStringToInt(cmd_args[1])

    pmap_pt_attr = kern.globals.native_pt_attr if len(cmd_args) < 3 else GetMemoryAttributesFromUser(cmd_args[2])
    if pmap_pt_attr is None:
        raise ArgumentError("Invalid translation attribute type.")

    return PmapWalkARM64(pmap_pt_attr, tte, addr, config['verbosity'])

@lldb_command('decode_tte')
def DecodeTTE(cmd_args=None):
    """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2]
        Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2]
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to decode_tte.")
    if len(cmd_args) > 2 and cmd_args[2] not in ["s1", "s2"]:
        raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2]))
    if kern.arch.startswith('arm64'):
        # Stage-1 decoding is the default; 's2' selects stage-2 layout.
        stage2 = True if len(cmd_args) > 2 and cmd_args[2] == "s2" else False
        PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2)
    else:
        raise NotImplementedError("decode_tte does not support {0}".format(kern.arch))

# Flag bits kept in the upper bits of a pv_head_table entry (see PVWalkARM).
PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) | (1 << 58) | (1 << 57) | (1 << 56) | (1 << 55) | (1 << 54)
PVH_HIGH_FLAGS_ARM32 = (1 << 31)

def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN):
    """ Dump information about a single mapping retrieved by the pv_head_table.

        pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP,
              or None if type PVH_TYPE_PTEP.
        ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with
              the high flags already set (the type bits don't need to be cleared).
              For type PVH_TYPE_PVEP this will be the value retrieved from the
              pve_ptep[] array.
    """
    if kern.arch.startswith('arm64'):
        iommu_flag = 0x4
        iommu_table_flag = 1 << 63
    else:
        # Non-arm64: no IOMMU flag bits to strip/check.
        iommu_flag = 0
        iommu_table_flag = 0

    # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries.
    if pvep is not None and (ptep & 0x1):
        # Note: It's not possible for IOMMU mappings to be marked as alt acct so
        # setting this string is mutually exclusive with setting the IOMMU strings.
        pte_str = ' (alt acct)'
    else:
        pte_str = ''

    if pvep is not None:
        pve_str = 'PVEP {:#x}, '.format(pvep)
    else:
        pve_str = ''

    # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this
    # either does nothing or clears out the AltAcct bit.
    ptep = ptep & ~0x3

    # When printing with extra verbosity, print an extra newline that describes
    # who owns the mapping.
    extra_str = ''

    if ptep & iommu_flag:
        # The mapping is an IOMMU Mapping
        ptep = ptep & ~iommu_flag

        # Due to LLDB automatically setting all the high bits of pointers, when
        # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set
        # the iommu_table_flag, which means this check only works for PVH entries
        # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH
        # entry which has the right casting applied to avoid this issue).
        #
        # Why don't we just do the same casting for pve_ptep[] you ask? Well not
        # for a lack of trying, that's for sure. If you can figure out how to
        # cast that array correctly, then be my guest.
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            if ptep & iommu_table_flag:
                pte_str = ' (IOMMU table), entry'
                ptd = GetPtDesc(KVToPhysARM(ptep))
                iommu = dereference(ptd.iommu)
            else:
                # Instead of dumping the PTE (since we don't have that), dump the
                # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc).
                #
                # This works because later on when the "ptep" is dereferenced as a
                # PTE pointer (uint64_t pointer), the descriptor pointer will be
                # dumped as that's the first 64-bit value in the IOMMU state object.
                pte_str = ' (IOMMU state), descriptor'
                ptep = ptep | iommu_table_flag
                iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *'))

            # For IOMMU mappings, dump who owns the mapping as the extra string.
            extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name)
            if unsigned(iommu.name) != 0:
                extra_str += '/{:s}'.format(iommu.name)
            extra_str += ' (iommu state: {:x})'.format(addressof(iommu))
        else:
            # Non-PPL protection types: only the page-table descriptor's iommu
            # pointer is available.
            ptd = GetPtDesc(KVToPhysARM(ptep))
            extra_str = 'Mapped by IOMMU {:x}'.format(ptd.iommu)
    else:
        # The mapping is a CPU Mapping
        pte_str += ', entry'
        ptd = GetPtDesc(KVToPhysARM(ptep))
        if ptd.pmap == kern.globals.kernel_pmap:
            extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap)
        elif verbose_level >= vDETAIL:
            task = process.TaskForPmapHelper(ptd.pmap)
            extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>")
    # The dereference can fail (e.g. unmapped ptep); fall back to a placeholder.
    try:
        print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))))
    except:
        print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str))

    if verbose_level >= vDETAIL:
        print(" |-- {:s}".format(extra_str))

def PVWalkARM(pai, verbose_level = vHUMAN):
    """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap.

        pai: physical address index (PAI) corresponding to the pv_head_table
             entry to walk.
        verbose_level: Set to vSCRIPT or higher to print extra info around the
                       the pv_head_table/pp_attr_table flags and to dump the
                       pt_desc_t object if the type is a PTD.
    """
    # LLDB will automatically try to make pointer values dereferencable by
    # setting the upper bits if they aren't set. We need to parse the flags
    # stored in the upper bits later, so cast the pv_head_table to an array of
    # integers to get around this "feature". We'll add the upper bits back
    # manually before deref'ing anything.
899 pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*") 900 pvh_raw = unsigned(pv_head_table[pai]) 901 pvh = pvh_raw 902 pvh_type = pvh & 0x3 903 904 print("PVH raw value: {:#x}".format(pvh_raw)) 905 if kern.arch.startswith('arm64'): 906 pvh = pvh | PVH_HIGH_FLAGS_ARM64 907 else: 908 pvh = pvh | PVH_HIGH_FLAGS_ARM32 909 910 if pvh_type == 0: 911 print("PVH type: NULL") 912 elif pvh_type == 3: 913 print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3)) 914 elif pvh_type == 2: 915 print("PVH type: single PTE") 916 PVDumpPTE(None, pvh, verbose_level) 917 elif pvh_type == 1: 918 pvep = pvh & ~0x3 919 print("PVH type: PTE list") 920 pve_ptep_idx = 0 921 while pvep != 0: 922 pve = kern.GetValueFromAddress(pvep, "pv_entry_t *") 923 924 if pve.pve_ptep[pve_ptep_idx] != 0: 925 PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level) 926 927 pve_ptep_idx += 1 928 if pve_ptep_idx == 2: 929 pve_ptep_idx = 0 930 pvep = unsigned(pve.pve_next) 931 932 if verbose_level >= vDETAIL: 933 if (pvh_type == 1) or (pvh_type == 2): 934 # Dump pv_head_table flags when there's a valid mapping. 
935 pvh_flags = [] 936 937 if pvh_raw & (1 << 62): 938 pvh_flags.append("CPU") 939 if pvh_raw & (1 << 60): 940 pvh_flags.append("EXEC") 941 if pvh_raw & (1 << 59): 942 pvh_flags.append("LOCKDOWN_KC") 943 if pvh_raw & (1 << 58): 944 pvh_flags.append("HASHED") 945 if pvh_raw & (1 << 57): 946 pvh_flags.append("LOCKDOWN_CS") 947 if pvh_raw & (1 << 56): 948 pvh_flags.append("LOCKDOWN_RO") 949 if pvh_raw & (1 << 55): 950 pvh_flags.append("RETIRED") 951 if pvh_raw & (1 << 54): 952 if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL: 953 pvh_flags.append("SECURE_FLUSH_NEEDED") 954 else: 955 pvh_flags.append("SLEEPABLE_LOCK") 956 if kern.arch.startswith('arm64') and pvh_raw & (1 << 61): 957 pvh_flags.append("LOCK") 958 959 print("PVH Flags: {}".format(pvh_flags)) 960 961 # Always dump pp_attr_table flags (these can be updated even if there aren't mappings). 962 ppattr = unsigned(kern.globals.pp_attr_table[pai]) 963 print("PPATTR raw value: {:#x}".format(ppattr)) 964 965 ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)] 966 if ppattr & 0x40: 967 ppattr_flags.append("REFERENCED") 968 if ppattr & 0x80: 969 ppattr_flags.append("MODIFIED") 970 if ppattr & 0x100: 971 ppattr_flags.append("INTERNAL") 972 if ppattr & 0x200: 973 ppattr_flags.append("REUSABLE") 974 if ppattr & 0x400: 975 ppattr_flags.append("ALTACCT") 976 if ppattr & 0x800: 977 ppattr_flags.append("NOENCRYPT") 978 if ppattr & 0x1000: 979 ppattr_flags.append("REFFAULT") 980 if ppattr & 0x2000: 981 ppattr_flags.append("MODFAULT") 982 if ppattr & 0x4000: 983 ppattr_flags.append("MONITOR") 984 if ppattr & 0x8000: 985 ppattr_flags.append("NO_MONITOR") 986 987 print("PPATTR Flags: {}".format(ppattr_flags)) 988 989 if pvh_type == 3: 990 def RunLldbCmdHelper(command): 991 """Helper for dumping an LLDB command right before executing it 992 and printing the results. 993 command: The LLDB command (as a string) to run. 994 995 Example input: "p/x kernel_pmap". 
996 """ 997 print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command))) 998 # Dump the page table descriptor object 999 ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *') 1000 RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd)) 1001 1002 # Depending on the system, more than one ptd_info can be associated 1003 # with a single PTD. Only dump the first PTD info and assume the 1004 # user knows to dump the rest if they're on one of those systems. 1005 RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]") 1006 1007@lldb_command('pv_walk') 1008def PVWalk(cmd_args=None): 1009 """ Show mappings for <physical_address | PAI> tracked in the PV list. 1010 Syntax: (lldb) pv_walk <physical_address | PAI> [-vv] 1011 1012 Extra verbosity will pretty print the pv_head_table/pp_attr_table flags 1013 as well as dump the page table descriptor (PTD) struct if the entry is a 1014 PTD. 1015 """ 1016 if cmd_args is None or len(cmd_args) == 0: 1017 raise ArgumentError("Too few arguments to pv_walk.") 1018 if not kern.arch.startswith('arm'): 1019 raise NotImplementedError("pv_walk does not support {0}".format(kern.arch)) 1020 1021 pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long') 1022 1023 # If the input is already a PAI, this function will return the input unchanged. 1024 # This function also ensures that the physical address is kernel-managed. 1025 pai = ConvertPhysAddrToPai(pa) 1026 1027 PVWalkARM(pai, config['verbosity']) 1028 1029@lldb_command('kvtophys') 1030def KVToPhys(cmd_args=None): 1031 """ Translate a kernel virtual address to the corresponding physical address. 1032 Assumes the virtual address falls within the kernel static region. 
1033 Syntax: (lldb) kvtophys <kernel virtual address> 1034 """ 1035 if cmd_args is None or len(cmd_args) == 0: 1036 raise ArgumentError("Too few arguments to kvtophys.") 1037 if kern.arch.startswith('arm'): 1038 print("{:#x}".format(KVToPhysARM(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))))) 1039 elif kern.arch == 'x86_64': 1040 print("{:#x}".format(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'))) - unsigned(kern.globals.physmap_base))) 1041 1042@lldb_command('phystokv') 1043def PhysToKV(cmd_args=None): 1044 """ Translate a physical address to the corresponding static kernel virtual address. 1045 Assumes the physical address corresponds to managed DRAM. 1046 Syntax: (lldb) phystokv <physical address> 1047 """ 1048 if cmd_args is None or len(cmd_args) == 0: 1049 raise ArgumentError("Too few arguments to phystokv.") 1050 print("{:#x}".format(kern.PhysToKernelVirt(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))))) 1051 1052def KVToPhysARM(addr): 1053 if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL: 1054 ptov_table = kern.globals.ptov_table 1055 for i in range(0, kern.globals.ptov_index): 1056 if (addr >= int(unsigned(ptov_table[i].va))) and (addr < (int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].len)))): 1057 return (addr - int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].pa))) 1058 else: 1059 papt_table = kern.globals.libsptm_papt_ranges 1060 page_size = kern.globals.page_size 1061 for i in range(0, unsigned(dereference(kern.globals.libsptm_n_papt_ranges))): 1062 if (addr >= int(unsigned(papt_table[i].papt_start))) and (addr < (int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].num_mappings) * page_size))): 1063 return (addr - int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].paddr_start))) 1064 raise ValueError("VA {:#x} not found in physical region lookup table".format(addr)) 1065 return (addr - 
unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase)) 1066 1067 1068def GetPtDesc(paddr): 1069 pn = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size 1070 pvh = unsigned(kern.globals.pv_head_table[pn]) 1071 if kern.arch.startswith('arm64'): 1072 pvh = pvh | PVH_HIGH_FLAGS_ARM64 1073 else: 1074 pvh = pvh | PVH_HIGH_FLAGS_ARM32 1075 pvh_type = pvh & 0x3 1076 if pvh_type != 0x3: 1077 raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh)) 1078 ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *') 1079 return ptd 1080 1081def PhysToFrameTableEntry(paddr): 1082 if paddr >= int(unsigned(kern.globals.sptm_first_phys)) or paddr < int(unsigned(kern.globals.sptm_last_phys)): 1083 return kern.globals.frame_table[(paddr - int(unsigned(kern.globals.sptm_first_phys))) // kern.globals.page_size] 1084 page_idx = paddr / kern.globals.page_size 1085 for i in range(0, kern.globals.sptm_n_io_ranges): 1086 base = kern.globals.io_frame_table[i].io_range.phys_page_idx 1087 end = base + kern.globals.io_frame_table[i].io_range.num_pages 1088 if page_idx >= base and page_idx < end: 1089 return kern.globals.io_frame_table[i] 1090 return kern.globals.xnu_io_fte 1091 1092@lldb_command('phystofte') 1093def PhysToFTE(cmd_args=None): 1094 """ Translate a physical address to the corresponding SPTM frame table entry pointer 1095 Syntax: (lldb) phystofte <physical address> 1096 """ 1097 if cmd_args is None or len(cmd_args) == 0: 1098 raise ArgumentError("Too few arguments to phystofte.") 1099 1100 fte = PhysToFrameTableEntry(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))) 1101 print(repr(fte)) 1102 1103XNU_IOMMU = 23 1104XNU_PAGE_TABLE = 19 1105XNU_PAGE_TABLE_SHARED = 20 1106XNU_PAGE_TABLE_ROZONE = 21 1107XNU_PAGE_TABLE_COMMPAGE = 22 1108SPTM_PAGE_TABLE = 9 1109 1110def ShowPTEARM(pte, page_size, level): 1111 """ Display vital information about an ARM page table entry 1112 pte: kernel virtual 
address of the PTE. page_size and level may be None, 1113 in which case we'll try to infer them from the page table descriptor. 1114 Inference of level may only work for L2 and L3 TTEs depending upon system 1115 configuration. 1116 """ 1117 pt_index = 0 1118 stage2 = False 1119 def GetPageTableInfo(ptd, paddr): 1120 nonlocal pt_index, page_size, level 1121 if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL: 1122 # First load ptd_info[0].refcnt so that we can check if this is an IOMMU page. 1123 # IOMMUs don't split PTDs across multiple 4K regions as CPU page tables sometimes 1124 # do, so the IOMMU refcnt token is always stored at index 0. If this is not 1125 # an IOMMU page, we may end up using a different final value for pt_index below. 1126 refcnt = ptd.ptd_info[0].refcnt 1127 # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001. 1128 is_iommu_pte = (refcnt & 0x8000) == 0x8000 1129 if not is_iommu_pte and page_size is None and hasattr(ptd.pmap, 'pmap_pt_attr'): 1130 page_size = ptd.pmap.pmap_pt_attr.pta_page_size 1131 elif page_size is None: 1132 page_size = kern.globals.native_pt_attr.pta_page_size 1133 pt_index = (pte % kern.globals.page_size) // page_size 1134 refcnt = ptd.ptd_info[pt_index].refcnt 1135 if not is_iommu_pte and hasattr(ptd.pmap, 'pmap_pt_attr') and hasattr(ptd.pmap.pmap_pt_attr, 'stage2'): 1136 stage2 = ptd.pmap.pmap_pt_attr.stage2 1137 if level is None: 1138 if refcnt == 0x4000: 1139 level = 2 1140 else: 1141 level = 3 1142 if is_iommu_pte: 1143 iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name) 1144 if unsigned(dereference(ptd.iommu).name) != 0: 1145 iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name) 1146 info_str = "iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name) 1147 else: 1148 info_str = None 1149 return (int(unsigned(refcnt)), level, info_str) 1150 else: 1151 fte = PhysToFrameTableEntry(paddr) 1152 if fte.type == XNU_IOMMU: 1153 if page_size is 
None: 1154 page_size = kern.globals.native_pt_attr.pta_page_size 1155 info_str = "PTD iommu token: {:#x} (ID {:#x} TSD {:#x})".format(ptd.iommu, fte.iommu_page.iommu_id, fte.iommu_page.iommu_tsd) 1156 return (int(unsigned(fte.iommu_page.iommu_refcnt._value)), 0, info_str) 1157 elif fte.type in [XNU_PAGE_TABLE, XNU_PAGE_TABLE_SHARED, XNU_PAGE_TABLE_ROZONE, XNU_PAGE_TABLE_COMMPAGE, SPTM_PAGE_TABLE]: 1158 if page_size is None: 1159 if hasattr(ptd.pmap, 'pmap_pt_attr'): 1160 page_size = ptd.pmap.pmap_pt_attr.pta_page_size 1161 else: 1162 page_size = kern.globals.native_pt_attr.pta_page_size; 1163 return (int(unsigned(fte.cpu_page_table.mapping_refcnt._value)), int(unsigned(fte.cpu_page_table.level)), None) 1164 else: 1165 raise ValueError("Unrecognized FTE type {:#x}".format(fte.type)) 1166 raise ValueError("Unable to retrieve PTD refcnt") 1167 pte_paddr = KVToPhysARM(pte) 1168 ptd = GetPtDesc(pte_paddr) 1169 refcnt, level, info_str = GetPageTableInfo(ptd, pte_paddr) 1170 wiredcnt = ptd.ptd_info[pt_index].wiredcnt 1171 if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL: 1172 va = ptd.va[pt_index] 1173 else: 1174 va = ptd.va 1175 print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x}, va: {:#x})".format(ptd, refcnt, wiredcnt, va)) 1176 1177 # The pmap/iommu field is a union, so only print the correct one. 
    if info_str is not None:
        print(info_str)
    else:
        if ptd.pmap == kern.globals.kernel_pmap:
            pmap_str = "(kernel_pmap)"
        else:
            task = process.TaskForPmapHelper(ptd.pmap)
            pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>")
        print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str))
    # Entries per table page (8 bytes each), and the VA span covered by one
    # entry at this level.
    nttes = page_size // 8
    granule = page_size * (nttes ** (3 - level))
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        pte_pgoff = pte % page_size
    else:
        pte_pgoff = pte % kern.globals.native_pt_attr.pta_page_size
    pte_pgoff = pte_pgoff // 8
    print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(va)) + (pte_pgoff * granule)))
    pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *'))))
    print("value: {:#x}".format(pteval))
    print("level: {:d}".format(level))
    PmapDecodeTTEARM64(pteval, level, stage2)

@lldb_command('showpte')
def ShowPTE(cmd_args=None):
    """ Display vital information about the page table entry at VA <pte>
        Syntax: (lldb) showpte <pte_va> [level] [4k|16k|16k_s2]
    """
    if cmd_args is None or len(cmd_args) == 0:
        raise ArgumentError("Too few arguments to showpte.")

    if kern.arch.startswith('arm64'):
        # Optional third argument selects the translation attributes
        # (page size / stage), from which the page size is taken.
        if len(cmd_args) >= 3:
            pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
            if pmap_pt_attr is None:
                raise ArgumentError("Invalid translation attribute type.")
            page_size = pmap_pt_attr.pta_page_size
        else:
            page_size = None

        level = ArgumentStringToInt(cmd_args[1]) if len(cmd_args) >= 2 else None
        ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), page_size, level)
    else:
        raise NotImplementedError("showpte does not support {0}".format(kern.arch))

def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action):
    """ Perform the specified action for all valid mappings in an ARM64 translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 2 or 3
        action: callback for each valid TTE
    """
    # Obtain pmap attributes
    pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    for i in range(nttes):
        try:
            tte = tt[i]
            # Bit 0 is the valid bit; skip invalid entries.
            if tte & 0x1 == 0x0:
                continue

            tt_next = None
            paddr = unsigned(tte) & unsigned(page_base_mask)

            # Handle leaf entry
            if tte & 0x2 == 0x0 or level == max_level:
                type = 'block' if level < max_level else 'entry'
                granule = PmapBlockOffsetMaskARM64(page_size, level) + 1
            else:
                # Handle page table entry
                type = 'table'
                granule = page_size
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')

            mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i)
            # Only recurse into the next-level table if the callback asks to.
            if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule):
                if tt_next is not None:
                    FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action)

        except Exception as exc:
            # A single unreadable entry shouldn't abort the whole walk.
            print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))))

def ScanPageTables(action, targetPmap=None):
    """ Perform the specified action for all valid mappings in all page tables,
        optionally restricted to a single pmap.
        pmap: pmap whose page table should be scanned. If None, all pmaps on system will be scanned.
    """
    print("Scanning all available translation tables. This may take a long time...")
    def ScanPmap(pmap, action):
        if kern.arch.startswith('arm64'):
            # Obtain pmap attributes
            pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
            granule = pmap_pt_attr.pta_page_size
            level = unsigned(pmap_pt_attr.pta_root_level)
            root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
                unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1

        if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule):
            if kern.arch.startswith('arm64'):
                FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action)

    if targetPmap is not None:
        ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action)
    else:
        for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
            ScanPmap(pmap, action)

@lldb_command('showallmappings')
def ShowAllMappings(cmd_args=None):
    """ Find and display all available mappings on the system for
        <physical_address>. Optionally only searches the pmap
        specified by [<pmap>]
        Syntax: (lldb) showallmappings <physical_address> [<pmap>]
        WARNING: this macro can take a long time (up to 30min.) to complete!
1296 """ 1297 if cmd_args is None or len(cmd_args) == 0: 1298 raise ArgumentError("Too few arguments to showallmappings.") 1299 if not kern.arch.startswith('arm'): 1300 raise NotImplementedError("showallmappings does not support {0}".format(kern.arch)) 1301 pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long') 1302 targetPmap = None 1303 if len(cmd_args) > 1: 1304 targetPmap = cmd_args[1] 1305 def printMatchedMapping(pmap, level, type, tte, paddr, va, granule): 1306 if paddr <= pa < (paddr + granule): 1307 print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va)) 1308 return True 1309 ScanPageTables(printMatchedMapping, targetPmap) 1310 1311@lldb_command('showptusage') 1312def ShowPTUsage(cmd_args=None): 1313 """ Display a summary of pagetable allocations for a given pmap. 1314 Syntax: (lldb) showptusage [<pmap>] 1315 WARNING: this macro can take a long time (> 1hr) to complete! 1316 """ 1317 if not kern.arch.startswith('arm'): 1318 raise NotImplementedError("showptusage does not support {0}".format(kern.arch)) 1319 targetPmap = None 1320 if len(cmd_args) > 0: 1321 targetPmap = cmd_args[0] 1322 lastPmap = [None] 1323 numTables = [0] 1324 numUnnested = [0] 1325 numPmaps = [0] 1326 def printValidTTE(pmap, level, type, tte, paddr, va, granule): 1327 unnested = "" 1328 nested_region_addr = int(unsigned(pmap.nested_region_addr)) 1329 nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size)) 1330 if lastPmap[0] is None or (pmap != lastPmap[0]): 1331 lastPmap[0] = pmap 1332 numPmaps[0] = numPmaps[0] + 1 1333 print ("pmap {:#x}:".format(pmap)) 1334 if type == 'root': 1335 return True 1336 if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end): 1337 ptd = GetPtDesc(paddr) 1338 if ptd.pmap != pmap: 1339 return False 1340 else: 1341 numUnnested[0] = numUnnested[0] + 1 1342 unnested = " (likely unnested)" 1343 numTables[0] = numTables[0] + 1 1344 
print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested) 1345 if level == 2: 1346 return False 1347 else: 1348 return True 1349 ScanPageTables(printValidTTE, targetPmap) 1350 print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables[0], numUnnested[0], numPmaps[0])) 1351 1352def checkPVList(pmap, level, type, tte, paddr, va, granule): 1353 """ Checks an ARM physical-to-virtual mapping list for consistency errors. 1354 pmap: owner of the translation table 1355 level: translation table level. PV lists will only be checked for L2 (arm32) or L3 (arm64) tables. 1356 type: unused 1357 tte: KVA of PTE to check for presence in PV list. If None, presence check will be skipped. 1358 paddr: physical address whose PV list should be checked. Need not be page-aligned. 1359 granule: unused 1360 """ 1361 vm_first_phys = unsigned(kern.globals.vm_first_phys) 1362 vm_last_phys = unsigned(kern.globals.vm_last_phys) 1363 page_size = kern.globals.page_size 1364 if kern.arch.startswith('arm64'): 1365 page_offset_mask = (page_size - 1) 1366 page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) 1367 paddr = paddr & page_base_mask 1368 max_level = 3 1369 pvh_set_bits = PVH_HIGH_FLAGS_ARM64 1370 if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys: 1371 return True 1372 pn = (paddr - vm_first_phys) // page_size 1373 pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits 1374 pvh_type = pvh & 0x3 1375 if pmap is not None: 1376 pmap_str = "pmap: {:#x}: ".format(pmap) 1377 else: 1378 pmap_str = '' 1379 if tte is not None: 1380 tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr) 1381 else: 1382 tte_str = "paddr {:#x}: ".format(paddr) 1383 if pvh_type == 0 or pvh_type == 3: 1384 print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type)) 1385 elif pvh_type == 2: 1386 ptep = pvh & ~0x3 1387 if tte is not None and ptep != unsigned(tte): 1388 
print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep)) 1389 try: 1390 pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask 1391 if (pte != paddr): 1392 print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte)) 1393 except Exception as exc: 1394 print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep)) 1395 elif pvh_type == 1: 1396 pvep = pvh & ~0x3 1397 tte_match = False 1398 pve_ptep_idx = 0 1399 while pvep != 0: 1400 pve = kern.GetValueFromAddress(pvep, "pv_entry_t *") 1401 ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3 1402 pve_ptep_idx += 1 1403 if pve_ptep_idx == 2: 1404 pve_ptep_idx = 0 1405 pvep = unsigned(pve.pve_next) 1406 if ptep == 0: 1407 continue 1408 if tte is not None and ptep == unsigned(tte): 1409 tte_match = True 1410 try: 1411 pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask 1412 if (pte != paddr): 1413 print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte)) 1414 except Exception as exc: 1415 print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep)) 1416 if tte is not None and not tte_match: 1417 print("{:s}{:s}{:s}not found in PV list".format(pmap_str, tte_str, paddr)) 1418 return True 1419 1420@lldb_command('pv_check', 'P') 1421def PVCheck(cmd_args=None, cmd_options={}): 1422 """ Check the physical-to-virtual mapping for a given PTE or physical address 1423 Syntax: (lldb) pv_check <addr> [-p] 1424 -P : Interpret <addr> as a physical address rather than a PTE 1425 """ 1426 if cmd_args is None or len(cmd_args) == 0: 1427 raise ArgumentError("Too few arguments to pv_check.") 1428 if kern.arch.startswith('arm64'): 1429 level = 3 1430 else: 1431 raise NotImplementedError("pv_check does not support {0}".format(kern.arch)) 1432 if "-P" in cmd_options: 1433 pte = None 1434 pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long"))) 
    else:
        # Treat the input as a PTE KVA; its target page is the PTE's value.
        pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *')
        pa = int(unsigned(dereference(pte)))
    checkPVList(None, level, None, pte, pa, 0, None)

@lldb_command('check_pmaps')
def CheckPmapIntegrity(cmd_args=None):
    """ Performs a system-wide integrity check of all PTEs and associated PV lists.
        Optionally only checks the pmap specified by [<pmap>]
        Syntax: (lldb) check_pmaps [<pmap>]
        WARNING: this macro can take a HUGE amount of time (several hours) if you do not
        specify [pmap] to limit it to a single pmap. It will also give false positives
        for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM.
        Use of this macro without the [<pmap>] argument is heavily discouraged.
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch))
    targetPmap = None
    if len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    ScanPageTables(checkPVList, targetPmap)

@lldb_command('pmapsforledger')
def PmapsForLedger(cmd_args=None):
    """ Find and display all pmaps currently using <ledger>.
        Syntax: (lldb) pmapsforledger <ledger>
    """
    if cmd_args is None or len(cmd_args) == 0:
        raise ArgumentError("Too few arguments to pmapsforledger.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch))
    ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t')
    for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if pmap.ledger == ledger:
            print("pmap: {:#x}".format(pmap))


def IsValidPai(pai):
    """ Given an unsigned value, detect whether that value is a valid physical
        address index (PAI). It does this by first computing the last possible
        PAI and comparing the input to that.

        All contemporary SoCs reserve the bottom part of the address space, so
        there shouldn't be any valid physical addresses between zero and the
        last PAI either.
    """
    page_size = unsigned(kern.globals.page_size)
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)

    # Valid PAIs are [0, number of kernel-managed pages).
    last_pai = (vm_last_phys - vm_first_phys) // page_size
    if (pai < 0) or (pai >= last_pai):
        return False

    return True

def ConvertPaiToPhysAddr(pai):
    """ Convert the given Physical Address Index (PAI) into a physical address.

        If the input isn't a valid PAI (it's most likely already a physical
        address), then just return back the input unchanged.
    """
    pa = pai

    # If the value is a valid PAI, then convert it into a physical address.
    if IsValidPai(pai):
        pa = (pai * unsigned(kern.globals.page_size)) + unsigned(kern.globals.vm_first_phys)

    return pa

def ConvertPhysAddrToPai(pa):
    """ Convert the given physical address into a Physical Address Index (PAI).

        If the input is already a valid PAI, then just return back the input
        unchanged.

        raises: ArgumentError if the input is neither a valid PAI nor a
                kernel-managed physical address.
    """
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    pai = pa

    if not IsValidPai(pa) and (pa < vm_first_phys or pa >= vm_last_phys):
        raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, vm_first_phys, vm_last_phys))
    elif not IsValidPai(pa):
        # If the value isn't already a valid PAI, then convert it into one.
        pai = (pa - vm_first_phys) // unsigned(kern.globals.page_size)

    return pai

@lldb_command('pmappaindex')
def PmapPaIndex(cmd_args=None):
    """ Display both a physical address and physical address index (PAI) when
        provided with only one of those values.

        Syntax: (lldb) pmappaindex <physical address | PAI>

        NOTE: This macro will throw an exception if the input isn't a valid PAI
        and is also not a kernel-managed physical address.
    """
    if cmd_args is None or len(cmd_args) == 0:
        raise ArgumentError("Too few arguments to pmappaindex.")

    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmappaindex is only supported on ARM devices.")

    value = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    pai = value
    phys_addr = value

    if IsValidPai(value):
        # Input is a PAI, calculate the physical address.
        phys_addr = ConvertPaiToPhysAddr(value)
    else:
        # Input is a physical address, calculate the PAI
        pai = ConvertPhysAddrToPai(value)

    print("Physical Address: {:#x}".format(phys_addr))
    print("PAI: {:d}".format(pai))

@lldb_command('pmapdumpsurts')
def PmapDumpSurts(cmd_args=None):
    """ Dump the SURT list.

        Syntax: (lldb) pmapdumpsurts
    """
    from scheduler import IterateBitmap

    if "surt_list" not in kern.globals:
        raise NotImplementedError("SURT is not supported on this device.")

    i = 0
    for surt_page in IterateLinkageChain(kern.globals.surt_list, 'surt_page_t *', 'surt_chain'):
        print(f"SURT Page {i} at physical address {hex(surt_page.surt_page_pa)}")
        print('')
        print('Allocation status (O: free, X: allocated):')
        # Render the 128-entry free bitmap MSB-first: start fully allocated
        # ('X') and clear each bit the free bitmaps report as free.
        bitmap_visual = bytearray('X' * 128, 'ascii')
        for free_bit in IterateBitmap(surt_page.surt_page_free_bitmap[0]):
            bitmap_index = 127 - free_bit
            bitmap_visual[bitmap_index:(bitmap_index + 1)] = b'O'
        for free_bit in IterateBitmap(surt_page.surt_page_free_bitmap[1]):
            # The second 64-bit word covers bits [127:64].
            bitmap_index = 127 - (free_bit + 64)
            bitmap_visual[bitmap_index:(bitmap_index + 1)] = b'O'

        # Print 8 bits per row, annotated with the bit range shown.
        for j in range(0, 128, 8):
            print(f"{bitmap_visual[j:(j+8)].decode('ascii')} bit [{127 - j}:{120 - j}]")

        print('')
        print('SURT list structure raw:')
        print(dereference(surt_page))
        print('')
        print('')

        i = i + 1

@lldb_command('showallpmaps')
def ShowAllPmaps(cmd_args=None):
    """ Dump all pmaps.

        Syntax: (lldb) showallpmaps
    """
    for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        print(dereference(pmap))
        print()

@lldb_command('pmapforroottablepa')
def PmapForRootTablePa(cmd_args=None):
    """ Dump the pmap with matching root TTE physical address.

        Syntax: (lldb) pmapforroottablepa <pa>
    """
    if cmd_args is None or len(cmd_args) == 0:
        raise ArgumentError('Invalid argument, expecting the physical address of a root translation table')

    pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if pmap.ttep == pa:
            print(dereference(pmap))
            print()