1from xnu import * 2import xnudefines 3from kdp import * 4from utils import * 5import struct 6 7def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None): 8 """ Read a physical memory data based on address. 9 params: 10 phys_addr : int - Physical address to read 11 bitsize : int - defines how many bytes to read. defaults to 64 bit 12 cpuval : None (optional) 13 returns: 14 int - int value read from memory. in case of failure 0xBAD10AD is returned. 15 """ 16 if "kdp" == GetConnectionProtocol(): 17 return KDPReadPhysMEM(phys_addr, bitsize) 18 19 #NO KDP. Attempt to use physical memory 20 paddr_in_kva = kern.PhysToKernelVirt(int(phys_addr)) 21 if paddr_in_kva : 22 if bitsize == 64 : 23 return kern.GetValueFromAddress(paddr_in_kva, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned() 24 if bitsize == 32 : 25 return kern.GetValueFromAddress(paddr_in_kva, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned() 26 if bitsize == 16 : 27 return kern.GetValueFromAddress(paddr_in_kva, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned() 28 if bitsize == 8 : 29 return kern.GetValueFromAddress(paddr_in_kva, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned() 30 return 0xBAD10AD 31 32@lldb_command('readphys') 33def ReadPhys(cmd_args = None): 34 """ Reads the specified untranslated address 35 The argument is interpreted as a physical address, and the 64-bit word 36 addressed is displayed. 37 usage: readphys <nbits> <address> 38 nbits: 8,16,32,64 39 address: 1234 or 0x1234 40 """ 41 if cmd_args is None or len(cmd_args) < 2: 42 print("Insufficient arguments.", ReadPhys.__doc__) 43 return False 44 else: 45 nbits = ArgumentStringToInt(cmd_args[0]) 46 phys_addr = ArgumentStringToInt(cmd_args[1]) 47 print("{0: <#x}".format(ReadPhysInt(phys_addr, nbits))) 48 return True 49 50lldb_alias('readphys8', 'readphys 8 ') 51lldb_alias('readphys16', 'readphys 16 ') 52lldb_alias('readphys32', 'readphys 32 ') 53lldb_alias('readphys64', 'readphys 64 ') 54 55def KDPReadPhysMEM(address, bits): 56 """ Setup the state for READPHYSMEM64 commands for reading data via kdp 57 params: 58 address : int - address where to read the data from 59 bits : int - number of bits in the intval (8/16/32/64) 60 returns: 61 int: read value from memory. 62 0xBAD10AD: if failed to read data. 63 """ 64 retval = 0xBAD10AD 65 if "kdp" != GetConnectionProtocol(): 66 print("Target is not connected over kdp. Nothing to do here.") 67 return retval 68 69 if "hwprobe" == KDPMode(): 70 # Send the proper KDP command and payload to the bare metal debug tool via a KDP server 71 addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0] 72 byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0] 73 packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0) 74 75 ret_obj = lldb.SBCommandReturnObject() 76 ci = lldb.debugger.GetCommandInterpreter() 77 ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj) 78 79 if ret_obj.Succeeded(): 80 value = ret_obj.GetOutput() 81 82 if bits == 64 : 83 pack_fmt = "<Q" 84 unpack_fmt = ">Q" 85 if bits == 32 : 86 pack_fmt = "<I" 87 unpack_fmt = ">I" 88 if bits == 16 : 89 pack_fmt = "<H" 90 unpack_fmt = ">H" 91 if bits == 8 : 92 pack_fmt = "<B" 93 unpack_fmt = ">B" 94 95 retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0] 96 97 else: 98 input_address = unsigned(addressof(kern.globals.manual_pkt.input)) 99 len_address = unsigned(addressof(kern.globals.manual_pkt.len)) 100 data_address = unsigned(addressof(kern.globals.manual_pkt.data)) 101 102 if not WriteInt32ToMemoryAddress(0, input_address): 103 return retval 104 105 kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize() 106 if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address): 107 return retval 108 109 data_addr = int(addressof(kern.globals.manual_pkt)) 110 pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *') 111 112 header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size) 113 114 if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and 115 WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and 116 WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and 117 WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu))) 118 ): 119 120 if WriteInt32ToMemoryAddress(1, input_address): 121 # now read data from the kdp packet 122 data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data)) 123 if bits == 64 : 124 retval = kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned() 125 if bits == 32 : 126 retval = kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned() 127 if bits == 16 : 128 retval = kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned() 129 if bits == 8 : 130 retval = kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned() 131 132 return retval 133 134 135def KDPWritePhysMEM(address, intval, bits): 136 """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp 137 params: 138 address : int - address where to save the data 139 intval : int - integer value to be stored in memory 140 bits : int - number of bits in the intval (8/16/32/64) 141 returns: 142 boolean: True if the write succeeded. 143 """ 144 if "kdp" != GetConnectionProtocol(): 145 print("Target is not connected over kdp. Nothing to do here.") 146 return False 147 148 if "hwprobe" == KDPMode(): 149 # Send the proper KDP command and payload to the bare metal debug tool via a KDP server 150 addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0] 151 byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0] 152 153 if bits == 64 : 154 pack_fmt = ">Q" 155 unpack_fmt = "<Q" 156 if bits == 32 : 157 pack_fmt = ">I" 158 unpack_fmt = "<I" 159 if bits == 16 : 160 pack_fmt = ">H" 161 unpack_fmt = "<H" 162 if bits == 8 : 163 pack_fmt = ">B" 164 unpack_fmt = "<B" 165 166 data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0] 167 168 packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val) 169 170 ret_obj = lldb.SBCommandReturnObject() 171 ci = lldb.debugger.GetCommandInterpreter() 172 ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj) 173 174 if ret_obj.Succeeded(): 175 return True 176 else: 177 return False 178 179 else: 180 input_address = unsigned(addressof(kern.globals.manual_pkt.input)) 181 len_address = unsigned(addressof(kern.globals.manual_pkt.len)) 182 data_address = unsigned(addressof(kern.globals.manual_pkt.data)) 183 if not WriteInt32ToMemoryAddress(0, input_address): 184 return False 185 186 kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8) 187 if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address): 188 return False 189 190 data_addr = int(addressof(kern.globals.manual_pkt)) 191 pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *') 192 193 header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size) 194 195 if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and 196 WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and 197 WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and 198 WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu))) 199 ): 200 201 if bits == 8: 202 if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))): 203 return False 204 if bits == 16: 205 if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))): 206 return False 207 if bits == 32: 208 if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))): 209 return False 210 if bits == 64: 211 if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))): 212 return False 213 if WriteInt32ToMemoryAddress(1, input_address): 214 return True 215 return False 216 217 218def WritePhysInt(phys_addr, int_val, bitsize = 64): 219 """ Write and integer value in a physical memory data based on address. 220 params: 221 phys_addr : int - Physical address to read 222 int_val : int - int value to write in memory 223 bitsize : int - defines how many bytes to read. defaults to 64 bit 224 returns: 225 bool - True if write was successful. 226 """ 227 if "kdp" == GetConnectionProtocol(): 228 if not KDPWritePhysMEM(phys_addr, int_val, bitsize): 229 print("Failed to write via KDP.") 230 return False 231 return True 232 #We are not connected via KDP. So do manual math and savings. 233 print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol()) 234 return False 235 236@lldb_command('writephys') 237def WritePhys(cmd_args=None): 238 """ writes to the specified untranslated address 239 The argument is interpreted as a physical address, and the 64-bit word 240 addressed is displayed. 241 usage: writephys <nbits> <address> <value> 242 nbits: 8,16,32,64 243 address: 1234 or 0x1234 244 value: int value to be written 245 ex. (lldb)writephys 16 0x12345abcd 0x25 246 """ 247 if cmd_args is None or len(cmd_args) < 3: 248 print("Invalid arguments.", WritePhys.__doc__) 249 else: 250 nbits = ArgumentStringToInt(cmd_args[0]) 251 phys_addr = ArgumentStringToInt(cmd_args[1]) 252 int_value = ArgumentStringToInt(cmd_args[2]) 253 print(WritePhysInt(phys_addr, int_value, nbits)) 254 255 256lldb_alias('writephys8', 'writephys 8 ') 257lldb_alias('writephys16', 'writephys 16 ') 258lldb_alias('writephys32', 'writephys 32 ') 259lldb_alias('writephys64', 'writephys 64 ') 260 261 262def _PT_Step(paddr, index, verbose_level = vSCRIPT): 263 """ 264 Step to lower-level page table and print attributes 265 paddr: current page table entry physical address 266 index: current page table entry index (0..511) 267 verbose_level: vHUMAN: print nothing 268 vSCRIPT: print basic information 269 vDETAIL: print basic information and hex table dump 270 returns: (pt_paddr, pt_valid, pt_large) 271 pt_paddr: next level page table entry physical address 272 or null if invalid 273 pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk 274 should be aborted 275 pt_large: 1 if kgm_pt_paddr is a page frame address 276 of a large page and not another page table entry 277 """ 278 entry_addr = paddr + (8 * index) 279 entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self ) 280 out_string = '' 281 if verbose_level >= vDETAIL: 282 for pte_loop in range(0, 512): 283 paddr_tmp = paddr + (8 * pte_loop) 284 out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self)) 285 paddr_mask = ~((0xfff<<52) | 0xfff) 286 paddr_large_mask = ~((0xfff<<52) | 0x1fffff) 287 pt_valid = False 288 pt_large = False 289 pt_paddr = 0 290 if verbose_level < vSCRIPT: 291 if entry & 0x1 : 292 pt_valid = True 293 pt_large = False 294 pt_paddr = entry & paddr_mask 295 if entry & (0x1 <<7): 296 pt_large = True 297 pt_paddr = entry & paddr_large_mask 298 else: 299 out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry) 300 if entry & 0x1: 301 out_string += " valid" 302 pt_paddr = entry & paddr_mask 303 pt_valid = True 304 else: 305 out_string += " invalid" 306 pt_paddr = 0 307 pt_valid = False 308 if entry & (0x1 << 62): 309 out_string += " compressed" 310 #Stop decoding other bits 311 entry = 0 312 if entry & (0x1 << 1): 313 out_string += " writable" 314 else: 315 out_string += " read-only" 316 317 if entry & (0x1 << 2): 318 out_string += " user" 319 else: 320 out_string += " supervisor" 321 322 if entry & (0x1 << 3): 323 out_string += " PWT" 324 325 if entry & (0x1 << 4): 326 out_string += " PCD" 327 328 if entry & (0x1 << 5): 329 out_string += " accessed" 330 331 if entry & (0x1 << 6): 332 out_string += " dirty" 333 334 if entry & (0x1 << 7): 335 out_string += " large" 336 pt_large = True 337 else: 338 pt_large = False 339 340 if entry & (0x1 << 8): 341 out_string += " global" 342 343 if entry & (0x3 << 9): 344 out_string += " avail:{0:x}".format((entry >> 9) & 0x3) 345 346 if entry & (0x1 << 63): 347 out_string += " noexec" 348 print(out_string) 349 return (pt_paddr, pt_valid, pt_large) 350 351def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT): 352 """ 353 Step to lower-level page table and print attributes for EPT pmap 354 paddr: current page table entry physical address 355 index: current page table entry index (0..511) 356 verbose_level: vHUMAN: print nothing 357 vSCRIPT: print basic information 358 vDETAIL: print basic information and hex table dump 359 returns: (pt_paddr, pt_valid, pt_large) 360 pt_paddr: next level page table entry physical address 361 or null if invalid 362 pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk 363 should be aborted 364 pt_large: 1 if kgm_pt_paddr is a page frame address 365 of a large page and not another page table entry 366 """ 367 entry_addr = paddr + (8 * index) 368 entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self ) 369 out_string = '' 370 if verbose_level >= vDETAIL: 371 for pte_loop in range(0, 512): 372 paddr_tmp = paddr + (8 * pte_loop) 373 out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self)) 374 paddr_mask = ~((0xfff<<52) | 0xfff) 375 paddr_large_mask = ~((0xfff<<52) | 0x1fffff) 376 pt_valid = False 377 pt_large = False 378 pt_paddr = 0 379 if verbose_level < vSCRIPT: 380 if entry & 0x7 : 381 pt_valid = True 382 pt_large = False 383 pt_paddr = entry & paddr_mask 384 if entry & (0x1 <<7): 385 pt_large = True 386 pt_paddr = entry & paddr_large_mask 387 else: 388 out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry) 389 if entry & 0x7: 390 out_string += "valid" 391 pt_paddr = entry & paddr_mask 392 pt_valid = True 393 else: 394 out_string += "invalid" 395 pt_paddr = 0 396 pt_valid = False 397 if entry & (0x1 << 62): 398 out_string += " compressed" 399 #Stop decoding other bits 400 entry = 0 401 if entry & 0x1: 402 out_string += " readable" 403 else: 404 out_string += " no read" 405 if entry & (0x1 << 1): 406 out_string += " writable" 407 else: 408 out_string += " no write" 409 410 if entry & (0x1 << 2): 411 out_string += " executable" 412 else: 413 out_string += " no exec" 414 415 ctype = entry & 0x38 416 if ctype == 0x30: 417 out_string += " cache-WB" 418 elif ctype == 0x28: 419 out_string += " cache-WP" 420 elif ctype == 0x20: 421 out_string += " cache-WT" 422 elif ctype == 0x8: 423 out_string += " cache-WC" 424 else: 425 out_string += " cache-NC" 426 427 if (entry & 0x40) == 0x40: 428 out_string += " Ignore-PTA" 429 430 if (entry & 0x100) == 0x100: 431 out_string += " accessed" 432 433 if (entry & 0x200) == 0x200: 434 out_string += " dirty" 435 436 if entry & (0x1 << 7): 437 out_string += " large" 438 pt_large = True 439 else: 440 pt_large = False 441 print(out_string) 442 return (pt_paddr, pt_valid, pt_large) 443 444def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT): 445 """ Walk the l4 pmap entry. 446 params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t 447 vaddr : int - virtual address to walk 448 """ 449 pt_paddr = unsigned(pmap_addr_val) 450 pt_valid = (unsigned(pmap_addr_val) != 0) 451 pt_large = 0 452 pframe_offset = 0 453 if pt_valid: 454 # Lookup bits 47:39 of linear address in PML4T 455 pt_index = (vaddr >> 39) & 0x1ff 456 pframe_offset = vaddr & 0x7fffffffff 457 if verbose_level > vHUMAN : 458 print("pml4 (index {0:d}):".format(pt_index)) 459 if not(ept_pmap): 460 (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level) 461 else: 462 (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level) 463 if pt_valid: 464 # Lookup bits 38:30 of the linear address in PDPT 465 pt_index = (vaddr >> 30) & 0x1ff 466 pframe_offset = vaddr & 0x3fffffff 467 if verbose_level > vHUMAN: 468 print("pdpt (index {0:d}):".format(pt_index)) 469 if not(ept_pmap): 470 (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level) 471 else: 472 (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level) 473 if pt_valid and not pt_large: 474 #Lookup bits 29:21 of the linear address in PDPT 475 pt_index = (vaddr >> 21) & 0x1ff 476 pframe_offset = vaddr & 0x1fffff 477 if verbose_level > vHUMAN: 478 print("pdt (index {0:d}):".format(pt_index)) 479 if not(ept_pmap): 480 (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level) 481 else: 482 (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level) 483 if pt_valid and not pt_large: 484 #Lookup bits 20:21 of linear address in PT 485 pt_index = (vaddr >> 12) & 0x1ff 486 pframe_offset = vaddr & 0xfff 487 if verbose_level > vHUMAN: 488 print("pt (index {0:d}):".format(pt_index)) 489 if not(ept_pmap): 490 (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level) 491 else: 492 (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level) 493 paddr = 0 494 paddr_isvalid = False 495 if pt_valid: 496 paddr = pt_paddr + pframe_offset 497 paddr_isvalid = True 498 499 if verbose_level > vHUMAN: 500 if paddr_isvalid: 501 pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self) 502 print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue)) 503 else: 504 print("no translation") 505 506 return paddr 507 508def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT): 509 """ 510 params: pmapval - core.value representing pmap_t in kernel 511 vaddr: int - int representing virtual address to walk 512 """ 513 if pmapval.pm_cr3 != 0: 514 if verbose_level > vHUMAN: 515 print("Using normal Intel PMAP from pm_cr3\n") 516 return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, config['verbosity']) 517 else: 518 if verbose_level > vHUMAN: 519 print("Using EPT pmap from pm_eptp\n") 520 return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, config['verbosity']) 521 522def assert_64bit(val): 523 assert(val < 2**64) 524 525ARM64_TTE_SIZE = 8 526ARM64_TTE_SHIFT = 3 527ARM64_VMADDR_BITS = 48 528 529def PmapBlockOffsetMaskARM64(page_size, level): 530 assert level >= 0 and level <= 3 531 ttentries = (page_size // ARM64_TTE_SIZE) 532 return page_size * (ttentries ** (3 - level)) - 1 533 534def PmapBlockBaseMaskARM64(page_size, level): 535 assert level >= 0 and level <= 3 536 return ((1 << ARM64_VMADDR_BITS) - 1) & ~PmapBlockOffsetMaskARM64(page_size, level) 537 538def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False): 539 """ Display the bits of an ARM64 translation table or page table entry 540 in human-readable form. 541 tte: integer value of the TTE/PTE 542 level: translation table level. Valid values are 1, 2, or 3. 543 is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise. 544 """ 545 assert(isinstance(level, numbers.Integral)) 546 assert_64bit(tte) 547 548 if tte & 0x1 == 0x0: 549 print("Invalid.") 550 return 551 552 if (tte & 0x2 == 0x2) and (level != 0x3): 553 print("Type = Table pointer.") 554 print("Table addr = {:#x}.".format(tte & 0xfffffffff000)) 555 556 if not stage2: 557 print("PXN = {:#x}.".format((tte >> 59) & 0x1)) 558 print("XN = {:#x}.".format((tte >> 60) & 0x1)) 559 print("AP = {:#x}.".format((tte >> 61) & 0x3)) 560 print("NS = {:#x}.".format(tte >> 63)) 561 else: 562 print("Type = Block.") 563 564 if stage2: 565 print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf)) 566 else: 567 attr_index = (tte >> 2) & 0x7 568 attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU', 569 3: 'CACHE DISABLE', 4: 'INNERWRITEBACK', 5: 'POSTED', 570 6: 'POSTED_REORDERED', 7: 'POSTED_COMBINED_REORDERED' } 571 572 # Only show the string version of the AttrIdx for CPU mappings since 573 # these values don't apply to IOMMU mappings. 574 if is_iommu_tte: 575 print("AttrIdx = {:#x}.".format(attr_index)) 576 else: 577 print("AttrIdx = {:#x} ({:s}).".format(attr_index, attr_string[attr_index])) 578 print("NS = {:#x}.".format((tte >> 5) & 0x1)) 579 580 if stage2: 581 print("S2AP = {:#x}.".format((tte >> 6) & 0x3)) 582 else: 583 print("AP = {:#x}.".format((tte >> 6) & 0x3)) 584 585 print("SH = {:#x}.".format((tte >> 8) & 0x3)) 586 print("AF = {:#x}.".format((tte >> 10) & 0x1)) 587 588 if not stage2: 589 print("nG = {:#x}.".format((tte >> 11) & 0x1)) 590 591 print("HINT = {:#x}.".format((tte >> 52) & 0x1)) 592 593 if stage2: 594 print("S2XN = {:#x}.".format((tte >> 53) & 0x3)) 595 else: 596 print("PXN = {:#x}.".format((tte >> 53) & 0x1)) 597 print("XN = {:#x}.".format((tte >> 54) & 0x1)) 598 599 print("SW Use = {:#x}.".format((tte >> 55) & 0xf)) 600 601 return 602 603def PmapTTnIndexARM64(vaddr, pmap_pt_attr): 604 pta_max_level = unsigned(pmap_pt_attr.pta_max_level) 605 606 tt_index = [] 607 for i in range(pta_max_level + 1): 608 tt_index.append((vaddr & unsigned(pmap_pt_attr.pta_level_info[i].index_mask)) \ 609 >> unsigned(pmap_pt_attr.pta_level_info[i].shift)) 610 611 return tt_index 612 613def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN): 614 assert(type(vaddr) in (int, int)) 615 assert_64bit(vaddr) 616 assert_64bit(root_tte) 617 618 # Obtain pmap attributes 619 page_size = pmap_pt_attr.pta_page_size 620 page_offset_mask = (page_size - 1) 621 page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) 622 tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr) 623 stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False) 624 625 # The pmap starts at a page table level that is defined by register 626 # values; the root level can be obtained from the attributes structure 627 level = unsigned(pmap_pt_attr.pta_root_level) 628 629 root_tt_index = tt_index[level] 630 root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \ 631 unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1 632 tte = int(unsigned(root_tte[root_tt_index])) 633 634 # Walk the page tables 635 paddr = -1 636 max_level = unsigned(pmap_pt_attr.pta_max_level) 637 is_valid = True 638 is_leaf = False 639 640 while (level <= max_level): 641 if verbose_level >= vSCRIPT: 642 print("L{} entry: {:#x}".format(level, tte)) 643 if verbose_level >= vDETAIL: 644 PmapDecodeTTEARM64(tte, level, stage2) 645 646 if tte & 0x1 == 0x0: 647 if verbose_level >= vHUMAN: 648 print("L{} entry invalid: {:#x}\n".format(level, tte)) 649 650 is_valid = False 651 break 652 653 # Handle leaf entry 654 if tte & 0x2 == 0x0 or level == max_level: 655 base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level) 656 offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level) 657 paddr = tte & base_mask 658 paddr = paddr | (vaddr & offset_mask) 659 660 if level != max_level: 661 print("phys: {:#x}".format(paddr)) 662 663 is_leaf = True 664 break 665 else: 666 # Handle page table entry 667 next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1]) 668 assert(isinstance(next_phys, numbers.Integral)) 669 670 next_virt = kern.PhysToKernelVirt(next_phys) 671 assert(isinstance(next_virt, numbers.Integral)) 672 673 if verbose_level >= vDETAIL: 674 print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt)) 675 676 ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*") 677 tte = int(unsigned(dereference(ttep))) 678 assert(isinstance(tte, numbers.Integral)) 679 680 # We've parsed one level, so go to the next level 681 assert(level <= 3) 682 level = level + 1 683 684 685 if verbose_level >= vHUMAN: 686 if paddr: 687 print("Translation of {:#x} is {:#x}.".format(vaddr, paddr)) 688 else: 689 print("(no translation)") 690 691 return paddr 692 693def PmapWalk(pmap, vaddr, verbose_level = vHUMAN): 694 if kern.arch == 'x86_64': 695 return PmapWalkX86_64(pmap, vaddr, verbose_level) 696 elif kern.arch.startswith('arm64'): 697 # Obtain pmap attributes from pmap structure 698 pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr 699 return PmapWalkARM64(pmap_pt_attr, pmap.tte, vaddr, verbose_level) 700 else: 701 raise NotImplementedError("PmapWalk does not support {0}".format(kern.arch)) 702 703@lldb_command('pmap_walk') 704def PmapWalkHelper(cmd_args=None): 705 """ Perform a page-table walk in <pmap> for <virtual_address>. 706 Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e] 707 Multiple -v's can be specified for increased verbosity 708 """ 709 if cmd_args is None or len(cmd_args) < 2: 710 raise ArgumentError("Too few arguments to pmap_walk.") 711 712 pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t') 713 addr = ArgumentStringToInt(cmd_args[1]) 714 PmapWalk(pmap, addr, config['verbosity']) 715 return 716 717def GetMemoryAttributesFromUser(requested_type): 718 pmap_attr_dict = { 719 '4k' : kern.globals.pmap_pt_attr_4k, 720 '16k' : kern.globals.pmap_pt_attr_16k, 721 '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2') else None, 722 } 723 724 requested_type = requested_type.lower() 725 if requested_type not in pmap_attr_dict: 726 return None 727 728 return pmap_attr_dict[requested_type] 729 730@lldb_command('ttep_walk') 731def TTEPWalkPHelper(cmd_args=None): 732 """ Perform a page-table walk in <root_ttep> for <virtual_address>. 733 Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e] 734 Multiple -v's can be specified for increased verbosity 735 """ 736 if cmd_args is None or len(cmd_args) < 2: 737 raise ArgumentError("Too few arguments to ttep_walk.") 738 739 if not kern.arch.startswith('arm64'): 740 raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch)) 741 742 tte = kern.GetValueFromAddress(kern.PhysToKernelVirt(ArgumentStringToInt(cmd_args[0])), 'unsigned long *') 743 addr = ArgumentStringToInt(cmd_args[1]) 744 745 pmap_pt_attr = kern.globals.native_pt_attr if len(cmd_args) < 3 else GetMemoryAttributesFromUser(cmd_args[2]) 746 if pmap_pt_attr is None: 747 raise ArgumentError("Invalid translation attribute type.") 748 749 return PmapWalkARM64(pmap_pt_attr, tte, addr, config['verbosity']) 750 751@lldb_command('decode_tte') 752def DecodeTTE(cmd_args=None): 753 """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2] 754 Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2] 755 """ 756 if cmd_args is None or len(cmd_args) < 2: 757 raise ArgumentError("Too few arguments to decode_tte.") 758 if len(cmd_args) > 2 and cmd_args[2] not in ["s1", "s2"]: 759 raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2])) 760 if kern.arch.startswith('arm64'): 761 stage2 = True if len(cmd_args) > 2 and cmd_args[2] == "s2" else False 762 PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2) 763 else: 764 raise NotImplementedError("decode_tte does not support {0}".format(kern.arch)) 765 766PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) | (1 << 58) | (1 << 57) | (1 << 56) | (1 << 55) | (1 << 54) 767PVH_HIGH_FLAGS_ARM32 = (1 << 31) 768 769def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN): 770 """ Dump information about a single mapping retrieved by the pv_head_table. 771 772 pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP, 773 or None if type PVH_TYPE_PTEP. 774 ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with 775 the high flags already set (the type bits don't need to be cleared). 776 For type PVH_TYPE_PVEP this will be the value retrieved from the 777 pve_ptep[] array. 778 """ 779 if kern.arch.startswith('arm64'): 780 iommu_flag = 0x4 781 iommu_table_flag = 1 << 63 782 else: 783 iommu_flag = 0 784 iommu_table_flag = 0 785 786 # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries. 787 if pvep is not None and (ptep & 0x1): 788 # Note: It's not possible for IOMMU mappings to be marked as alt acct so 789 # setting this string is mutually exclusive with setting the IOMMU strings. 790 pte_str = ' (alt acct)' 791 else: 792 pte_str = '' 793 794 if pvep is not None: 795 pve_str = 'PVEP {:#x}, '.format(pvep) 796 else: 797 pve_str = '' 798 799 # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this 800 # either does nothing or clears out the AltAcct bit. 801 ptep = ptep & ~0x3 802 803 # When printing with extra verbosity, print an extra newline that describes 804 # who owns the mapping. 805 extra_str = '' 806 807 if ptep & iommu_flag: 808 # The mapping is an IOMMU Mapping 809 ptep = ptep & ~iommu_flag 810 811 # Due to LLDB automatically setting all the high bits of pointers, when 812 # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set 813 # the iommu_table_flag, which means this check only works for PVH entries 814 # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH 815 # entry which has the right casting applied to avoid this issue). 816 # 817 # Why don't we just do the same casting for pve_ptep[] you ask? Well not 818 # for a lack of trying, that's for sure. If you can figure out how to 819 # cast that array correctly, then be my guest. 820 if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL: 821 if ptep & iommu_table_flag: 822 pte_str = ' (IOMMU table), entry' 823 ptd = GetPtDesc(KVToPhysARM(ptep)) 824 iommu = dereference(ptd.iommu) 825 else: 826 # Instead of dumping the PTE (since we don't have that), dump the 827 # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc). 828 # 829 # This works because later on when the "ptep" is dereferenced as a 830 # PTE pointer (uint64_t pointer), the descriptor pointer will be 831 # dumped as that's the first 64-bit value in the IOMMU state object. 832 pte_str = ' (IOMMU state), descriptor' 833 ptep = ptep | iommu_table_flag 834 iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *')) 835 836 # For IOMMU mappings, dump who owns the mapping as the extra string. 837 extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name) 838 if unsigned(iommu.name) != 0: 839 extra_str += '/{:s}'.format(iommu.name) 840 extra_str += ' (iommu state: {:x})'.format(addressof(iommu)) 841 else: 842 ptd = GetPtDesc(KVToPhysARM(ptep)) 843 extra_str = 'Mapped by IOMMU {:x}'.format(ptd.iommu) 844 else: 845 # The mapping is a CPU Mapping 846 pte_str += ', entry' 847 ptd = GetPtDesc(KVToPhysARM(ptep)) 848 if ptd.pmap == kern.globals.kernel_pmap: 849 extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap) 850 elif verbose_level >= vDETAIL: 851 task = TaskForPmapHelper(ptd.pmap) 852 extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>") 853 try: 854 print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) 855 except: 856 print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str)) 857 858 if verbose_level >= vDETAIL: 859 print(" |-- {:s}".format(extra_str)) 860 861def PVWalkARM(pai, verbose_level = vHUMAN): 862 """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap. 863 864 pai: physical address index (PAI) corresponding to the pv_head_table 865 entry to walk. 866 verbose_level: Set to vSCRIPT or higher to print extra info around the 867 the pv_head_table/pp_attr_table flags and to dump the 868 pt_desc_t object if the type is a PTD. 869 """ 870 # LLDB will automatically try to make pointer values dereferencable by 871 # setting the upper bits if they aren't set. We need to parse the flags 872 # stored in the upper bits later, so cast the pv_head_table to an array of 873 # integers to get around this "feature". We'll add the upper bits back 874 # manually before deref'ing anything. 875 pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*") 876 pvh_raw = unsigned(pv_head_table[pai]) 877 pvh = pvh_raw 878 pvh_type = pvh & 0x3 879 880 print("PVH raw value: {:#x}".format(pvh_raw)) 881 if kern.arch.startswith('arm64'): 882 pvh = pvh | PVH_HIGH_FLAGS_ARM64 883 else: 884 pvh = pvh | PVH_HIGH_FLAGS_ARM32 885 886 if pvh_type == 0: 887 print("PVH type: NULL") 888 elif pvh_type == 3: 889 print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3)) 890 elif pvh_type == 2: 891 print("PVH type: single PTE") 892 PVDumpPTE(None, pvh, verbose_level) 893 elif pvh_type == 1: 894 pvep = pvh & ~0x3 895 print("PVH type: PTE list") 896 pve_ptep_idx = 0 897 while pvep != 0: 898 pve = kern.GetValueFromAddress(pvep, "pv_entry_t *") 899 900 if pve.pve_ptep[pve_ptep_idx] != 0: 901 PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level) 902 903 pve_ptep_idx += 1 904 if pve_ptep_idx == 2: 905 pve_ptep_idx = 0 906 pvep = unsigned(pve.pve_next) 907 908 if verbose_level >= vDETAIL: 909 if (pvh_type == 1) or (pvh_type == 2): 910 # Dump pv_head_table flags when there's a valid mapping. 911 pvh_flags = [] 912 913 if pvh_raw & (1 << 62): 914 pvh_flags.append("CPU") 915 if pvh_raw & (1 << 60): 916 pvh_flags.append("EXEC") 917 if pvh_raw & (1 << 59): 918 pvh_flags.append("LOCKDOWN_KC") 919 if pvh_raw & (1 << 58): 920 pvh_flags.append("HASHED") 921 if pvh_raw & (1 << 57): 922 pvh_flags.append("LOCKDOWN_CS") 923 if pvh_raw & (1 << 56): 924 pvh_flags.append("LOCKDOWN_RO") 925 if pvh_raw & (1 << 55): 926 pvh_flags.append("RETIRED") 927 if pvh_raw & (1 << 54): 928 if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL: 929 pvh_flags.append("SECURE_FLUSH_NEEDED") 930 else: 931 pvh_flags.append("SLEEPABLE_LOCK") 932 if kern.arch.startswith('arm64') and pvh_raw & (1 << 61): 933 pvh_flags.append("LOCK") 934 935 print("PVH Flags: {}".format(pvh_flags)) 936 937 # Always dump pp_attr_table flags (these can be updated even if there aren't mappings). 938 ppattr = unsigned(kern.globals.pp_attr_table[pai]) 939 print("PPATTR raw value: {:#x}".format(ppattr)) 940 941 ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)] 942 if ppattr & 0x40: 943 ppattr_flags.append("REFERENCED") 944 if ppattr & 0x80: 945 ppattr_flags.append("MODIFIED") 946 if ppattr & 0x100: 947 ppattr_flags.append("INTERNAL") 948 if ppattr & 0x200: 949 ppattr_flags.append("REUSABLE") 950 if ppattr & 0x400: 951 ppattr_flags.append("ALTACCT") 952 if ppattr & 0x800: 953 ppattr_flags.append("NOENCRYPT") 954 if ppattr & 0x1000: 955 ppattr_flags.append("REFFAULT") 956 if ppattr & 0x2000: 957 ppattr_flags.append("MODFAULT") 958 if ppattr & 0x4000: 959 ppattr_flags.append("MONITOR") 960 if ppattr & 0x8000: 961 ppattr_flags.append("NO_MONITOR") 962 963 print("PPATTR Flags: {}".format(ppattr_flags)) 964 965 if pvh_type == 3: 966 def RunLldbCmdHelper(command): 967 """Helper for dumping an LLDB command right before executing it 968 and printing the results. 969 command: The LLDB command (as a string) to run. 970 971 Example input: "p/x kernel_pmap". 972 """ 973 print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command))) 974 # Dump the page table descriptor object 975 ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *') 976 RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd)) 977 978 # Depending on the system, more than one ptd_info can be associated 979 # with a single PTD. Only dump the first PTD info and assume the 980 # user knows to dump the rest if they're on one of those systems. 981 RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]") 982 983@lldb_command('pv_walk') 984def PVWalk(cmd_args=None): 985 """ Show mappings for <physical_address | PAI> tracked in the PV list. 986 Syntax: (lldb) pv_walk <physical_address | PAI> [-vv] 987 988 Extra verbosity will pretty print the pv_head_table/pp_attr_table flags 989 as well as dump the page table descriptor (PTD) struct if the entry is a 990 PTD. 991 """ 992 if cmd_args is None or len(cmd_args) < 1: 993 raise ArgumentError("Too few arguments to pv_walk.") 994 if not kern.arch.startswith('arm'): 995 raise NotImplementedError("pv_walk does not support {0}".format(kern.arch)) 996 997 pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long') 998 999 # If the input is already a PAI, this function will return the input unchanged. 1000 # This function also ensures that the physical address is kernel-managed. 1001 pai = ConvertPhysAddrToPai(pa) 1002 1003 PVWalkARM(pai, config['verbosity']) 1004 1005@lldb_command('kvtophys') 1006def KVToPhys(cmd_args=None): 1007 """ Translate a kernel virtual address to the corresponding physical address. 1008 Assumes the virtual address falls within the kernel static region. 1009 Syntax: (lldb) kvtophys <kernel virtual address> 1010 """ 1011 if cmd_args is None or len(cmd_args) < 1: 1012 raise ArgumentError("Too few arguments to kvtophys.") 1013 if kern.arch.startswith('arm'): 1014 print("{:#x}".format(KVToPhysARM(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))))) 1015 elif kern.arch == 'x86_64': 1016 print("{:#x}".format(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'))) - unsigned(kern.globals.physmap_base))) 1017 1018@lldb_command('phystokv') 1019def PhysToKV(cmd_args=None): 1020 """ Translate a physical address to the corresponding static kernel virtual address. 1021 Assumes the physical address corresponds to managed DRAM. 1022 Syntax: (lldb) phystokv <physical address> 1023 """ 1024 if cmd_args is None or len(cmd_args) < 1: 1025 raise ArgumentError("Too few arguments to phystokv.") 1026 print("{:#x}".format(kern.PhysToKernelVirt(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))))) 1027 1028def KVToPhysARM(addr): 1029 if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL: 1030 ptov_table = kern.globals.ptov_table 1031 for i in range(0, kern.globals.ptov_index): 1032 if (addr >= int(unsigned(ptov_table[i].va))) and (addr < (int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].len)))): 1033 return (addr - int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].pa))) 1034 else: 1035 papt_table = kern.globals.libsptm_papt_ranges 1036 page_size = kern.globals.page_size 1037 for i in range(0, kern.globals.libsptm_n_papt_ranges): 1038 if (addr >= int(unsigned(papt_table[i].papt_start))) and (addr < (int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].num_mappings) * page_size))): 1039 return (addr - int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].paddr_start))) 1040 raise ValueError("VA {:#x} not found in physical region lookup table".format(addr)) 1041 return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase)) 1042 1043 1044def GetPtDesc(paddr): 1045 pn = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size 1046 pvh = unsigned(kern.globals.pv_head_table[pn]) 1047 if kern.arch.startswith('arm64'): 1048 pvh = pvh | PVH_HIGH_FLAGS_ARM64 1049 else: 1050 pvh = pvh | PVH_HIGH_FLAGS_ARM32 1051 pvh_type = pvh & 0x3 1052 if pvh_type != 0x3: 1053 raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh)) 1054 ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *') 1055 return ptd 1056 1057def PhysToFrameTableEntry(paddr): 1058 if paddr >= int(unsigned(kern.globals.sptm_first_phys)) or paddr < int(unsigned(kern.globals.sptm_last_phys)): 1059 return kern.globals.frame_table[(paddr - int(unsigned(kern.globals.sptm_first_phys))) // kern.globals.page_size] 1060 page_idx = paddr / kern.globals.page_size 1061 for i in range(0, kern.globals.sptm_n_io_ranges): 1062 base = kern.globals.io_frame_table[i].io_range.phys_page_idx 1063 end = base + kern.globals.io_frame_table[i].io_range.num_pages 1064 if page_idx >= base and page_idx < end: 1065 return kern.globals.io_frame_table[i] 1066 return kern.globals.xnu_io_fte 1067 1068@lldb_command('phystofte') 1069def PhysToFTE(cmd_args=None): 1070 """ Translate a physical address to the corresponding SPTM frame table entry pointer 1071 Syntax: (lldb) phystofte <physical address> 1072 """ 1073 if cmd_args is None or len(cmd_args) < 1: 1074 raise ArgumentError("Too few arguments to phystofte.") 1075 1076 fte = PhysToFrameTableEntry(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))) 1077 print(repr(fte)) 1078 1079XNU_IOMMU = 22 1080XNU_PAGE_TABLE = 18 1081XNU_PAGE_TABLE_SHARED = 19 1082XNU_PAGE_TABLE_ROZONE = 20 1083XNU_PAGE_TABLE_COMMPAGE = 21 1084SPTM_PAGE_TABLE = 9 1085 1086def ShowPTEARM(pte, page_size, level): 1087 """ Display vital information about an ARM page table entry 1088 pte: kernel virtual address of the PTE. page_size and level may be None, 1089 in which case we'll try to infer them from the page table descriptor. 1090 Inference of level may only work for L2 and L3 TTEs depending upon system 1091 configuration. 1092 """ 1093 pt_index = 0 1094 stage2 = False 1095 def GetPageTableInfo(ptd, paddr): 1096 nonlocal pt_index, page_size, level 1097 if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL: 1098 # First load ptd_info[0].refcnt so that we can check if this is an IOMMU page. 1099 # IOMMUs don't split PTDs across multiple 4K regions as CPU page tables sometimes 1100 # do, so the IOMMU refcnt token is always stored at index 0. If this is not 1101 # an IOMMU page, we may end up using a different final value for pt_index below. 1102 refcnt = ptd.ptd_info[0].refcnt 1103 # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001. 1104 is_iommu_pte = (refcnt & 0x8000) == 0x8000 1105 if not is_iommu_pte and page_size is None and hasattr(ptd.pmap, 'pmap_pt_attr'): 1106 page_size = ptd.pmap.pmap_pt_attr.pta_page_size 1107 elif page_size is None: 1108 page_size = kern.globals.native_pt_attr.pta_page_size 1109 pt_index = (pte % kern.globals.page_size) // page_size 1110 refcnt = ptd.ptd_info[pt_index].refcnt 1111 if not is_iommu_pte and hasattr(ptd.pmap, 'pmap_pt_attr') and hasattr(ptd.pmap.pmap_pt_attr, 'stage2'): 1112 stage2 = ptd.pmap.pmap_pt_attr.stage2 1113 if level is None: 1114 if refcnt == 0x4000: 1115 level = 2 1116 else: 1117 level = 3 1118 if is_iommu_pte: 1119 iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name) 1120 if unsigned(dereference(ptd.iommu).name) != 0: 1121 iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name) 1122 info_str = "iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name) 1123 else: 1124 info_str = None 1125 return (int(unsigned(refcnt)), level, info_str) 1126 else: 1127 fte = PhysToFrameTableEntry(paddr) 1128 if fte.type == XNU_IOMMU: 1129 if page_size is None: 1130 page_size = kern.globals.native_pt_attr.pta_page_size 1131 info_str = "PTD iommu token: {:#x} (ID {:#x} TSD {:#x})".format(ptd.iommu, fte.iommu_page.iommu_id, fte.iommu_page.iommu_tsd) 1132 return (int(unsigned(fte.iommu_page.iommu_refcnt._value)), 0, info_str) 1133 elif fte.type in [XNU_PAGE_TABLE, XNU_PAGE_TABLE_SHARED, XNU_PAGE_TABLE_ROZONE, XNU_PAGE_TABLE_COMMPAGE, SPTM_PAGE_TABLE]: 1134 if page_size is None: 1135 if hasattr(ptd.pmap, 'pmap_pt_attr'): 1136 page_size = ptd.pmap.pmap_pt_attr.pta_page_size 1137 else: 1138 page_size = kern.globals.native_pt_attr.pta_page_size; 1139 return (int(unsigned(fte.cpu_page_table.mapping_refcnt._value)), int(unsigned(fte.cpu_page_table.level)), None) 1140 else: 1141 raise ValueError("Unrecognized FTE type {:#x}".format(fte.type)) 1142 raise ValueError("Unable to retrieve PTD refcnt") 1143 pte_paddr = KVToPhysARM(pte) 1144 ptd = GetPtDesc(pte_paddr) 1145 refcnt, level, info_str = GetPageTableInfo(ptd, pte_paddr) 1146 wiredcnt = ptd.ptd_info[pt_index].wiredcnt 1147 va = ptd.va[pt_index] 1148 print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x}, va: {:#x})".format(ptd, refcnt, wiredcnt, va)) 1149 1150 # The pmap/iommu field is a union, so only print the correct one. 1151 if info_str is not None: 1152 print(info_str) 1153 else: 1154 if ptd.pmap == kern.globals.kernel_pmap: 1155 pmap_str = "(kernel_pmap)" 1156 else: 1157 task = TaskForPmapHelper(ptd.pmap) 1158 pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>") 1159 print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str)) 1160 nttes = page_size // 8 1161 granule = page_size * (nttes ** (3 - level)) 1162 pte_pgoff = pte % page_size 1163 pte_pgoff = pte_pgoff // 8 1164 print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(ptd.va[pt_index])) + (pte_pgoff * granule))) 1165 pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *')))) 1166 print("value: {:#x}".format(pteval)) 1167 print("level: {:d}".format(level)) 1168 PmapDecodeTTEARM64(pteval, level, stage2) 1169 1170@lldb_command('showpte') 1171def ShowPTE(cmd_args=None): 1172 """ Display vital information about the page table entry at VA <pte> 1173 Syntax: (lldb) showpte <pte_va> [level] [4k|16k|16k_s2] 1174 """ 1175 if cmd_args is None or len(cmd_args) < 1: 1176 raise ArgumentError("Too few arguments to showpte.") 1177 1178 if kern.arch.startswith('arm64'): 1179 if len(cmd_args) >= 3: 1180 pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2]) 1181 if pmap_pt_attr is None: 1182 raise ArgumentError("Invalid translation attribute type.") 1183 page_size = pmap_pt_attr.pta_page_size 1184 else: 1185 page_size = None 1186 1187 level = ArgumentStringToInt(cmd_args[1]) if len(cmd_args) >= 2 else None 1188 ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), page_size, level) 1189 else: 1190 raise NotImplementedError("showpte does not support {0}".format(kern.arch)) 1191 1192def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action): 1193 """ Perform the specified action for all valid mappings in an ARM64 translation table 1194 pmap: owner of the translation table 1195 tt: translation table or page table 1196 nttes: number of entries in tt 1197 level: translation table level, 1 2 or 3 1198 action: callback for each valid TTE 1199 """ 1200 # Obtain pmap attributes 1201 pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr 1202 page_size = pmap_pt_attr.pta_page_size 1203 page_offset_mask = (page_size - 1) 1204 page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) 1205 max_level = unsigned(pmap_pt_attr.pta_max_level) 1206 1207 for i in range(nttes): 1208 try: 1209 tte = tt[i] 1210 if tte & 0x1 == 0x0: 1211 continue 1212 1213 tt_next = None 1214 paddr = unsigned(tte) & unsigned(page_base_mask) 1215 1216 # Handle leaf entry 1217 if tte & 0x2 == 0x0 or level == max_level: 1218 type = 'block' if level < max_level else 'entry' 1219 granule = PmapBlockOffsetMaskARM64(page_size, level) + 1 1220 else: 1221 # Handle page table entry 1222 type = 'table' 1223 granule = page_size 1224 tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *') 1225 1226 mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i) 1227 if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule): 1228 if tt_next is not None: 1229 FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action) 1230 1231 except Exception as exc: 1232 print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i])))) 1233 1234def ScanPageTables(action, targetPmap=None): 1235 """ Perform the specified action for all valid mappings in all page tables, 1236 optionally restricted to a single pmap. 1237 pmap: pmap whose page table should be scanned. If None, all pmaps on system will be scanned. 1238 """ 1239 print("Scanning all available translation tables. This may take a long time...") 1240 def ScanPmap(pmap, action): 1241 if kern.arch.startswith('arm64'): 1242 # Obtain pmap attributes 1243 pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr 1244 granule = pmap_pt_attr.pta_page_size 1245 level = unsigned(pmap_pt_attr.pta_root_level) 1246 root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \ 1247 unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1 1248 1249 if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule): 1250 if kern.arch.startswith('arm64'): 1251 FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action) 1252 1253 if targetPmap is not None: 1254 ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action) 1255 else: 1256 for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'): 1257 ScanPmap(pmap, action) 1258 1259@lldb_command('showallmappings') 1260def ShowAllMappings(cmd_args=None): 1261 """ Find and display all available mappings on the system for 1262 <physical_address>. Optionally only searches the pmap 1263 specified by [<pmap>] 1264 Syntax: (lldb) showallmappings <physical_address> [<pmap>] 1265 WARNING: this macro can take a long time (up to 30min.) to complete! 1266 """ 1267 if cmd_args is None or len(cmd_args) < 1: 1268 raise ArgumentError("Too few arguments to showallmappings.") 1269 if not kern.arch.startswith('arm'): 1270 raise NotImplementedError("showallmappings does not support {0}".format(kern.arch)) 1271 pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long') 1272 targetPmap = None 1273 if len(cmd_args) > 1: 1274 targetPmap = cmd_args[1] 1275 def printMatchedMapping(pmap, level, type, tte, paddr, va, granule): 1276 if paddr <= pa < (paddr + granule): 1277 print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va)) 1278 return True 1279 ScanPageTables(printMatchedMapping, targetPmap) 1280 1281@lldb_command('showptusage') 1282def ShowPTUsage(cmd_args=None): 1283 """ Display a summary of pagetable allocations for a given pmap. 1284 Syntax: (lldb) showptusage [<pmap>] 1285 WARNING: this macro can take a long time (> 1hr) to complete! 1286 """ 1287 if not kern.arch.startswith('arm'): 1288 raise NotImplementedError("showptusage does not support {0}".format(kern.arch)) 1289 targetPmap = None 1290 if len(cmd_args) > 0: 1291 targetPmap = cmd_args[0] 1292 lastPmap = [None] 1293 numTables = [0] 1294 numUnnested = [0] 1295 numPmaps = [0] 1296 def printValidTTE(pmap, level, type, tte, paddr, va, granule): 1297 unnested = "" 1298 nested_region_addr = int(unsigned(pmap.nested_region_addr)) 1299 nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size)) 1300 if lastPmap[0] is None or (pmap != lastPmap[0]): 1301 lastPmap[0] = pmap 1302 numPmaps[0] = numPmaps[0] + 1 1303 print ("pmap {:#x}:".format(pmap)) 1304 if type == 'root': 1305 return True 1306 if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end): 1307 ptd = GetPtDesc(paddr) 1308 if ptd.pmap != pmap: 1309 return False 1310 else: 1311 numUnnested[0] = numUnnested[0] + 1 1312 unnested = " (likely unnested)" 1313 numTables[0] = numTables[0] + 1 1314 print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested) 1315 if level == 2: 1316 return False 1317 else: 1318 return True 1319 ScanPageTables(printValidTTE, targetPmap) 1320 print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables[0], numUnnested[0], numPmaps[0])) 1321 1322def checkPVList(pmap, level, type, tte, paddr, va, granule): 1323 """ Checks an ARM physical-to-virtual mapping list for consistency errors. 1324 pmap: owner of the translation table 1325 level: translation table level. PV lists will only be checked for L2 (arm32) or L3 (arm64) tables. 1326 type: unused 1327 tte: KVA of PTE to check for presence in PV list. If None, presence check will be skipped. 1328 paddr: physical address whose PV list should be checked. Need not be page-aligned. 1329 granule: unused 1330 """ 1331 vm_first_phys = unsigned(kern.globals.vm_first_phys) 1332 vm_last_phys = unsigned(kern.globals.vm_last_phys) 1333 page_size = kern.globals.page_size 1334 if kern.arch.startswith('arm64'): 1335 page_offset_mask = (page_size - 1) 1336 page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) 1337 paddr = paddr & page_base_mask 1338 max_level = 3 1339 pvh_set_bits = PVH_HIGH_FLAGS_ARM64 1340 if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys: 1341 return True 1342 pn = (paddr - vm_first_phys) // page_size 1343 pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits 1344 pvh_type = pvh & 0x3 1345 if pmap is not None: 1346 pmap_str = "pmap: {:#x}: ".format(pmap) 1347 else: 1348 pmap_str = '' 1349 if tte is not None: 1350 tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr) 1351 else: 1352 tte_str = "paddr {:#x}: ".format(paddr) 1353 if pvh_type == 0 or pvh_type == 3: 1354 print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type)) 1355 elif pvh_type == 2: 1356 ptep = pvh & ~0x3 1357 if tte is not None and ptep != unsigned(tte): 1358 print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep)) 1359 try: 1360 pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask 1361 if (pte != paddr): 1362 print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte)) 1363 except Exception as exc: 1364 print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep)) 1365 elif pvh_type == 1: 1366 pvep = pvh & ~0x3 1367 tte_match = False 1368 pve_ptep_idx = 0 1369 while pvep != 0: 1370 pve = kern.GetValueFromAddress(pvep, "pv_entry_t *") 1371 ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3 1372 pve_ptep_idx += 1 1373 if pve_ptep_idx == 2: 1374 pve_ptep_idx = 0 1375 pvep = unsigned(pve.pve_next) 1376 if ptep == 0: 1377 continue 1378 if tte is not None and ptep == unsigned(tte): 1379 tte_match = True 1380 try: 1381 pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask 1382 if (pte != paddr): 1383 print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte)) 1384 except Exception as exc: 1385 print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep)) 1386 if tte is not None and not tte_match: 1387 print("{:s}{:s}{:s}not found in PV list".format(pmap_str, tte_str, paddr)) 1388 return True 1389 1390@lldb_command('pv_check', 'P') 1391def PVCheck(cmd_args=None, cmd_options={}): 1392 """ Check the physical-to-virtual mapping for a given PTE or physical address 1393 Syntax: (lldb) pv_check <addr> [-p] 1394 -P : Interpret <addr> as a physical address rather than a PTE 1395 """ 1396 if cmd_args is None or len(cmd_args) < 1: 1397 raise ArgumentError("Too few arguments to pv_check.") 1398 if kern.arch.startswith('arm64'): 1399 level = 3 1400 else: 1401 raise NotImplementedError("pv_check does not support {0}".format(kern.arch)) 1402 if "-P" in cmd_options: 1403 pte = None 1404 pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long"))) 1405 else: 1406 pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *') 1407 pa = int(unsigned(dereference(pte))) 1408 checkPVList(None, level, None, pte, pa, 0, None) 1409 1410@lldb_command('check_pmaps') 1411def CheckPmapIntegrity(cmd_args=None): 1412 """ Performs a system-wide integrity check of all PTEs and associated PV lists. 1413 Optionally only checks the pmap specified by [<pmap>] 1414 Syntax: (lldb) check_pmaps [<pmap>] 1415 WARNING: this macro can take a HUGE amount of time (several hours) if you do not 1416 specify [pmap] to limit it to a single pmap. It will also give false positives 1417 for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM. 1418 Use of this macro without the [<pmap>] argument is heavily discouraged. 1419 """ 1420 if not kern.arch.startswith('arm'): 1421 raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch)) 1422 targetPmap = None 1423 if len(cmd_args) > 0: 1424 targetPmap = cmd_args[0] 1425 ScanPageTables(checkPVList, targetPmap) 1426 1427@lldb_command('pmapsforledger') 1428def PmapsForLedger(cmd_args=None): 1429 """ Find and display all pmaps currently using <ledger>. 1430 Syntax: (lldb) pmapsforledger <ledger> 1431 """ 1432 if cmd_args is None or len(cmd_args) < 1: 1433 raise ArgumentError("Too few arguments to pmapsforledger.") 1434 if not kern.arch.startswith('arm'): 1435 raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch)) 1436 ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t') 1437 for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'): 1438 if pmap.ledger == ledger: 1439 print("pmap: {:#x}".format(pmap)) 1440 1441 1442def IsValidPai(pai): 1443 """ Given an unsigned value, detect whether that value is a valid physical 1444 address index (PAI). It does this by first computing the last possible 1445 PAI and comparing the input to that. 1446 1447 All contemporary SoCs reserve the bottom part of the address space, so 1448 there shouldn't be any valid physical addresses between zero and the 1449 last PAI either. 1450 """ 1451 page_size = unsigned(kern.globals.page_size) 1452 vm_first_phys = unsigned(kern.globals.vm_first_phys) 1453 vm_last_phys = unsigned(kern.globals.vm_last_phys) 1454 1455 last_pai = (vm_last_phys - vm_first_phys) // page_size 1456 if (pai < 0) or (pai >= last_pai): 1457 return False 1458 1459 return True 1460 1461def ConvertPaiToPhysAddr(pai): 1462 """ Convert the given Physical Address Index (PAI) into a physical address. 1463 1464 If the input isn't a valid PAI (it's most likely already a physical 1465 address), then just return back the input unchanged. 1466 """ 1467 pa = pai 1468 1469 # If the value is a valid PAI, then convert it into a physical address. 1470 if IsValidPai(pai): 1471 pa = (pai * unsigned(kern.globals.page_size)) + unsigned(kern.globals.vm_first_phys) 1472 1473 return pa 1474 1475def ConvertPhysAddrToPai(pa): 1476 """ Convert the given physical address into a Physical Address Index (PAI). 1477 1478 If the input is already a valid PAI, then just return back the input 1479 unchanged. 1480 """ 1481 vm_first_phys = unsigned(kern.globals.vm_first_phys) 1482 vm_last_phys = unsigned(kern.globals.vm_last_phys) 1483 pai = pa 1484 1485 if not IsValidPai(pa) and (pa < vm_first_phys or pa >= vm_last_phys): 1486 raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, vm_first_phys, vm_last_phys)) 1487 elif not IsValidPai(pa): 1488 # If the value isn't already a valid PAI, then convert it into one. 1489 pai = (pa - vm_first_phys) // unsigned(kern.globals.page_size) 1490 1491 return pai 1492 1493@lldb_command('pmappaindex') 1494def PmapPaIndex(cmd_args=None): 1495 """ Display both a physical address and physical address index (PAI) when 1496 provided with only one of those values. 1497 1498 Syntax: (lldb) pmappaindex <physical address | PAI> 1499 1500 NOTE: This macro will throw an exception if the input isn't a valid PAI 1501 and is also not a kernel-managed physical address. 1502 """ 1503 if cmd_args is None or len(cmd_args) < 1: 1504 raise ArgumentError("Too few arguments to pmappaindex.") 1505 1506 if not kern.arch.startswith('arm'): 1507 raise NotImplementedError("pmappaindex is only supported on ARM devices.") 1508 1509 value = kern.GetValueFromAddress(cmd_args[0], 'unsigned long') 1510 pai = value 1511 phys_addr = value 1512 1513 if IsValidPai(value): 1514 # Input is a PAI, calculate the physical address. 1515 phys_addr = ConvertPaiToPhysAddr(value) 1516 else: 1517 # Input is a physical address, calculate the PAI 1518 pai = ConvertPhysAddrToPai(value) 1519 1520 print("Physical Address: {:#x}".format(phys_addr)) 1521 print("PAI: {:d}".format(pai)) 1522