1from __future__ import absolute_import, division, print_function 2 3from builtins import hex 4from builtins import range 5 6from xnu import * 7import xnudefines 8from kdp import * 9from utils import * 10import struct 11 12def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None): 13 """ Read a physical memory data based on address. 14 params: 15 phys_addr : int - Physical address to read 16 bitsize : int - defines how many bytes to read. defaults to 64 bit 17 cpuval : None (optional) 18 returns: 19 int - int value read from memory. in case of failure 0xBAD10AD is returned. 20 """ 21 if "kdp" == GetConnectionProtocol(): 22 return KDPReadPhysMEM(phys_addr, bitsize) 23 24 #NO KDP. Attempt to use physical memory 25 paddr_in_kva = kern.PhysToKernelVirt(int(phys_addr)) 26 if paddr_in_kva : 27 if bitsize == 64 : 28 return kern.GetValueFromAddress(paddr_in_kva, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned() 29 if bitsize == 32 : 30 return kern.GetValueFromAddress(paddr_in_kva, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned() 31 if bitsize == 16 : 32 return kern.GetValueFromAddress(paddr_in_kva, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned() 33 if bitsize == 8 : 34 return kern.GetValueFromAddress(paddr_in_kva, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned() 35 return 0xBAD10AD 36 37@lldb_command('readphys') 38def ReadPhys(cmd_args = None): 39 """ Reads the specified untranslated address 40 The argument is interpreted as a physical address, and the 64-bit word 41 addressed is displayed. 42 usage: readphys <nbits> <address> 43 nbits: 8,16,32,64 44 address: 1234 or 0x1234 45 """ 46 if cmd_args == None or len(cmd_args) < 2: 47 print("Insufficient arguments.", ReadPhys.__doc__) 48 return False 49 else: 50 nbits = ArgumentStringToInt(cmd_args[0]) 51 phys_addr = ArgumentStringToInt(cmd_args[1]) 52 print("{0: <#x}".format(ReadPhysInt(phys_addr, nbits))) 53 return True 54 55lldb_alias('readphys8', 'readphys 8 ') 56lldb_alias('readphys16', 'readphys 16 ') 57lldb_alias('readphys32', 'readphys 32 ') 58lldb_alias('readphys64', 'readphys 64 ') 59 60def KDPReadPhysMEM(address, bits): 61 """ Setup the state for READPHYSMEM64 commands for reading data via kdp 62 params: 63 address : int - address where to read the data from 64 bits : int - number of bits in the intval (8/16/32/64) 65 returns: 66 int: read value from memory. 67 0xBAD10AD: if failed to read data. 68 """ 69 retval = 0xBAD10AD 70 if "kdp" != GetConnectionProtocol(): 71 print("Target is not connected over kdp. Nothing to do here.") 72 return retval 73 74 if "hwprobe" == KDPMode(): 75 # Send the proper KDP command and payload to the bare metal debug tool via a KDP server 76 addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0] 77 byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0] 78 packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0) 79 80 ret_obj = lldb.SBCommandReturnObject() 81 ci = lldb.debugger.GetCommandInterpreter() 82 ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj) 83 84 if ret_obj.Succeeded(): 85 value = ret_obj.GetOutput() 86 87 if bits == 64 : 88 pack_fmt = "<Q" 89 unpack_fmt = ">Q" 90 if bits == 32 : 91 pack_fmt = "<I" 92 unpack_fmt = ">I" 93 if bits == 16 : 94 pack_fmt = "<H" 95 unpack_fmt = ">H" 96 if bits == 8 : 97 pack_fmt = "<B" 98 unpack_fmt = ">B" 99 100 retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0] 101 102 else: 103 input_address = unsigned(addressof(kern.globals.manual_pkt.input)) 104 len_address = unsigned(addressof(kern.globals.manual_pkt.len)) 105 data_address = unsigned(addressof(kern.globals.manual_pkt.data)) 106 107 if not WriteInt32ToMemoryAddress(0, input_address): 108 return retval 109 110 kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize() 111 if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address): 112 return retval 113 114 data_addr = int(addressof(kern.globals.manual_pkt)) 115 pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *') 116 117 header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size) 118 119 if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and 120 WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and 121 WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and 122 WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu))) 123 ): 124 125 if WriteInt32ToMemoryAddress(1, input_address): 126 # now read data from the kdp packet 127 data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data)) 128 if bits == 64 : 129 retval = kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned() 130 if bits == 32 : 131 retval = kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned() 132 if bits == 16 : 133 retval = kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned() 134 if bits == 8 : 135 retval = kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned() 136 137 return retval 138 139 140def KDPWritePhysMEM(address, intval, bits): 141 """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp 142 params: 143 address : int - address where to save the data 144 intval : int - integer value to be stored in memory 145 bits : int - number of bits in the intval (8/16/32/64) 146 returns: 147 boolean: True if the write succeeded. 148 """ 149 if "kdp" != GetConnectionProtocol(): 150 print("Target is not connected over kdp. Nothing to do here.") 151 return False 152 153 if "hwprobe" == KDPMode(): 154 # Send the proper KDP command and payload to the bare metal debug tool via a KDP server 155 addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0] 156 byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0] 157 158 if bits == 64 : 159 pack_fmt = ">Q" 160 unpack_fmt = "<Q" 161 if bits == 32 : 162 pack_fmt = ">I" 163 unpack_fmt = "<I" 164 if bits == 16 : 165 pack_fmt = ">H" 166 unpack_fmt = "<H" 167 if bits == 8 : 168 pack_fmt = ">B" 169 unpack_fmt = "<B" 170 171 data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0] 172 173 packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val) 174 175 ret_obj = lldb.SBCommandReturnObject() 176 ci = lldb.debugger.GetCommandInterpreter() 177 ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj) 178 179 if ret_obj.Succeeded(): 180 return True 181 else: 182 return False 183 184 else: 185 input_address = unsigned(addressof(kern.globals.manual_pkt.input)) 186 len_address = unsigned(addressof(kern.globals.manual_pkt.len)) 187 data_address = unsigned(addressof(kern.globals.manual_pkt.data)) 188 if not WriteInt32ToMemoryAddress(0, input_address): 189 return False 190 191 kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8) 192 if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address): 193 return False 194 195 data_addr = int(addressof(kern.globals.manual_pkt)) 196 pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *') 197 198 header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size) 199 200 if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and 201 WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and 202 WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and 203 WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu))) 204 ): 205 206 if bits == 8: 207 if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))): 208 return False 209 if bits == 16: 210 if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))): 211 return False 212 if bits == 32: 213 if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))): 214 return False 215 if bits == 64: 216 if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))): 217 return False 218 if WriteInt32ToMemoryAddress(1, input_address): 219 return True 220 return False 221 222 223def WritePhysInt(phys_addr, int_val, bitsize = 64): 224 """ Write and integer value in a physical memory data based on address. 225 params: 226 phys_addr : int - Physical address to read 227 int_val : int - int value to write in memory 228 bitsize : int - defines how many bytes to read. defaults to 64 bit 229 returns: 230 bool - True if write was successful. 231 """ 232 if "kdp" == GetConnectionProtocol(): 233 if not KDPWritePhysMEM(phys_addr, int_val, bitsize): 234 print("Failed to write via KDP.") 235 return False 236 return True 237 #We are not connected via KDP. So do manual math and savings. 238 print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol()) 239 return False 240 241@lldb_command('writephys') 242def WritePhys(cmd_args=None): 243 """ writes to the specified untranslated address 244 The argument is interpreted as a physical address, and the 64-bit word 245 addressed is displayed. 246 usage: writephys <nbits> <address> <value> 247 nbits: 8,16,32,64 248 address: 1234 or 0x1234 249 value: int value to be written 250 ex. (lldb)writephys 16 0x12345abcd 0x25 251 """ 252 if cmd_args == None or len(cmd_args) < 3: 253 print("Invalid arguments.", WritePhys.__doc__) 254 else: 255 nbits = ArgumentStringToInt(cmd_args[0]) 256 phys_addr = ArgumentStringToInt(cmd_args[1]) 257 int_value = ArgumentStringToInt(cmd_args[2]) 258 print(WritePhysInt(phys_addr, int_value, nbits)) 259 260 261lldb_alias('writephys8', 'writephys 8 ') 262lldb_alias('writephys16', 'writephys 16 ') 263lldb_alias('writephys32', 'writephys 32 ') 264lldb_alias('writephys64', 'writephys 64 ') 265 266 267def _PT_Step(paddr, index, verbose_level = vSCRIPT): 268 """ 269 Step to lower-level page table and print attributes 270 paddr: current page table entry physical address 271 index: current page table entry index (0..511) 272 verbose_level: vHUMAN: print nothing 273 vSCRIPT: print basic information 274 vDETAIL: print basic information and hex table dump 275 returns: (pt_paddr, pt_valid, pt_large) 276 pt_paddr: next level page table entry physical address 277 or null if invalid 278 pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk 279 should be aborted 280 pt_large: 1 if kgm_pt_paddr is a page frame address 281 of a large page and not another page table entry 282 """ 283 entry_addr = paddr + (8 * index) 284 entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self ) 285 out_string = '' 286 if verbose_level >= vDETAIL: 287 for pte_loop in range(0, 512): 288 paddr_tmp = paddr + (8 * pte_loop) 289 out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self)) 290 paddr_mask = ~((0xfff<<52) | 0xfff) 291 paddr_large_mask = ~((0xfff<<52) | 0x1fffff) 292 pt_valid = False 293 pt_large = False 294 pt_paddr = 0 295 if verbose_level < vSCRIPT: 296 if entry & 0x1 : 297 pt_valid = True 298 pt_large = False 299 pt_paddr = entry & paddr_mask 300 if entry & (0x1 <<7): 301 pt_large = True 302 pt_paddr = entry & paddr_large_mask 303 else: 304 out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry) 305 if entry & 0x1: 306 out_string += " valid" 307 pt_paddr = entry & paddr_mask 308 pt_valid = True 309 else: 310 out_string += " invalid" 311 pt_paddr = 0 312 pt_valid = False 313 if entry & (0x1 << 62): 314 out_string += " compressed" 315 #Stop decoding other bits 316 entry = 0 317 if entry & (0x1 << 1): 318 out_string += " writable" 319 else: 320 out_string += " read-only" 321 322 if entry & (0x1 << 2): 323 out_string += " user" 324 else: 325 out_string += " supervisor" 326 327 if entry & (0x1 << 3): 328 out_string += " PWT" 329 330 if entry & (0x1 << 4): 331 out_string += " PCD" 332 333 if entry & (0x1 << 5): 334 out_string += " accessed" 335 336 if entry & (0x1 << 6): 337 out_string += " dirty" 338 339 if entry & (0x1 << 7): 340 out_string += " large" 341 pt_large = True 342 else: 343 pt_large = False 344 345 if entry & (0x1 << 8): 346 out_string += " global" 347 348 if entry & (0x3 << 9): 349 out_string += " avail:{0:x}".format((entry >> 9) & 0x3) 350 351 if entry & (0x1 << 63): 352 out_string += " noexec" 353 print(out_string) 354 return (pt_paddr, pt_valid, pt_large) 355 356def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT): 357 """ 358 Step to lower-level page table and print attributes for EPT pmap 359 paddr: current page table entry physical address 360 index: current page table entry index (0..511) 361 verbose_level: vHUMAN: print nothing 362 vSCRIPT: print basic information 363 vDETAIL: print basic information and hex table dump 364 returns: (pt_paddr, pt_valid, pt_large) 365 pt_paddr: next level page table entry physical address 366 or null if invalid 367 pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk 368 should be aborted 369 pt_large: 1 if kgm_pt_paddr is a page frame address 370 of a large page and not another page table entry 371 """ 372 entry_addr = paddr + (8 * index) 373 entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self ) 374 out_string = '' 375 if verbose_level >= vDETAIL: 376 for pte_loop in range(0, 512): 377 paddr_tmp = paddr + (8 * pte_loop) 378 out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self)) 379 paddr_mask = ~((0xfff<<52) | 0xfff) 380 paddr_large_mask = ~((0xfff<<52) | 0x1fffff) 381 pt_valid = False 382 pt_large = False 383 pt_paddr = 0 384 if verbose_level < vSCRIPT: 385 if entry & 0x7 : 386 pt_valid = True 387 pt_large = False 388 pt_paddr = entry & paddr_mask 389 if entry & (0x1 <<7): 390 pt_large = True 391 pt_paddr = entry & paddr_large_mask 392 else: 393 out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry) 394 if entry & 0x7: 395 out_string += "valid" 396 pt_paddr = entry & paddr_mask 397 pt_valid = True 398 else: 399 out_string += "invalid" 400 pt_paddr = 0 401 pt_valid = False 402 if entry & (0x1 << 62): 403 out_string += " compressed" 404 #Stop decoding other bits 405 entry = 0 406 if entry & 0x1: 407 out_string += " readable" 408 else: 409 out_string += " no read" 410 if entry & (0x1 << 1): 411 out_string += " writable" 412 else: 413 out_string += " no write" 414 415 if entry & (0x1 << 2): 416 out_string += " executable" 417 else: 418 out_string += " no exec" 419 420 ctype = entry & 0x38 421 if ctype == 0x30: 422 out_string += " cache-WB" 423 elif ctype == 0x28: 424 out_string += " cache-WP" 425 elif ctype == 0x20: 426 out_string += " cache-WT" 427 elif ctype == 0x8: 428 out_string += " cache-WC" 429 else: 430 out_string += " cache-NC" 431 432 if (entry & 0x40) == 0x40: 433 out_string += " Ignore-PTA" 434 435 if (entry & 0x100) == 0x100: 436 out_string += " accessed" 437 438 if (entry & 0x200) == 0x200: 439 out_string += " dirty" 440 441 if entry & (0x1 << 7): 442 out_string += " large" 443 pt_large = True 444 else: 445 pt_large = False 446 print(out_string) 447 return (pt_paddr, pt_valid, pt_large) 448 449def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT): 450 """ Walk the l4 pmap entry. 451 params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t 452 vaddr : int - virtual address to walk 453 """ 454 pt_paddr = unsigned(pmap_addr_val) 455 pt_valid = (unsigned(pmap_addr_val) != 0) 456 pt_large = 0 457 pframe_offset = 0 458 if pt_valid: 459 # Lookup bits 47:39 of linear address in PML4T 460 pt_index = (vaddr >> 39) & 0x1ff 461 pframe_offset = vaddr & 0x7fffffffff 462 if verbose_level > vHUMAN : 463 print("pml4 (index {0:d}):".format(pt_index)) 464 if not(ept_pmap): 465 (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level) 466 else: 467 (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level) 468 if pt_valid: 469 # Lookup bits 38:30 of the linear address in PDPT 470 pt_index = (vaddr >> 30) & 0x1ff 471 pframe_offset = vaddr & 0x3fffffff 472 if verbose_level > vHUMAN: 473 print("pdpt (index {0:d}):".format(pt_index)) 474 if not(ept_pmap): 475 (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level) 476 else: 477 (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level) 478 if pt_valid and not pt_large: 479 #Lookup bits 29:21 of the linear address in PDPT 480 pt_index = (vaddr >> 21) & 0x1ff 481 pframe_offset = vaddr & 0x1fffff 482 if verbose_level > vHUMAN: 483 print("pdt (index {0:d}):".format(pt_index)) 484 if not(ept_pmap): 485 (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level) 486 else: 487 (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level) 488 if pt_valid and not pt_large: 489 #Lookup bits 20:21 of linear address in PT 490 pt_index = (vaddr >> 12) & 0x1ff 491 pframe_offset = vaddr & 0xfff 492 if verbose_level > vHUMAN: 493 print("pt (index {0:d}):".format(pt_index)) 494 if not(ept_pmap): 495 (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level) 496 else: 497 (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level) 498 paddr = 0 499 paddr_isvalid = False 500 if pt_valid: 501 paddr = pt_paddr + pframe_offset 502 paddr_isvalid = True 503 504 if verbose_level > vHUMAN: 505 if paddr_isvalid: 506 pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self) 507 print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue)) 508 else: 509 print("no translation") 510 511 return paddr 512 513def PmapDecodeTTEARM(tte, level, verbose_level): 514 """ Display the bits of an ARM translation table or page table entry 515 in human-readable form. 516 tte: integer value of the TTE/PTE 517 level: translation table level. Valid values are 1 or 2. 518 verbose_level: verbosity. vHUMAN, vSCRIPT, vDETAIL 519 """ 520 out_string = "" 521 if level == 1 and (tte & 0x3) == 0x2: 522 if verbose_level < vSCRIPT: 523 return 524 525 #bit [1:0] evaluated in PmapWalkARM 526 # B bit 2 527 b_bit = (tte & 0x4) >> 2 528 # C bit 3 529 c_bit = (tte & 0x8) >> 3 530 #XN bit 4 531 if (tte & 0x10) : 532 out_string += "no-execute" 533 else: 534 out_string += "execute" 535 #Domain bit [8:5] if not supersection 536 if (tte & 0x40000) == 0x0: 537 out_string += " domain ({:d})".format(((tte & 0x1e0) >> 5) ) 538 #IMP bit 9 539 out_string += " imp({:d})".format( ((tte & 0x200) >> 9) ) 540 # AP bit 15 and [11:10] merged to a single 3 bit value 541 access = ( (tte & 0xc00) >> 10 ) | ((tte & 0x8000) >> 13) 542 out_string += xnudefines.arm_level2_access_strings[access] 543 544 #TEX bit [14:12] 545 tex_bits = ((tte & 0x7000) >> 12) 546 #Print TEX, C , B all together 547 out_string += " TEX:C:B({:d}{:d}{:d}:{:d}:{:d})".format( 548 1 if (tex_bits & 0x4) else 0, 549 1 if (tex_bits & 0x2) else 0, 550 1 if (tex_bits & 0x1) else 0, 551 c_bit, 552 b_bit 553 ) 554 # S bit 16 555 if tte & 0x10000: 556 out_string += " shareable" 557 else: 558 out_string += " not-shareable" 559 # nG bit 17 560 if tte & 0x20000 : 561 out_string += " not-global" 562 else: 563 out_string += " global" 564 # Supersection bit 18 565 if tte & 0x40000: 566 out_string += " supersection" 567 else: 568 out_string += " section" 569 #NS bit 19 570 if tte & 0x80000 : 571 out_string += " no-secure" 572 else: 573 out_string += " secure" 574 575 elif level == 1 and (tte & 0x3) == 0x1: 576 577 if verbose_level >= vSCRIPT: 578 # bit [1:0] evaluated in PmapWalkARM 579 # NS bit 3 580 if tte & 0x8: 581 out_string += ' no-secure' 582 else: 583 out_string += ' secure' 584 #Domain bit [8:5] 585 out_string += " domain({:d})".format(((tte & 0x1e0) >> 5)) 586 # IMP bit 9 587 out_string += " imp({:d})".format( ((tte & 0x200) >> 9)) 588 out_string += "\n" 589 590 elif level == 2: 591 pte = tte 592 if verbose_level >= vSCRIPT: 593 if (pte & 0x3) == 0x0: 594 out_string += " invalid" 595 else: 596 if (pte & 0x3) == 0x1: 597 out_string += " large" 598 # XN bit 15 599 if pte & 0x8000 == 0x8000: 600 out_string+= " no-execute" 601 else: 602 out_string += " execute" 603 else: 604 out_string += " small" 605 # XN bit 0 606 if (pte & 0x1) == 0x01: 607 out_string += " no-execute" 608 else: 609 out_string += " execute" 610 # B bit 2 611 b_bit = (pte & 0x4) >> 2 612 c_bit = (pte & 0x8) >> 3 613 # AP bit 9 and [5:4], merged to a single 3-bit value 614 access = (pte & 0x30) >> 4 | (pte & 0x200) >> 7 615 out_string += xnudefines.arm_level2_access_strings[access] 616 617 #TEX bit [14:12] for large, [8:6] for small 618 tex_bits = ((pte & 0x1c0) >> 6) 619 if (pte & 0x3) == 0x1: 620 tex_bits = ((pte & 0x7000) >> 12) 621 622 # Print TEX, C , B alltogether 623 out_string += " TEX:C:B({:d}{:d}{:d}:{:d}:{:d})".format( 624 1 if (tex_bits & 0x4) else 0, 625 1 if (tex_bits & 0x2) else 0, 626 1 if (tex_bits & 0x1) else 0, 627 c_bit, 628 b_bit 629 ) 630 # S bit 10 631 if pte & 0x400 : 632 out_string += " shareable" 633 else: 634 out_string += " not-shareable" 635 636 # nG bit 11 637 if pte & 0x800: 638 out_string += " not-global" 639 else: 640 out_string += " global" 641 642 print(out_string) 643 644 645def _PmapWalkARMLevel1Section(tte, vaddr, verbose_level = vSCRIPT): 646 paddr = 0 647 #Supersection or just section? 648 if (tte & 0x40000) == 0x40000: 649 paddr = ( (tte & 0xFF000000) | (vaddr & 0x00FFFFFF) ) 650 else: 651 paddr = ( (tte & 0xFFF00000) | (vaddr & 0x000FFFFF) ) 652 653 if verbose_level >= vSCRIPT: 654 print("{0: <#020x}\n\t{1: <#020x}\n\t".format(addressof(tte), tte), end=' ') 655 656 PmapDecodeTTEARM(tte, 1, verbose_level) 657 658 return paddr 659 660 661 662def _PmapWalkARMLevel2(tte, vaddr, verbose_level = vSCRIPT): 663 """ Pmap walk the level 2 tte. 664 params: 665 tte - value object 666 vaddr - int 667 returns: str - description of the tte + additional informaiton based on verbose_level 668 """ 669 pte_base = kern.PhysToKernelVirt(tte & 0xFFFFFC00) 670 pte_index = (vaddr >> 12) & 0xFF 671 pte_base_val = kern.GetValueFromAddress(pte_base, 'pt_entry_t *') 672 pte = pte_base_val[pte_index] 673 674 paddr = 0 675 if pte & 0x2: 676 paddr = (unsigned(pte) & 0xFFFFF000) | (vaddr & 0xFFF) 677 678 if verbose_level >= vSCRIPT: 679 print("{0: <#020x}\n\t{1: <#020x}\n\t".format(addressof(tte), tte), end=' ') 680 681 PmapDecodeTTEARM(tte, 1, verbose_level) 682 if verbose_level >= vSCRIPT: 683 print("second-level table (index {:d}):".format(pte_index)) 684 if verbose_level >= vDETAIL: 685 for i in range(256): 686 tmp = pte_base_val[i] 687 print("{0: <#020x}:\t{1: <#020x}".format(addressof(tmp), unsigned(tmp))) 688 689 if verbose_level >= vSCRIPT: 690 print(" {0: <#020x}\n\t{1: <#020x}\n\t".format(addressof(pte), unsigned(pte)), end=' ') 691 692 PmapDecodeTTEARM(pte, 2, verbose_level) 693 694 return paddr 695 #end of level 2 walking of arm 696 697 698def PmapWalkARM(pmap, vaddr, verbose_level = vHUMAN): 699 """ Pmap walking for ARM kernel. 700 params: 701 pmapval: core.value - representing pmap_t in kernel 702 vaddr: int - integer representing virtual address to walk 703 """ 704 paddr = 0 705 # shift by TTESHIFT (20) to get tte index 706 # Assume all L1 indexing starts at VA 0...for our purposes it does, 707 # as that's where all user pmaps start, and the kernel pmap contains 708 # 4 L1 pages (the lower 2 of which are unused after bootstrap) 709 tte_index = vaddr >> 20 710 tte = pmap.tte[tte_index] 711 if verbose_level >= vSCRIPT: 712 print("First-level table (index {:d}):".format(tte_index)) 713 if verbose_level >= vDETAIL: 714 for i in range(0, pmap.tte_index_max): 715 ptr = unsigned(addressof(pmap.tte[i])) 716 val = unsigned(pmap.tte[i]) 717 print("{0: <#020x}:\t {1: <#020x}".format(ptr, val)) 718 if (tte & 0x3) == 0x1: 719 paddr = _PmapWalkARMLevel2(tte, vaddr, verbose_level) 720 elif (tte & 0x3) == 0x2 : 721 paddr = _PmapWalkARMLevel1Section(tte, vaddr, verbose_level) 722 else: 723 paddr = 0 724 if verbose_level >= vSCRIPT: 725 print("Invalid First-Level Translation Table Entry: {0: #020x}".format(tte)) 726 727 if verbose_level >= vHUMAN: 728 if paddr: 729 print("Translation of {:#x} is {:#x}.".format(vaddr, paddr)) 730 else: 731 print("(no translation)") 732 733 return paddr 734 735def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT): 736 """ 737 params: pmapval - core.value representing pmap_t in kernel 738 vaddr: int - int representing virtual address to walk 739 """ 740 if pmapval.pm_cr3 != 0: 741 if verbose_level > vHUMAN: 742 print("Using normal Intel PMAP from pm_cr3\n") 743 return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, config['verbosity']) 744 else: 745 if verbose_level > vHUMAN: 746 print("Using EPT pmap from pm_eptp\n") 747 return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, config['verbosity']) 748 749def assert_64bit(val): 750 assert(val < 2**64) 751 752ARM64_TTE_SIZE = 8 753ARM64_TTE_SHIFT = 3 754ARM64_VMADDR_BITS = 48 755 756def PmapBlockOffsetMaskARM64(page_size, level): 757 assert level >= 0 and level <= 3 758 ttentries = (page_size // ARM64_TTE_SIZE) 759 return page_size * (ttentries ** (3 - level)) - 1 760 761def PmapBlockBaseMaskARM64(page_size, level): 762 assert level >= 0 and level <= 3 763 return ((1 << ARM64_VMADDR_BITS) - 1) & ~PmapBlockOffsetMaskARM64(page_size, level) 764 765def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False): 766 """ Display the bits of an ARM64 translation table or page table entry 767 in human-readable form. 768 tte: integer value of the TTE/PTE 769 level: translation table level. Valid values are 1, 2, or 3. 770 is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise. 771 """ 772 assert(isinstance(level, numbers.Integral)) 773 assert_64bit(tte) 774 775 if tte & 0x1 == 0x0: 776 print("Invalid.") 777 return 778 779 if (tte & 0x2 == 0x2) and (level != 0x3): 780 print("Type = Table pointer.") 781 print("Table addr = {:#x}.".format(tte & 0xfffffffff000)) 782 783 if not stage2: 784 print("PXN = {:#x}.".format((tte >> 59) & 0x1)) 785 print("XN = {:#x}.".format((tte >> 60) & 0x1)) 786 print("AP = {:#x}.".format((tte >> 61) & 0x3)) 787 print("NS = {:#x}.".format(tte >> 63)) 788 else: 789 print("Type = Block.") 790 791 if stage2: 792 print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf)) 793 else: 794 attr_index = (tte >> 2) & 0x7 795 attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU', 796 3: 'CACHE DISABLE', 4: 'INNERWRITEBACK', 5: 'POSTED', 797 6: 'POSTED_REORDERED', 7: 'POSTED_COMBINED_REORDERED' } 798 799 # Only show the string version of the AttrIdx for CPU mappings since 800 # these values don't apply to IOMMU mappings. 801 if is_iommu_tte: 802 print("AttrIdx = {:#x}.".format(attr_index)) 803 else: 804 print("AttrIdx = {:#x} ({:s}).".format(attr_index, attr_string[attr_index])) 805 print("NS = {:#x}.".format((tte >> 5) & 0x1)) 806 807 if stage2: 808 print("S2AP = {:#x}.".format((tte >> 6) & 0x3)) 809 else: 810 print("AP = {:#x}.".format((tte >> 6) & 0x3)) 811 812 print("SH = {:#x}.".format((tte >> 8) & 0x3)) 813 print("AF = {:#x}.".format((tte >> 10) & 0x1)) 814 815 if not stage2: 816 print("nG = {:#x}.".format((tte >> 11) & 0x1)) 817 818 print("HINT = {:#x}.".format((tte >> 52) & 0x1)) 819 820 if stage2: 821 print("S2XN = {:#x}.".format((tte >> 53) & 0x3)) 822 else: 823 print("PXN = {:#x}.".format((tte >> 53) & 0x1)) 824 print("XN = {:#x}.".format((tte >> 54) & 0x1)) 825 826 print("SW Use = {:#x}.".format((tte >> 55) & 0xf)) 827 828 return 829 830def PmapTTnIndexARM64(vaddr, pmap_pt_attr): 831 pta_max_level = unsigned(pmap_pt_attr.pta_max_level) 832 833 tt_index = [] 834 for i in range(pta_max_level + 1): 835 tt_index.append((vaddr & unsigned(pmap_pt_attr.pta_level_info[i].index_mask)) \ 836 >> unsigned(pmap_pt_attr.pta_level_info[i].shift)) 837 838 return tt_index 839 840def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN): 841 assert(type(vaddr) in (int, int)) 842 assert_64bit(vaddr) 843 assert_64bit(root_tte) 844 845 # Obtain pmap attributes 846 page_size = pmap_pt_attr.pta_page_size 847 page_offset_mask = (page_size - 1) 848 page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) 849 tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr) 850 stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False) 851 852 # The pmap starts at a page table level that is defined by register 853 # values; the root level can be obtained from the attributes structure 854 level = unsigned(pmap_pt_attr.pta_root_level) 855 856 root_tt_index = tt_index[level] 857 root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \ 858 unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1 859 tte = int(unsigned(root_tte[root_tt_index])) 860 861 # Walk the page tables 862 paddr = -1 863 max_level = unsigned(pmap_pt_attr.pta_max_level) 864 is_valid = True 865 is_leaf = False 866 867 while (level <= max_level): 868 if verbose_level >= vSCRIPT: 869 print("L{} entry: {:#x}".format(level, tte)) 870 if verbose_level >= vDETAIL: 871 PmapDecodeTTEARM64(tte, level, stage2) 872 873 if tte & 0x1 == 0x0: 874 if verbose_level >= vHUMAN: 875 print("L{} entry invalid: {:#x}\n".format(level, tte)) 876 877 is_valid = False 878 break 879 880 # Handle leaf entry 881 if tte & 0x2 == 0x0 or level == max_level: 882 base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level) 883 offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level) 884 paddr = tte & base_mask 885 paddr = paddr | (vaddr & offset_mask) 886 887 if level != max_level: 888 print("phys: {:#x}".format(paddr)) 889 890 is_leaf = True 891 break 892 else: 893 # Handle page table entry 894 next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1]) 895 assert(isinstance(next_phys, numbers.Integral)) 896 897 next_virt = kern.PhysToKernelVirt(next_phys) 898 assert(isinstance(next_virt, numbers.Integral)) 899 900 if verbose_level >= vDETAIL: 901 print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt)) 902 903 ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*") 904 tte = int(unsigned(dereference(ttep))) 905 assert(isinstance(tte, numbers.Integral)) 906 907 # We've parsed one level, so go to the next level 908 assert(level <= 3) 909 level = level + 1 910 911 912 if verbose_level >= vHUMAN: 913 if paddr: 914 print("Translation of {:#x} is {:#x}.".format(vaddr, paddr)) 915 else: 916 print("(no translation)") 917 918 return paddr 919 920def PmapWalk(pmap, vaddr, verbose_level = vHUMAN): 921 if kern.arch == 'x86_64': 922 return PmapWalkX86_64(pmap, vaddr, verbose_level) 923 elif kern.arch == 'arm': 924 return PmapWalkARM(pmap, vaddr, verbose_level) 925 elif kern.arch.startswith('arm64'): 926 # Obtain pmap attributes from pmap structure 927 pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr 928 return PmapWalkARM64(pmap_pt_attr, pmap.tte, vaddr, verbose_level) 929 else: 930 raise NotImplementedError("PmapWalk does not support {0}".format(kern.arch)) 931 932@lldb_command('pmap_walk') 933def PmapWalkHelper(cmd_args=None): 934 """ Perform a page-table walk in <pmap> for <virtual_address>. 935 Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e] 936 Multiple -v's can be specified for increased verbosity 937 """ 938 if cmd_args == None or len(cmd_args) < 2: 939 raise ArgumentError("Too few arguments to pmap_walk.") 940 941 pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t') 942 addr = ArgumentStringToInt(cmd_args[1]) 943 PmapWalk(pmap, addr, config['verbosity']) 944 return 945 946def GetMemoryAttributesFromUser(requested_type): 947 pmap_attr_dict = { 948 '4k' : kern.globals.pmap_pt_attr_4k, 949 '16k' : kern.globals.pmap_pt_attr_16k, 950 '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2') else None, 951 } 952 953 requested_type = requested_type.lower() 954 if requested_type not in pmap_attr_dict: 955 return None 956 957 return pmap_attr_dict[requested_type] 958 959@lldb_command('ttep_walk') 960def TTEPWalkPHelper(cmd_args=None): 961 """ Perform a page-table walk in <root_ttep> for <virtual_address>. 962 Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e] 963 Multiple -v's can be specified for increased verbosity 964 """ 965 if cmd_args == None or len(cmd_args) < 2: 966 raise ArgumentError("Too few arguments to ttep_walk.") 967 968 if not kern.arch.startswith('arm64'): 969 raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch)) 970 971 tte = kern.GetValueFromAddress(kern.PhysToKernelVirt(ArgumentStringToInt(cmd_args[0])), 'unsigned long *') 972 addr = ArgumentStringToInt(cmd_args[1]) 973 974 pmap_pt_attr = kern.globals.native_pt_attr if len(cmd_args) < 3 else GetMemoryAttributesFromUser(cmd_args[2]) 975 if pmap_pt_attr is None: 976 raise ArgumentError("Invalid translation attribute type.") 977 978 return PmapWalkARM64(pmap_pt_attr, tte, addr, config['verbosity']) 979 980@lldb_command('decode_tte') 981def DecodeTTE(cmd_args=None): 982 """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2] 983 Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2] 984 """ 985 if cmd_args == None or len(cmd_args) < 2: 986 raise ArgumentError("Too few arguments to decode_tte.") 987 if len(cmd_args) > 2 and cmd_args[2] not in ["s1", "s2"]: 988 raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2])) 989 if kern.arch == 'arm': 990 PmapDecodeTTEARM(kern.GetValueFromAddress(cmd_args[0], "unsigned long"), ArgumentStringToInt(cmd_args[1]), vSCRIPT) 991 elif kern.arch.startswith('arm64'): 992 stage2 = True if len(cmd_args) > 2 and cmd_args[2] == "s2" else False 993 PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2) 994 else: 995 raise NotImplementedError("decode_tte does not support {0}".format(kern.arch)) 996 997 998PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) | (1 << 58) | (1 << 57) | (1 << 56) 999PVH_HIGH_FLAGS_ARM32 = (1 << 31) 1000 1001def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN): 1002 """ Dump information about a single mapping retrieved by the pv_head_table. 1003 1004 pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP, 1005 or None if type PVH_TYPE_PTEP. 1006 ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with 1007 the high flags already set (the type bits don't need to be cleared). 1008 For type PVH_TYPE_PVEP this will be the value retrieved from the 1009 pve_ptep[] array. 1010 """ 1011 if kern.arch.startswith('arm64'): 1012 iommu_flag = 0x4 1013 iommu_table_flag = 1 << 63 1014 else: 1015 iommu_flag = 0 1016 iommu_table_flag = 0 1017 1018 # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries. 1019 if pvep is not None and (ptep & 0x1): 1020 # Note: It's not possible for IOMMU mappings to be marked as alt acct so 1021 # setting this string is mutually exclusive with setting the IOMMU strings. 1022 pte_str = ' (alt acct)' 1023 else: 1024 pte_str = '' 1025 1026 if pvep is not None: 1027 pve_str = 'PVEP {:#x}, '.format(pvep) 1028 else: 1029 pve_str = '' 1030 1031 # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this 1032 # either does nothing or clears out the AltAcct bit. 1033 ptep = ptep & ~0x3 1034 1035 # When printing with extra verbosity, print an extra newline that describes 1036 # who owns the mapping. 1037 extra_str = '' 1038 1039 if ptep & iommu_flag: 1040 # The mapping is an IOMMU Mapping 1041 ptep = ptep & ~iommu_flag 1042 1043 # Due to LLDB automatically setting all the high bits of pointers, when 1044 # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set 1045 # the iommu_table_flag, which means this check only works for PVH entries 1046 # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH 1047 # entry which has the right casting applied to avoid this issue). 1048 # 1049 # Why don't we just do the same casting for pve_ptep[] you ask? Well not 1050 # for a lack of trying, that's for sure. If you can figure out how to 1051 # cast that array correctly, then be my guest. 1052 if ptep & iommu_table_flag: 1053 pte_str = ' (IOMMU table), entry' 1054 1055 ptd = GetPtDesc(KVToPhysARM(ptep)) 1056 iommu = dereference(ptd.iommu) 1057 else: 1058 # Instead of dumping the PTE (since we don't have that), dump the 1059 # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc). 1060 # 1061 # This works because later on when the "ptep" is dereferenced as a 1062 # PTE pointer (uint64_t pointer), the descriptor pointer will be 1063 # dumped as that's the first 64-bit value in the IOMMU state object. 1064 pte_str = ' (IOMMU state), descriptor' 1065 ptep = ptep | iommu_table_flag 1066 iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *')) 1067 1068 # For IOMMU mappings, dump who owns the mapping as the extra string. 1069 extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name) 1070 if unsigned(iommu.name) != 0: 1071 extra_str += '/{:s}'.format(iommu.name) 1072 extra_str += ' (iommu state: {:x})'.format(addressof(iommu)) 1073 else: 1074 # The mapping is a CPU Mapping 1075 pte_str += ', entry' 1076 ptd = GetPtDesc(KVToPhysARM(ptep)) 1077 if ptd.pmap == kern.globals.kernel_pmap: 1078 extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap) 1079 elif verbose_level >= vDETAIL: 1080 task = TaskForPmapHelper(ptd.pmap) 1081 extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>") 1082 try: 1083 print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) 1084 except: 1085 print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str)) 1086 1087 if verbose_level >= vDETAIL: 1088 print(" |-- {:s}".format(extra_str)) 1089 1090def PVWalkARM(pai, verbose_level = vHUMAN): 1091 """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap. 1092 1093 pai: physical address index (PAI) corresponding to the pv_head_table 1094 entry to walk. 1095 verbose_level: Set to vSCRIPT or higher to print extra info around the 1096 the pv_head_table/pp_attr_table flags and to dump the 1097 pt_desc_t object if the type is a PTD. 1098 """ 1099 # LLDB will automatically try to make pointer values dereferencable by 1100 # setting the upper bits if they aren't set. We need to parse the flags 1101 # stored in the upper bits later, so cast the pv_head_table to an array of 1102 # integers to get around this "feature". We'll add the upper bits back 1103 # manually before deref'ing anything. 1104 pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*") 1105 pvh_raw = unsigned(pv_head_table[pai]) 1106 pvh = pvh_raw 1107 pvh_type = pvh & 0x3 1108 1109 print("PVH raw value: {:#x}".format(pvh_raw)) 1110 if kern.arch.startswith('arm64'): 1111 pvh = pvh | PVH_HIGH_FLAGS_ARM64 1112 else: 1113 pvh = pvh | PVH_HIGH_FLAGS_ARM32 1114 1115 if pvh_type == 0: 1116 print("PVH type: NULL") 1117 elif pvh_type == 3: 1118 print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3)) 1119 elif pvh_type == 2: 1120 print("PVH type: single PTE") 1121 PVDumpPTE(None, pvh, verbose_level) 1122 elif pvh_type == 1: 1123 pvep = pvh & ~0x3 1124 print("PVH type: PTE list") 1125 pve_ptep_idx = 0 1126 while pvep != 0: 1127 pve = kern.GetValueFromAddress(pvep, "pv_entry_t *") 1128 1129 if pve.pve_ptep[pve_ptep_idx] != 0: 1130 PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level) 1131 1132 pve_ptep_idx += 1 1133 if pve_ptep_idx == 2: 1134 pve_ptep_idx = 0 1135 pvep = unsigned(pve.pve_next) 1136 1137 if verbose_level >= vDETAIL: 1138 if (pvh_type == 1) or (pvh_type == 2): 1139 # Dump pv_head_table flags when there's a valid mapping. 1140 pvh_flags = [] 1141 1142 if pvh_raw & (1 << 62): 1143 pvh_flags.append("CPU") 1144 if pvh_raw & (1 << 60): 1145 pvh_flags.append("EXEC") 1146 if pvh_raw & (1 << 59): 1147 pvh_flags.append("LOCKDOWN_KC") 1148 if pvh_raw & (1 << 58): 1149 pvh_flags.append("HASHED") 1150 if pvh_raw & (1 << 57): 1151 pvh_flags.append("LOCKDOWN_CS") 1152 if pvh_raw & (1 << 56): 1153 pvh_flags.append("LOCKDOWN_RO") 1154 if kern.arch.startswith('arm64') and pvh_raw & (1 << 61): 1155 pvh_flags.append("LOCK") 1156 elif kern.arch == 'arm' and pvh_raw & (1 << 31): 1157 pvh_flags.append("LOCK") 1158 1159 print("PVH Flags: {}".format(pvh_flags)) 1160 1161 # Always dump pp_attr_table flags (these can be updated even if there aren't mappings). 1162 ppattr = unsigned(kern.globals.pp_attr_table[pai]) 1163 print("PPATTR raw value: {:#x}".format(ppattr)) 1164 1165 ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)] 1166 if ppattr & 0x40: 1167 ppattr_flags.append("REFERENCED") 1168 if ppattr & 0x80: 1169 ppattr_flags.append("MODIFIED") 1170 if ppattr & 0x100: 1171 ppattr_flags.append("INTERNAL") 1172 if ppattr & 0x200: 1173 ppattr_flags.append("REUSABLE") 1174 if ppattr & 0x400: 1175 ppattr_flags.append("ALTACCT") 1176 if ppattr & 0x800: 1177 ppattr_flags.append("NOENCRYPT") 1178 if ppattr & 0x1000: 1179 ppattr_flags.append("REFFAULT") 1180 if ppattr & 0x2000: 1181 ppattr_flags.append("MODFAULT") 1182 if ppattr & 0x4000: 1183 ppattr_flags.append("MONITOR") 1184 if ppattr & 0x8000: 1185 ppattr_flags.append("NO_MONITOR") 1186 1187 print("PPATTR Flags: {}".format(ppattr_flags)) 1188 1189 if pvh_type == 3: 1190 def RunLldbCmdHelper(command): 1191 """Helper for dumping an LLDB command right before executing it 1192 and printing the results. 1193 command: The LLDB command (as a string) to run. 1194 1195 Example input: "p/x kernel_pmap". 1196 """ 1197 print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command))) 1198 # Dump the page table descriptor object 1199 ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *') 1200 RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd)) 1201 1202 # Depending on the system, more than one ptd_info can be associated 1203 # with a single PTD. Only dump the first PTD info and assume the 1204 # user knows to dump the rest if they're on one of those systems. 1205 RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]") 1206 1207@lldb_command('pv_walk') 1208def PVWalk(cmd_args=None): 1209 """ Show mappings for <physical_address | PAI> tracked in the PV list. 1210 Syntax: (lldb) pv_walk <physical_address | PAI> [-vv] 1211 1212 Extra verbosity will pretty print the pv_head_table/pp_attr_table flags 1213 as well as dump the page table descriptor (PTD) struct if the entry is a 1214 PTD. 1215 """ 1216 if cmd_args == None or len(cmd_args) < 1: 1217 raise ArgumentError("Too few arguments to pv_walk.") 1218 if not kern.arch.startswith('arm'): 1219 raise NotImplementedError("pv_walk does not support {0}".format(kern.arch)) 1220 1221 pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long') 1222 1223 # If the input is already a PAI, this function will return the input unchanged. 1224 # This function also ensures that the physical address is kernel-managed. 1225 pai = ConvertPhysAddrToPai(pa) 1226 1227 PVWalkARM(pai, config['verbosity']) 1228 1229@lldb_command('kvtophys') 1230def KVToPhys(cmd_args=None): 1231 """ Translate a kernel virtual address to the corresponding physical address. 1232 Assumes the virtual address falls within the kernel static region. 1233 Syntax: (lldb) kvtophys <kernel virtual address> 1234 """ 1235 if cmd_args == None or len(cmd_args) < 1: 1236 raise ArgumentError("Too few arguments to kvtophys.") 1237 if kern.arch.startswith('arm'): 1238 print("{:#x}".format(KVToPhysARM(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))))) 1239 elif kern.arch == 'x86_64': 1240 print("{:#x}".format(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'))) - unsigned(kern.globals.physmap_base))) 1241 1242@lldb_command('phystokv') 1243def PhysToKV(cmd_args=None): 1244 """ Translate a physical address to the corresponding static kernel virtual address. 1245 Assumes the physical address corresponds to managed DRAM. 1246 Syntax: (lldb) phystokv <physical address> 1247 """ 1248 if cmd_args == None or len(cmd_args) < 1: 1249 raise ArgumentError("Too few arguments to phystokv.") 1250 print("{:#x}".format(kern.PhysToKernelVirt(int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))))) 1251 1252def KVToPhysARM(addr): 1253 if kern.arch.startswith('arm64'): 1254 ptov_table = kern.globals.ptov_table 1255 for i in range(0, kern.globals.ptov_index): 1256 if (addr >= int(unsigned(ptov_table[i].va))) and (addr < (int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].len)))): 1257 return (addr - int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].pa))) 1258 return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase)) 1259 1260 1261def GetPtDesc(paddr): 1262 pn = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size 1263 pvh = unsigned(kern.globals.pv_head_table[pn]) 1264 if kern.arch.startswith('arm64'): 1265 pvh = pvh | PVH_HIGH_FLAGS_ARM64 1266 else: 1267 pvh = pvh | PVH_HIGH_FLAGS_ARM32 1268 pvh_type = pvh & 0x3 1269 if pvh_type != 0x3: 1270 raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh)) 1271 ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *') 1272 return ptd 1273 1274def ShowPTEARM(pte, page_size, stage2 = False): 1275 """ Display vital information about an ARM page table entry 1276 pte: kernel virtual address of the PTE. Should be L3 PTE. May also work with L2 TTEs for certain devices. 1277 """ 1278 ptd = GetPtDesc(KVToPhysARM(pte)) 1279 pt_index = (pte % kern.globals.page_size) // page_size 1280 refcnt = ptd.ptd_info[pt_index].refcnt 1281 wiredcnt = ptd.ptd_info[pt_index].wiredcnt 1282 print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x})".format(ptd, refcnt, wiredcnt)) 1283 1284 # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001. 1285 is_iommu_pte = (refcnt & 0x8000) == 0x8000 1286 1287 # The pmap/iommu field is a union, so only print the correct one. 1288 if is_iommu_pte: 1289 iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name) 1290 if unsigned(dereference(ptd.iommu).name) != 0: 1291 iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name) 1292 1293 print("iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name)) 1294 else: 1295 if ptd.pmap == kern.globals.kernel_pmap: 1296 pmap_str = "(kernel_pmap)" 1297 else: 1298 task = TaskForPmapHelper(ptd.pmap) 1299 pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>") 1300 print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str)) 1301 1302 pte_pgoff = pte % page_size 1303 if kern.arch.startswith('arm64'): 1304 pte_pgoff = pte_pgoff // 8 1305 nttes = page_size // 8 1306 else: 1307 pte_pgoff = pte_pgoff // 4 1308 nttes = page_size // 4 1309 if ptd.ptd_info[pt_index].refcnt == 0x4000: 1310 level = 2 1311 granule = nttes * page_size 1312 else: 1313 level = 3 1314 granule = page_size 1315 print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(ptd.va[pt_index])) + (pte_pgoff * granule))) 1316 pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *')))) 1317 print("value: {:#x}".format(pteval)) 1318 if kern.arch.startswith('arm64'): 1319 print("level: {:d}".format(level)) 1320 PmapDecodeTTEARM64(pteval, level, stage2, is_iommu_pte) 1321 1322 elif kern.arch == 'arm': 1323 PmapDecodeTTEARM(pteval, 2, vSCRIPT) 1324 1325@lldb_command('showpte') 1326def ShowPTE(cmd_args=None): 1327 """ Display vital information about the page table entry at VA <pte> 1328 Syntax: (lldb) showpte <pte_va> [4k|16k|16k_s2] 1329 """ 1330 if cmd_args == None or len(cmd_args) < 1: 1331 raise ArgumentError("Too few arguments to showpte.") 1332 1333 if kern.arch == 'arm': 1334 ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), kern.globals.page_size) 1335 elif kern.arch.startswith('arm64'): 1336 pmap_pt_attr = kern.globals.native_pt_attr if len(cmd_args) < 2 else GetMemoryAttributesFromUser(cmd_args[1]) 1337 if pmap_pt_attr is None: 1338 raise ArgumentError("Invalid translation attribute type.") 1339 1340 stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False) 1341 ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), pmap_pt_attr.pta_page_size, stage2) 1342 else: 1343 raise NotImplementedError("showpte does not support {0}".format(kern.arch)) 1344 1345def FindMappingAtLevelARM(pmap, tt, nttes, level, va, action): 1346 """ Perform the specified action for all valid mappings in an ARM translation table 1347 pmap: owner of the translation table 1348 tt: translation table or page table 1349 nttes: number of entries in tt 1350 level: translation table level, 1 or 2 1351 action: callback for each valid TTE 1352 """ 1353 for i in range(nttes): 1354 try: 1355 tte = tt[i] 1356 va_size = None 1357 if level == 1: 1358 if tte & 0x3 == 0x1: 1359 type = 'table' 1360 granule = 1024 1361 va_size = kern.globals.page_size * 256 1362 paddr = tte & 0xFFFFFC00 1363 elif tte & 0x3 == 0x2: 1364 type = 'block' 1365 if (tte & 0x40000) == 0x40000: 1366 granule = 1 << 24 1367 paddr = tte & 0xFF000000 1368 else: 1369 granule = 1 << 20 1370 paddr = tte & 0xFFF00000 1371 else: 1372 continue 1373 elif (tte & 0x3) == 0x1: 1374 type = 'entry' 1375 granule = 1 << 16 1376 paddr = tte & 0xFFFF0000 1377 elif (tte & 0x3) != 0: 1378 type = 'entry' 1379 granule = 1 << 12 1380 paddr = tte & 0xFFFFF000 1381 else: 1382 continue 1383 if va_size is None: 1384 va_size = granule 1385 mapped_va = va + (va_size * i) 1386 if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule): 1387 if level == 1 and (tte & 0x3) == 0x1: 1388 tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *') 1389 FindMappingAtLevelARM(pmap, tt_next, granule // 4, level + 1, mapped_va, action) 1390 except Exception as exc: 1391 print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i])))) 1392 1393def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action): 1394 """ Perform the specified action for all valid mappings in an ARM64 translation table 1395 pmap: owner of the translation table 1396 tt: translation table or page table 1397 nttes: number of entries in tt 1398 level: translation table level, 1 2 or 3 1399 action: callback for each valid TTE 1400 """ 1401 # Obtain pmap attributes 1402 pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr 1403 page_size = pmap_pt_attr.pta_page_size 1404 page_offset_mask = (page_size - 1) 1405 page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) 1406 max_level = unsigned(pmap_pt_attr.pta_max_level) 1407 1408 for i in range(nttes): 1409 try: 1410 tte = tt[i] 1411 if tte & 0x1 == 0x0: 1412 continue 1413 1414 tt_next = None 1415 paddr = unsigned(tte) & unsigned(page_base_mask) 1416 1417 # Handle leaf entry 1418 if tte & 0x2 == 0x0 or level == max_level: 1419 type = 'block' if level < max_level else 'entry' 1420 granule = PmapBlockOffsetMaskARM64(page_size, level) + 1 1421 else: 1422 # Handle page table entry 1423 type = 'table' 1424 granule = page_size 1425 tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *') 1426 1427 mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i) 1428 if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule): 1429 if tt_next is not None: 1430 FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action) 1431 1432 except Exception as exc: 1433 print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i])))) 1434 1435def ScanPageTables(action, targetPmap=None): 1436 """ Perform the specified action for all valid mappings in all page tables, 1437 optionally restricted to a single pmap. 1438 pmap: pmap whose page table should be scanned. If None, all pmaps on system will be scanned. 1439 """ 1440 print("Scanning all available translation tables. This may take a long time...") 1441 def ScanPmap(pmap, action): 1442 if kern.arch.startswith('arm64'): 1443 # Obtain pmap attributes 1444 pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr 1445 granule = pmap_pt_attr.pta_page_size 1446 level = unsigned(pmap_pt_attr.pta_root_level) 1447 root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \ 1448 unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1 1449 elif kern.arch == 'arm': 1450 granule = pmap.tte_index_max * 4 1451 1452 if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule): 1453 if kern.arch.startswith('arm64'): 1454 FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action) 1455 elif kern.arch == 'arm': 1456 FindMappingAtLevelARM(pmap, pmap.tte, pmap.tte_index_max, 1, pmap.min, action) 1457 1458 if targetPmap is not None: 1459 ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action) 1460 else: 1461 for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'): 1462 ScanPmap(pmap, action) 1463 1464@lldb_command('showallmappings') 1465def ShowAllMappings(cmd_args=None): 1466 """ Find and display all available mappings on the system for 1467 <physical_address>. Optionally only searches the pmap 1468 specified by [<pmap>] 1469 Syntax: (lldb) showallmappings <physical_address> [<pmap>] 1470 WARNING: this macro can take a long time (up to 30min.) to complete! 1471 """ 1472 if cmd_args == None or len(cmd_args) < 1: 1473 raise ArgumentError("Too few arguments to showallmappings.") 1474 if not kern.arch.startswith('arm'): 1475 raise NotImplementedError("showallmappings does not support {0}".format(kern.arch)) 1476 pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long') 1477 targetPmap = None 1478 if len(cmd_args) > 1: 1479 targetPmap = cmd_args[1] 1480 def printMatchedMapping(pmap, level, type, tte, paddr, va, granule): 1481 if paddr <= pa < (paddr + granule): 1482 print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va)) 1483 return True 1484 ScanPageTables(printMatchedMapping, targetPmap) 1485 1486@lldb_command('showptusage') 1487def ShowPTUsage(cmd_args=None): 1488 """ Display a summary of pagetable allocations for a given pmap. 1489 Syntax: (lldb) showptusage [<pmap>] 1490 WARNING: this macro can take a long time (> 1hr) to complete! 1491 """ 1492 if not kern.arch.startswith('arm'): 1493 raise NotImplementedError("showptusage does not support {0}".format(kern.arch)) 1494 targetPmap = None 1495 if len(cmd_args) > 0: 1496 targetPmap = cmd_args[0] 1497 lastPmap = [None] 1498 numTables = [0] 1499 numUnnested = [0] 1500 numPmaps = [0] 1501 def printValidTTE(pmap, level, type, tte, paddr, va, granule): 1502 unnested = "" 1503 nested_region_addr = int(unsigned(pmap.nested_region_addr)) 1504 nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size)) 1505 if lastPmap[0] is None or (pmap != lastPmap[0]): 1506 lastPmap[0] = pmap 1507 numPmaps[0] = numPmaps[0] + 1 1508 print ("pmap {:#x}:".format(pmap)) 1509 if type == 'root': 1510 return True 1511 if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end): 1512 ptd = GetPtDesc(paddr) 1513 if ptd.pmap != pmap: 1514 return False 1515 else: 1516 numUnnested[0] = numUnnested[0] + 1 1517 unnested = " (likely unnested)" 1518 numTables[0] = numTables[0] + 1 1519 print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested) 1520 if level == 2: 1521 return False 1522 else: 1523 return True 1524 ScanPageTables(printValidTTE, targetPmap) 1525 print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables[0], numUnnested[0], numPmaps[0])) 1526 1527def checkPVList(pmap, level, type, tte, paddr, va, granule): 1528 """ Checks an ARM physical-to-virtual mapping list for consistency errors. 1529 pmap: owner of the translation table 1530 level: translation table level. PV lists will only be checked for L2 (arm32) or L3 (arm64) tables. 1531 type: unused 1532 tte: KVA of PTE to check for presence in PV list. If None, presence check will be skipped. 1533 paddr: physical address whose PV list should be checked. Need not be page-aligned. 1534 granule: unused 1535 """ 1536 vm_first_phys = unsigned(kern.globals.vm_first_phys) 1537 vm_last_phys = unsigned(kern.globals.vm_last_phys) 1538 page_size = kern.globals.page_size 1539 if kern.arch.startswith('arm64'): 1540 page_offset_mask = (page_size - 1) 1541 page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) 1542 paddr = paddr & page_base_mask 1543 max_level = 3 1544 pvh_set_bits = PVH_HIGH_FLAGS_ARM64 1545 elif kern.arch == 'arm': 1546 page_base_mask = 0xFFFFF000 1547 paddr = paddr & page_base_mask 1548 max_level = 2 1549 pvh_set_bits = PVH_HIGH_FLAGS_ARM32 1550 if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys: 1551 return True 1552 pn = (paddr - vm_first_phys) // page_size 1553 pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits 1554 pvh_type = pvh & 0x3 1555 if pmap is not None: 1556 pmap_str = "pmap: {:#x}: ".format(pmap) 1557 else: 1558 pmap_str = '' 1559 if tte is not None: 1560 tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr) 1561 else: 1562 tte_str = "paddr {:#x}: ".format(paddr) 1563 if pvh_type == 0 or pvh_type == 3: 1564 print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type)) 1565 elif pvh_type == 2: 1566 ptep = pvh & ~0x3 1567 if tte is not None and ptep != unsigned(tte): 1568 print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep)) 1569 try: 1570 pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask 1571 if (pte != paddr): 1572 print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte)) 1573 except Exception as exc: 1574 print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep)) 1575 elif pvh_type == 1: 1576 pvep = pvh & ~0x3 1577 tte_match = False 1578 pve_ptep_idx = 0 1579 while pvep != 0: 1580 pve = kern.GetValueFromAddress(pvep, "pv_entry_t *") 1581 ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3 1582 pve_ptep_idx += 1 1583 if pve_ptep_idx == 2: 1584 pve_ptep_idx = 0 1585 pvep = unsigned(pve.pve_next) 1586 if ptep == 0: 1587 continue 1588 if tte is not None and ptep == unsigned(tte): 1589 tte_match = True 1590 try: 1591 pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask 1592 if (pte != paddr): 1593 print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte)) 1594 except Exception as exc: 1595 print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep)) 1596 if tte is not None and not tte_match: 1597 print("{:s}{:s}not found in PV list".format(pmap_str, tte_str, paddr)) 1598 return True 1599 1600@lldb_command('pv_check', 'P') 1601def PVCheck(cmd_args=None, cmd_options={}): 1602 """ Check the physical-to-virtual mapping for a given PTE or physical address 1603 Syntax: (lldb) pv_check <addr> [-p] 1604 -P : Interpret <addr> as a physical address rather than a PTE 1605 """ 1606 if cmd_args == None or len(cmd_args) < 1: 1607 raise ArgumentError("Too few arguments to pv_check.") 1608 if kern.arch == 'arm': 1609 level = 2 1610 elif kern.arch.startswith('arm64'): 1611 level = 3 1612 else: 1613 raise NotImplementedError("pv_check does not support {0}".format(kern.arch)) 1614 if "-P" in cmd_options: 1615 pte = None 1616 pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long"))) 1617 else: 1618 pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *') 1619 pa = int(unsigned(dereference(pte))) 1620 checkPVList(None, level, None, pte, pa, 0, None) 1621 1622@lldb_command('check_pmaps') 1623def CheckPmapIntegrity(cmd_args=None): 1624 """ Performs a system-wide integrity check of all PTEs and associated PV lists. 1625 Optionally only checks the pmap specified by [<pmap>] 1626 Syntax: (lldb) check_pmaps [<pmap>] 1627 WARNING: this macro can take a HUGE amount of time (several hours) if you do not 1628 specify [pmap] to limit it to a single pmap. It will also give false positives 1629 for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM. 1630 Use of this macro without the [<pmap>] argument is heavily discouraged. 1631 """ 1632 if not kern.arch.startswith('arm'): 1633 raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch)) 1634 targetPmap = None 1635 if len(cmd_args) > 0: 1636 targetPmap = cmd_args[0] 1637 ScanPageTables(checkPVList, targetPmap) 1638 1639@lldb_command('pmapsforledger') 1640def PmapsForLedger(cmd_args=None): 1641 """ Find and display all pmaps currently using <ledger>. 1642 Syntax: (lldb) pmapsforledger <ledger> 1643 """ 1644 if cmd_args == None or len(cmd_args) < 1: 1645 raise ArgumentError("Too few arguments to pmapsforledger.") 1646 if not kern.arch.startswith('arm'): 1647 raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch)) 1648 ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t') 1649 for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'): 1650 if pmap.ledger == ledger: 1651 print("pmap: {:#x}".format(pmap)) 1652 1653 1654def IsValidPai(pai): 1655 """ Given an unsigned value, detect whether that value is a valid physical 1656 address index (PAI). It does this by first computing the last possible 1657 PAI and comparing the input to that. 1658 1659 All contemporary SoCs reserve the bottom part of the address space, so 1660 there shouldn't be any valid physical addresses between zero and the 1661 last PAI either. 1662 """ 1663 page_size = unsigned(kern.globals.page_size) 1664 vm_first_phys = unsigned(kern.globals.vm_first_phys) 1665 vm_last_phys = unsigned(kern.globals.vm_last_phys) 1666 1667 last_pai = (vm_last_phys - vm_first_phys) // page_size 1668 if (pai < 0) or (pai >= last_pai): 1669 return False 1670 1671 return True 1672 1673def ConvertPaiToPhysAddr(pai): 1674 """ Convert the given Physical Address Index (PAI) into a physical address. 1675 1676 If the input isn't a valid PAI (it's most likely already a physical 1677 address), then just return back the input unchanged. 1678 """ 1679 pa = pai 1680 1681 # If the value is a valid PAI, then convert it into a physical address. 1682 if IsValidPai(pai): 1683 pa = (pai * unsigned(kern.globals.page_size)) + unsigned(kern.globals.vm_first_phys) 1684 1685 return pa 1686 1687def ConvertPhysAddrToPai(pa): 1688 """ Convert the given physical address into a Physical Address Index (PAI). 1689 1690 If the input is already a valid PAI, then just return back the input 1691 unchanged. 1692 """ 1693 vm_first_phys = unsigned(kern.globals.vm_first_phys) 1694 vm_last_phys = unsigned(kern.globals.vm_last_phys) 1695 pai = pa 1696 1697 if not IsValidPai(pa) and (pa < vm_first_phys or pa >= vm_last_phys): 1698 raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, vm_first_phys, vm_last_phys)) 1699 elif not IsValidPai(pa): 1700 # If the value isn't already a valid PAI, then convert it into one. 1701 pai = (pa - vm_first_phys) // unsigned(kern.globals.page_size) 1702 1703 return pai 1704 1705@lldb_command('pmappaindex') 1706def PmapPaIndex(cmd_args=None): 1707 """ Display both a physical address and physical address index (PAI) when 1708 provided with only one of those values. 1709 1710 Syntax: (lldb) pmappaindex <physical address | PAI> 1711 1712 NOTE: This macro will throw an exception if the input isn't a valid PAI 1713 and is also not a kernel-managed physical address. 1714 """ 1715 if (cmd_args == None) or (len(cmd_args) < 1): 1716 raise ArgumentError("Too few arguments to pmappaindex.") 1717 1718 if not kern.arch.startswith('arm'): 1719 raise NotImplementedError("pmappaindex is only supported on ARM devices.") 1720 1721 value = kern.GetValueFromAddress(cmd_args[0], 'unsigned long') 1722 pai = value 1723 phys_addr = value 1724 1725 if IsValidPai(value): 1726 # Input is a PAI, calculate the physical address. 1727 phys_addr = ConvertPaiToPhysAddr(value) 1728 else: 1729 # Input is a physical address, calculate the PAI 1730 pai = ConvertPhysAddrToPai(value) 1731 1732 print("Physical Address: {:#x}".format(phys_addr)) 1733 print("PAI: {:d}".format(pai)) 1734