# xref: /xnu-12377.61.12/tools/lldbmacros/pmap.py (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1from xnu import *
2import xnudefines
3from kdp import *
4from utils import *
5import struct
6from collections import namedtuple
7
def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None):
    """ Read an integer from physical memory.
        params:
            phys_addr : int - Physical address to read
            bitsize   : int - width of the read in bits (8/16/32/64). defaults to 64 bit
            cpuval    : None (optional; accepted for caller compatibility, unused)
        returns:
            int - value read from memory. in case of failure 0xBAD10AD is returned.
    """
    if "kdp" == GetConnectionProtocol():
        return KDPReadPhysMEM(phys_addr, bitsize)

    # Not connected over KDP; fall back to reading through the
    # physical-to-kernel-virtual mapping.
    paddr_in_kva = kern.PhysToKernelVirt(int(phys_addr))
    if paddr_in_kva:
        ctype_for_width = {
            64: 'uint64_t *',
            32: 'uint32_t *',
            16: 'uint16_t *',
            8:  'uint8_t *',
        }
        ctype = ctype_for_width.get(bitsize)
        if ctype is not None:
            return kern.GetValueFromAddress(paddr_in_kva, ctype).GetSBValue().Dereference().GetValueAsUnsigned()
    # Unknown width or unmapped address.
    return 0xBAD10AD
32
@lldb_command('readphys')
def ReadPhys(cmd_args = None):
    """ Reads the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: readphys <nbits> <address>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError()

    # First argument is the read width, second the physical address.
    nbits = ArgumentStringToInt(cmd_args[0])
    phys_addr = ArgumentStringToInt(cmd_args[1])
    print("{0: <#x}".format(ReadPhysInt(phys_addr, nbits)))
    return True
50
# Width-specific shorthands that pre-fill the <nbits> argument of 'readphys'.
lldb_alias('readphys8', 'readphys 8 ')
lldb_alias('readphys16', 'readphys 16 ')
lldb_alias('readphys32', 'readphys 32 ')
lldb_alias('readphys64', 'readphys 64 ')
55
def KDPReadPhysMEM(address, bits):
    """ Setup the state for READPHYSMEM64 commands for reading data via kdp
        params:
            address : int - address where to read the data from
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            int: read value from memory.
            0xBAD10AD: if failed to read data.
    """
    retval = 0xBAD10AD
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return retval

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # The pack/unpack pairs below byte-swap each field into the wire
        # order expected by the probe.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]
        # Payload layout: 64-bit address, 32-bit byte count, 16-bit pad.
        packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # Custom packet code 25 is the read-physical request on this transport.
        ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            value = ret_obj.GetOutput()

            # Choose struct formats matching the requested width; packing with
            # one endianness and unpacking with the other swaps the reply bytes.
            if bits == 64 :
                pack_fmt = "<Q"
                unpack_fmt = ">Q"
            if bits == 32 :
                pack_fmt = "<I"
                unpack_fmt = ">I"
            if bits == 16 :
                pack_fmt = "<H"
                unpack_fmt = ">H"
            if bits == 8 :
                pack_fmt = "<B"
                unpack_fmt = ">B"

            # Parse the trailing hex digits of the reply as the value.
            # NOTE(review): the slice keeps (bits//4)+1 characters — one more
            # than the bits//4 hex digits of the value itself; presumably this
            # accounts for a trailing delimiter in the command output. Confirm
            # against the probe's reply format.
            retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0]

    else:
        # Classic KDP: hand-craft a kdp_readphysmem64 request inside the
        # kernel's manual_pkt buffer, then flag it as input for the stub.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))

        # Clear the input flag before staging the request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return retval

        kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize()
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return retval

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size)

        # Fill in the request header and body field by field.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Setting input to 1 hands the packet to the KDP stub for processing.
            if WriteInt32ToMemoryAddress(1, input_address):
                # now read data from the kdp packet
                data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data))
                if bits == 64 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 32 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 16 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 8 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()

    return retval
134
135
def KDPWritePhysMEM(address, intval, bits):
    """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp
        params:
            address : int - address where to save the data
            intval : int - integer value to be stored in memory
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            boolean: True if the write succeeded.
    """
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return False

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # The pack/unpack pairs byte-swap each field into the wire order
        # expected by the probe.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]

        # Choose struct formats matching the requested width so the value
        # gets the same byte swap as the address and byte count.
        if bits == 64 :
            pack_fmt = ">Q"
            unpack_fmt = "<Q"
        if bits == 32 :
            pack_fmt = ">I"
            unpack_fmt = "<I"
        if bits == 16 :
            pack_fmt = ">H"
            unpack_fmt = "<H"
        if bits == 8 :
            pack_fmt = ">B"
            unpack_fmt = "<B"

        data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0]

        # Payload layout: 64-bit address, 32-bit byte count, 16-bit pad,
        # 64-bit data value.
        packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # Custom packet code 26 is the write-physical request on this transport.
        ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            return True
        else:
            return False

    else:
        # Classic KDP: stage a kdp_writephysmem64 request in the kernel's
        # manual_pkt buffer and hand it to the stub.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))
        # Clear the input flag before staging the request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return False

        # The request carries its payload inline, so extend the packet length
        # by the number of data bytes being written.
        kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8)
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return False

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size)

        # Fill in the request header and body field by field.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Store the payload with a write width matching 'bits'.
            if bits == 8:
                if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 16:
                if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 32:
                if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 64:
                if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            # Setting input to 1 hands the packet to the KDP stub for processing.
            if WriteInt32ToMemoryAddress(1, input_address):
                return True
        return False
217
218
def WritePhysInt(phys_addr, int_val, bitsize = 64):
    """ Write an integer value into physical memory.
        params:
            phys_addr : int - Physical address to write to
            int_val   : int - value to store
            bitsize   : int - width of the write in bits (8/16/32/64). defaults to 64 bit
        returns:
            bool - True if write was successful.
    """
    # Physical writes are only implemented over the KDP transport.
    if GetConnectionProtocol() != "kdp":
        print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol())
        return False
    if KDPWritePhysMEM(phys_addr, int_val, bitsize):
        return True
    print("Failed to write via KDP.")
    return False
236
@lldb_command('writephys')
def WritePhys(cmd_args=None):
    """ writes to the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: writephys <nbits> <address> <value>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
        value: int value to be written
        ex. (lldb)writephys 16 0x12345abcd 0x25
    """
    if cmd_args is None or len(cmd_args) < 3:
        raise ArgumentError()

    # Parse width, destination address and value, in that order.
    nbits, phys_addr, int_value = (ArgumentStringToInt(arg) for arg in cmd_args[:3])
    print(WritePhysInt(phys_addr, int_value, nbits))
256
257
# Width-specific shorthands that pre-fill the <nbits> argument of 'writephys'.
lldb_alias('writephys8', 'writephys 8 ')
lldb_alias('writephys16', 'writephys 16 ')
lldb_alias('writephys32', 'writephys 32 ')
lldb_alias('writephys64', 'writephys 64 ')
262
263
def _PT_Step(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each entry is 8 bytes; read the 64-bit entry at the requested index.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex-dump the full 512-entry table containing this entry.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Physical-address field masks: drop the high bits (63:52) plus the low
    # 12 bits for a table/4K entry, or the low 21 bits for a 2MB large page.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet mode: only compute the next-level address and flags.
        if entry & 0x1 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            if entry & (0x1 <<7):
                # PS bit set: the entry maps a large page directly.
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose mode: decode each attribute bit into a readable string.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x1:
            out_string += " valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += " invalid"
            pt_paddr = 0
            pt_valid = False
            if entry & (0x1 << 62):
                # Invalid entry, but marked compressed by the VM layer.
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " read-only"

        if entry & (0x1 << 2):
            out_string += " user"
        else:
            out_string += " supervisor"

        if entry & (0x1 << 3):
            out_string += " PWT"

        if entry & (0x1 << 4):
            out_string += " PCD"

        if entry & (0x1 << 5):
            out_string += " accessed"

        if entry & (0x1 << 6):
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False

        if entry & (0x1 << 8):
            out_string += " global"

        # Bits 10:9 are available for software use.
        if entry & (0x3 << 9):
            out_string += " avail:{0:x}".format((entry >> 9) & 0x3)

        if entry & (0x1 << 63):
            out_string += " noexec"
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
352
def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes for EPT pmap
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each entry is 8 bytes; read the 64-bit entry at the requested index.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex-dump the full 512-entry table containing this entry.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Physical-address field masks: drop the high bits (63:52) plus the low
    # 12 bits for a table/4K entry, or the low 21 bits for a 2MB large page.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet mode. An EPT entry is usable if any of the low three
        # (read/write/execute) permission bits is set.
        if entry & 0x7 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            if entry & (0x1 <<7):
                # Large-page bit set: the entry maps a large page directly.
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose mode: decode each attribute bit into a readable string.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x7:
            out_string += "valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += "invalid"
            pt_paddr = 0
            pt_valid = False
            if entry & (0x1 << 62):
                # Invalid entry, but marked compressed by the VM layer.
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        # Low three bits are the read/write/execute permissions.
        if entry & 0x1:
            out_string += " readable"
        else:
            out_string += " no read"
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " no write"

        if entry & (0x1 << 2):
            out_string += " executable"
        else:
            out_string += " no exec"

        # Bits 5:3 encode the memory type.
        ctype = entry & 0x38
        if ctype == 0x30:
            out_string += " cache-WB"
        elif ctype == 0x28:
            out_string += " cache-WP"
        elif ctype == 0x20:
            out_string += " cache-WT"
        elif ctype == 0x8:
            out_string += " cache-WC"
        else:
            out_string += " cache-NC"

        if (entry & 0x40) == 0x40:
            out_string += " Ignore-PTA"

        if (entry & 0x100) == 0x100:
            out_string += " accessed"

        if (entry & 0x200) == 0x200:
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
445
def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT):
    """ Walk the l4 pmap entry.
        params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t
        vaddr : int - virtual address to walk
        ept_pmap : if truthy, decode each level with _PT_StepEPT (EPT format),
                   otherwise with _PT_Step (IA-32e format)
        returns: int - translated physical address, or 0 if there is no
                 valid translation
    """
    pt_paddr = unsigned(pmap_addr_val)
    pt_valid = (unsigned(pmap_addr_val) != 0)
    pt_large = 0
    # pframe_offset is re-derived at every level so that when the walk stops
    # early on a large mapping it holds the offset within that mapping.
    pframe_offset = 0
    if pt_valid:
        # Lookup bits 47:39 of linear address in PML4T
        pt_index = (vaddr >> 39) & 0x1ff
        pframe_offset = vaddr & 0x7fffffffff
        if verbose_level > vHUMAN :
            print("pml4 (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid:
        # Lookup bits 38:30 of the linear address in PDPT
        pt_index = (vaddr >> 30) & 0x1ff
        pframe_offset = vaddr & 0x3fffffff
        if verbose_level > vHUMAN:
            print("pdpt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        # Lookup bits 29:21 of the linear address in the PDT
        pt_index = (vaddr >> 21) & 0x1ff
        pframe_offset = vaddr & 0x1fffff
        if verbose_level > vHUMAN:
            print("pdt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        # Lookup bits 20:12 of the linear address in the PT
        pt_index = (vaddr >> 12) & 0x1ff
        pframe_offset = vaddr & 0xfff
        if verbose_level > vHUMAN:
            print("pt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    paddr = 0
    paddr_isvalid = False
    if pt_valid:
        # Combine the final frame address with the offset inside the mapping.
        paddr = pt_paddr + pframe_offset
        paddr_isvalid = True

    if verbose_level > vHUMAN:
        if paddr_isvalid:
            # Display the 32-bit word stored at the translated address.
            pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self)
            print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue))
        else:
            print("no translation")

    return paddr
509
def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT):
    """ Perform a 4-level page-table walk for an x86_64 pmap.
        params: pmapval - core.value representing pmap_t in kernel
        vaddr:  int     - int representing virtual address to walk
        verbose_level: vHUMAN/vSCRIPT/vDETAIL - output verbosity
        returns: int - translated physical address, or 0 if no translation
    """
    if pmapval.pm_cr3 != 0:
        if verbose_level > vHUMAN:
            print("Using normal Intel PMAP from pm_cr3\n")
        # Honor the caller-supplied verbosity rather than the global
        # config['verbosity'], so programmatic callers that pass an explicit
        # verbose_level get consistent output (the lldb command path passes
        # config['verbosity'] here anyway, so its behavior is unchanged).
        return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, verbose_level)
    else:
        # pm_cr3 == 0 means the pmap is EPT-backed (virtualized guest).
        if verbose_level > vHUMAN:
            print("Using EPT pmap from pm_eptp\n")
        return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, verbose_level)
523
def assert_64bit(val):
    """ Assert that val fits in an unsigned 64-bit quantity. """
    assert val < (1 << 64)
526
# ARM64 translation-table geometry: each translation table entry is 8 bytes
# (2^ARM64_TTE_SHIFT), and the virtual address space modeled here is 48 bits.
ARM64_TTE_SIZE = 8
ARM64_TTE_SHIFT = 3
ARM64_VMADDR_BITS = 48
530
def PmapBlockOffsetMaskARM64(page_size, level):
    """ Return the mask covering the byte offset within an ARM64 block
        mapping at the given translation level (0..3). """
    assert 0 <= level <= 3
    # Each table holds page_size / TTE-size entries; a block at 'level'
    # spans one page multiplied by that fan-out per remaining level.
    entries_per_table = page_size // ARM64_TTE_SIZE
    block_span = page_size * entries_per_table ** (3 - level)
    return block_span - 1
535
def PmapBlockBaseMaskARM64(page_size, level):
    """ Return the mask selecting the base-address bits of an ARM64 block
        mapping at the given translation level (0..3). """
    assert 0 <= level <= 3
    # Complement of the block-offset mask, clipped to the VA width.
    vaddr_mask = (1 << ARM64_VMADDR_BITS) - 1
    return vaddr_mask & ~PmapBlockOffsetMaskARM64(page_size, level)
539
# One TTE recorded during a walk: the table level it was read at, its raw
# 64-bit value, and whether it belongs to a stage-2 translation.
PmapTTEARM64 = namedtuple('PmapTTEARM64', ['level', 'value', 'stage2'])
541
def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False):
    """ Display the bits of an ARM64 translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level.  Valid values are 1, 2, or 3.
        stage2: True to decode the entry as a stage-2 descriptor.
        is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise.
    """
    # NOTE(review): 'numbers' is presumably brought in by one of the wildcard
    # imports at the top of the file — confirm.
    assert(isinstance(level, numbers.Integral))
    assert_64bit(tte)

    # Bit 0 is the valid bit for every descriptor type.
    if tte & 0x1 == 0x0:
        print("Invalid.")
        return

    # Bit 1 set at a non-final level marks a table pointer; otherwise the
    # entry is a block/page descriptor.
    if (tte & 0x2 == 0x2) and (level != 0x3):
        print("Type       = Table pointer.")
        print("Table addr = {:#x}.".format(tte & 0xfffffffff000))

        # Hierarchical attribute bits exist only on stage-1 table descriptors.
        if not stage2:
            print("PXN        = {:#x}.".format((tte >> 59) & 0x1))
            print("XN         = {:#x}.".format((tte >> 60) & 0x1))
            print("AP         = {:#x}.".format((tte >> 61) & 0x3))
            print("NS         = {:#x}.".format(tte >> 63))
    else:
        print("Type       = Block.")

        if stage2:
            # Stage 2 carries its memory attributes directly in bits 5:2.
            print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf))
        else:
            # Stage 1 indexes into MAIR via AttrIdx (bits 4:2).
            attr_index = (tte >> 2) & 0x7
            attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU',
                3: 'CACHE DISABLE',
                4: 'RESERVED (MTE if FEAT_MTE supported)',
                5: 'POSTED (DISABLE_XS if FEAT_XS supported)',
                6: 'POSTED_REORDERED (POSTED_COMBINED_REORDERED if FEAT_XS supported)',
                7: 'POSTED_COMBINED_REORDERED (POSTED_COMBINED_REORDERED_XS if FEAT_XS supported)' }

            # Only show the string version of the AttrIdx for CPU mappings since
            # these values don't apply to IOMMU mappings.
            if is_iommu_tte:
                print("AttrIdx    = {:#x}.".format(attr_index))
            else:
                print("AttrIdx    = {:#x} ({:s}).".format(attr_index, attr_string[attr_index]))
            print("NS         = {:#x}.".format((tte >> 5) & 0x1))

        # Access permissions: combined S2AP field for stage 2, AP for stage 1.
        if stage2:
            print("S2AP       = {:#x}.".format((tte >> 6) & 0x3))
        else:
            print("AP         = {:#x}.".format((tte >> 6) & 0x3))

        print("SH         = {:#x}.".format((tte >> 8) & 0x3))
        print("AF         = {:#x}.".format((tte >> 10) & 0x1))

        if not stage2:
            print("nG         = {:#x}.".format((tte >> 11) & 0x1))

        print("HINT       = {:#x}.".format((tte >> 52) & 0x1))

        # Execute-never: a 2-bit S2XN field for stage 2, separate PXN/XN
        # bits for stage 1.
        if stage2:
            print("S2XN       = {:#x}.".format((tte >> 53) & 0x3))
        else:
            print("PXN        = {:#x}.".format((tte >> 53) & 0x1))
            print("XN         = {:#x}.".format((tte >> 54) & 0x1))

        print("SW Use     = {:#x}.".format((tte >> 55) & 0xf))

    return
609
def PmapTTnIndexARM64(vaddr, pmap_pt_attr):
    """ Compute the translation-table index of vaddr for every level
        described by pmap_pt_attr, from level 0 through pta_max_level. """
    max_level = unsigned(pmap_pt_attr.pta_max_level)
    level_info = pmap_pt_attr.pta_level_info
    # Mask out each level's index field and shift it down to an index.
    return [(vaddr & unsigned(level_info[lvl].index_mask)) >> unsigned(level_info[lvl].shift)
            for lvl in range(max_level + 1)]
619
def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN, extra=None):
    """ Walk the ARM64 translation tables to translate a virtual address.
        params:
            pmap_pt_attr: page-table attribute structure describing the
                          geometry (page size, root/max levels, per-level
                          index masks and shifts)
            root_tte: root translation table (indexable array of TTEs)
            vaddr: int - virtual address to translate
            verbose_level: vHUMAN/vSCRIPT/vDETAIL - increasing verbosity
            extra: optional dict; when provided it is filled with details of
                   the walk (page_size, page_mask, paddr, is_valid, is_leaf,
                   and the visited TTEs as PmapTTEARM64 tuples)
        returns:
            int - translated physical address, or None if no valid mapping
    """
    # NOTE(review): '(int, int)' repeats int — looks like a py2 '(int, long)'
    # leftover; isinstance(vaddr, int) would be equivalent.
    assert(type(vaddr) in (int, int))
    assert_64bit(vaddr)
    assert_64bit(root_tte)

    # Obtain pmap attributes
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr)
    # Stage-2 walks decode TTE bits differently; default to stage 1 when
    # the attribute structure has no 'stage2' field.
    stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False)

    # The pmap starts at a page table level that is defined by register
    # values; the root level can be obtained from the attributes structure
    level = unsigned(pmap_pt_attr.pta_root_level)

    root_tt_index = tt_index[level]
    # Number of entries in the root table (not referenced again below).
    root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
        unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1
    tte = int(unsigned(root_tte[root_tt_index]))

    # Walk the page tables
    paddr = None
    max_level = unsigned(pmap_pt_attr.pta_max_level)
    is_valid = True
    is_leaf = False

    if extra is not None:
        extra['page_size'] = page_size
        extra['page_mask'] = page_size - 1
        extra['paddr']     = None
        extra['is_valid']  = True
        extra['is_leaf']   = False
        extra['tte']       = []

    while (level <= max_level):
        if extra is not None:
            extra['tte'].append(PmapTTEARM64(level=level, value=tte, stage2=stage2))

        if verbose_level >= vSCRIPT:
            print("L{} entry: {:#x}".format(level, tte))
        if verbose_level >= vDETAIL:
            PmapDecodeTTEARM64(tte, level, stage2)

        # Bit 0 clear means the descriptor is invalid: abort the walk.
        if tte & 0x1 == 0x0:
            if verbose_level >= vHUMAN:
                print("L{} entry invalid: {:#x}\n".format(level, tte))

            if extra is not None:
                extra['is_valid'] = False
            is_valid = False
            break

        # Handle leaf entry
        # A block descriptor (bit 1 clear) or any valid entry at the final
        # level maps memory directly.
        if tte & 0x2 == 0x0 or level == max_level:
            base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level)
            offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level)
            paddr = tte & base_mask
            paddr = paddr | (vaddr & offset_mask)

            if level != max_level:
                # NOTE(review): this prints unconditionally for block
                # mappings, regardless of verbose_level — confirm intentional.
                print("phys: {:#x}".format(paddr))

            if extra is not None:
                extra['is_leaf'] = True
                extra['paddr'] = paddr
            is_leaf = True
            break
        else:
        # Handle page table entry
            # Follow the table pointer to the next level's entry for vaddr.
            next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1])
            assert(isinstance(next_phys, numbers.Integral))

            next_virt = kern.PhysToKernelVirt(next_phys)
            assert(isinstance(next_virt, numbers.Integral))

            if verbose_level >= vDETAIL:
                print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt))

            ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*")
            tte = int(unsigned(dereference(ttep)))
            assert(isinstance(tte, numbers.Integral))

        # We've parsed one level, so go to the next level
        assert(level <= 3)
        level = level + 1


    if verbose_level >= vHUMAN:
        if paddr:
            print("Translation of {:#x} is {:#x}.".format(vaddr, paddr))
        else:
            print("(no translation)")

    return paddr
715
def PmapWalk(pmap, vaddr, verbose_level = vHUMAN):
    """ Dispatch a page-table walk for vaddr to the walker matching the
        target architecture. Raises NotImplementedError otherwise. """
    arch = kern.arch
    if arch == 'x86_64':
        return PmapWalkX86_64(pmap, vaddr, verbose_level)
    if arch.startswith('arm64'):
        # Use the pmap's own page-table attributes when present; otherwise
        # fall back to the native attribute structure.
        if hasattr(pmap, 'pmap_pt_attr'):
            pt_attr = pmap.pmap_pt_attr
        else:
            pt_attr = kern.globals.native_pt_attr
        return PmapWalkARM64(pt_attr, pmap.tte, vaddr, verbose_level)
    raise NotImplementedError("PmapWalk does not support {0}".format(arch))
725
@lldb_command('pmap_walk')
def PmapWalkHelper(cmd_args=None):
    """ Perform a page-table walk in <pmap> for <virtual_address>.
        Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e]
            Multiple -v's can be specified for increased verbosity
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to pmap_walk.")

    # Resolve the pmap argument and walk at the globally-configured verbosity.
    target_pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t')
    target_vaddr = ArgumentStringToInt(cmd_args[1])
    PmapWalk(target_pmap, target_vaddr, config['verbosity'])
739
def GetMemoryAttributesFromUser(requested_type):
    """ Map a user-supplied page-table geometry name ('4k', '16k', '16k_s2',
        case-insensitive) to the matching kernel attribute global.
        Returns None for an unrecognized name, or for '16k_s2' on kernels
        without stage-2 support. """
    key = requested_type.lower()
    if key == '4k':
        return kern.globals.pmap_pt_attr_4k
    if key == '16k':
        return kern.globals.pmap_pt_attr_16k
    if key == '16k_s2':
        # Stage-2 attributes only exist on hypervisor-capable kernels.
        if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2'):
            return kern.globals.pmap_pt_attr_16k_stage2
        return None
    return None
752
@lldb_command('ttep_walk')
def TTEPWalkPHelper(cmd_args=None):
    """ Perform a page-table walk in <root_ttep> for <virtual_address>.
        Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e]
        Multiple -v's can be specified for increased verbosity
        """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to ttep_walk.")

    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch))

    # Convert the physical root-table pointer into its kernel-virtual alias.
    root_phys = ArgumentStringToInt(cmd_args[0])
    tte = kern.GetValueFromAddress(kern.PhysToKernelVirt(root_phys), 'unsigned long *')
    target_vaddr = ArgumentStringToInt(cmd_args[1])

    # Default to the native page-table geometry unless one was requested.
    if len(cmd_args) < 3:
        pmap_pt_attr = kern.globals.native_pt_attr
    else:
        pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
    if pmap_pt_attr is None:
        raise ArgumentError("Invalid translation attribute type.")

    return PmapWalkARM64(pmap_pt_attr, tte, target_vaddr, config['verbosity'])
773
@lldb_command('decode_tte')
def DecodeTTE(cmd_args=None):
    """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2]
        Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2]
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to decode_tte.")
    if len(cmd_args) > 2 and cmd_args[2] not in ["s1", "s2"]:
        raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2]))
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("decode_tte does not support {0}".format(kern.arch))
    # The stage defaults to stage 1 when no third argument is supplied.
    stage2 = len(cmd_args) > 2 and cmd_args[2] == "s2"
    PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2)
788
# Flag bits kept in the upper portion of a pv_head_table entry.  Readers in
# this file OR these back into entry values after reading the table through an
# integer cast (which drops the high pointer bits) so the entry can be safely
# dereferenced as a pointer again — see PVWalkARM/GetPtDesc below.
PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) | (1 << 58) | (1 << 57) | (1 << 56) | (1 << 55) | (1 << 54)
PVH_HIGH_FLAGS_ARM32 = (1 << 31)
791
def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN):
    """ Dump information about a single mapping retrieved by the pv_head_table.

        pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP,
              or None if type PVH_TYPE_PTEP.
        ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with
              the high flags already set (the type bits don't need to be cleared).
              For type PVH_TYPE_PVEP this will be the value retrieved from the
              pve_ptep[] array.
        verbose_level: at vDETAIL and above, an extra line describing who owns
              the mapping is printed.
    """
    if kern.arch.startswith('arm64'):
        iommu_flag = 0x4
        iommu_table_flag = 1 << 63
    else:
        iommu_flag = 0
        iommu_table_flag = 0

    # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries.
    if pvep is not None and (ptep & 0x1):
        # Note: It's not possible for IOMMU mappings to be marked as alt acct so
        # setting this string is mutually exclusive with setting the IOMMU strings.
        pte_str = ' (alt acct)'
    else:
        pte_str = ''

    if pvep is not None:
        pve_str = 'PVEP {:#x}, '.format(pvep)
    else:
        pve_str = ''

    # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this
    # either does nothing or clears out the AltAcct bit.
    ptep = ptep & ~0x3

    # When printing with extra verbosity, print an extra newline that describes
    # who owns the mapping.
    extra_str = ''

    if ptep & iommu_flag:
        # The mapping is an IOMMU Mapping
        ptep = ptep & ~iommu_flag

        # Due to LLDB automatically setting all the high bits of pointers, when
        # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set
        # the iommu_table_flag, which means this check only works for PVH entries
        # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH
        # entry which has the right casting applied to avoid this issue).
        #
        # Why don't we just do the same casting for pve_ptep[] you ask? Well not
        # for a lack of trying, that's for sure. If you can figure out how to
        # cast that array correctly, then be my guest.
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            if ptep & iommu_table_flag:
                pte_str = ' (IOMMU table), entry'
                ptd = GetPtDesc(KVToPhysARM(ptep))
                iommu = dereference(ptd.iommu)
            else:
                # Instead of dumping the PTE (since we don't have that), dump the
                # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc).
                #
                # This works because later on when the "ptep" is dereferenced as a
                # PTE pointer (uint64_t pointer), the descriptor pointer will be
                # dumped as that's the first 64-bit value in the IOMMU state object.
                pte_str = ' (IOMMU state), descriptor'
                ptep = ptep | iommu_table_flag
                iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *'))

            # For IOMMU mappings, dump who owns the mapping as the extra string.
            extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name)
            if unsigned(iommu.name) != 0:
                extra_str += '/{:s}'.format(iommu.name)
            extra_str += ' (iommu state: {:x})'.format(addressof(iommu))
        else:
            ptd = GetPtDesc(KVToPhysARM(ptep))
            extra_str = 'Mapped by IOMMU {:x}'.format(ptd.iommu)
    else:
        # The mapping is a CPU Mapping
        pte_str += ', entry'
        ptd = GetPtDesc(KVToPhysARM(ptep))
        if ptd.pmap == kern.globals.kernel_pmap:
            extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap)
        elif verbose_level >= vDETAIL:
            task = TaskForPmapHelper(ptd.pmap)
            extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>")
    try:
        print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))))
    # Best-effort: the PTE page may be unreadable in the core file.  Narrowed
    # from a bare 'except:' so KeyboardInterrupt/SystemExit aren't swallowed.
    except Exception:
        print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str))

    if verbose_level >= vDETAIL:
        print("    |-- {:s}".format(extra_str))
883
def PVWalkARM(pai, verbose_level = vHUMAN):
    """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap.

        pai: physical address index (PAI) corresponding to the pv_head_table
             entry to walk.
        verbose_level: Set to vSCRIPT or higher to print extra info around the
                       the pv_head_table/pp_attr_table flags and to dump the
                       pt_desc_t object if the type is a PTD.
    """
    # LLDB will automatically try to make pointer values dereferencable by
    # setting the upper bits if they aren't set. We need to parse the flags
    # stored in the upper bits later, so cast the pv_head_table to an array of
    # integers to get around this "feature". We'll add the upper bits back
    # manually before deref'ing anything.
    pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*")
    pvh_raw = unsigned(pv_head_table[pai])
    pvh = pvh_raw
    # The low two bits of the entry encode its type (NULL/PVEP/PTEP/PTD).
    pvh_type = pvh & 0x3

    print("PVH raw value: {:#x}".format(pvh_raw))
    # Restore the high bits stripped by the integer cast above so the entry can
    # be used as a pointer again.
    if kern.arch.startswith('arm64'):
        pvh = pvh | PVH_HIGH_FLAGS_ARM64
    else:
        pvh = pvh | PVH_HIGH_FLAGS_ARM32

    if pvh_type == 0:
        print("PVH type: NULL")
    elif pvh_type == 3:
        print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3))
    elif pvh_type == 2:
        print("PVH type: single PTE")
        PVDumpPTE(None, pvh, verbose_level)
    elif pvh_type == 1:
        pvep = pvh & ~0x3
        print("PVH type: PTE list")
        # Each pv_entry_t carries two PTE slots (pve_ptep[0..1]); only follow
        # pve_next after both slots of the current PVE have been consumed.
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")

            if pve.pve_ptep[pve_ptep_idx] != 0:
                PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level)

            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)

    if verbose_level >= vDETAIL:
        if (pvh_type == 1) or (pvh_type == 2):
            # Dump pv_head_table flags when there's a valid mapping.
            pvh_flags = []

            if pvh_raw & (1 << 62):
                pvh_flags.append("CPU")
            if pvh_raw & (1 << 60):
                pvh_flags.append("EXEC")
            if pvh_raw & (1 << 59):
                pvh_flags.append("LOCKDOWN_KC")
            if pvh_raw & (1 << 58):
                pvh_flags.append("HASHED")
            if pvh_raw & (1 << 57):
                pvh_flags.append("LOCKDOWN_CS")
            if pvh_raw & (1 << 56):
                pvh_flags.append("LOCKDOWN_RO")
            if pvh_raw & (1 << 55):
                pvh_flags.append("RETIRED")
            if pvh_raw & (1 << 54):
                # Bit 54's meaning depends on the page protection scheme in use.
                if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
                    pvh_flags.append("SECURE_FLUSH_NEEDED")
                else:
                    pvh_flags.append("SLEEPABLE_LOCK")
            if kern.arch.startswith('arm64') and pvh_raw & (1 << 61):
                pvh_flags.append("LOCK")

            print("PVH Flags: {}".format(pvh_flags))

        # Always dump pp_attr_table flags (these can be updated even if there aren't mappings).
        ppattr = unsigned(kern.globals.pp_attr_table[pai])
        print("PPATTR raw value: {:#x}".format(ppattr))

        # The low 6 bits hold the WIMG attribute field; the rest are booleans.
        ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)]
        if ppattr & 0x40:
            ppattr_flags.append("REFERENCED")
        if ppattr & 0x80:
            ppattr_flags.append("MODIFIED")
        if ppattr & 0x100:
            ppattr_flags.append("INTERNAL")
        if ppattr & 0x200:
            ppattr_flags.append("REUSABLE")
        if ppattr & 0x400:
            ppattr_flags.append("ALTACCT")
        if ppattr & 0x800:
            ppattr_flags.append("NOENCRYPT")
        if ppattr & 0x1000:
            ppattr_flags.append("REFFAULT")
        if ppattr & 0x2000:
            ppattr_flags.append("MODFAULT")
        if ppattr & 0x4000:
            ppattr_flags.append("MONITOR")
        if ppattr & 0x8000:
            ppattr_flags.append("NO_MONITOR")

        print("PPATTR Flags: {}".format(ppattr_flags))

        if pvh_type == 3:
            def RunLldbCmdHelper(command):
                """Helper for dumping an LLDB command right before executing it
                and printing the results.
                command: The LLDB command (as a string) to run.

                Example input: "p/x kernel_pmap".
                """
                print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command)))
            # Dump the page table descriptor object
            ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
            RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd))

            # Depending on the system, more than one ptd_info can be associated
            # with a single PTD. Only dump the first PTD info and assume the
            # user knows to dump the rest if they're on one of those systems.
            RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]")
1005
@lldb_command('pv_walk')
def PVWalk(cmd_args=None):
    """ Show mappings for <physical_address | PAI> tracked in the PV list.
        Syntax: (lldb) pv_walk <physical_address | PAI> [-vv]

        Extra verbosity will pretty print the pv_head_table/pp_attr_table flags
        as well as dump the page table descriptor (PTD) struct if the entry is a
        PTD.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pv_walk.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pv_walk does not support {0}".format(kern.arch))

    phys = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    # ConvertPhysAddrToPai passes an already-converted PAI through unchanged
    # and ensures the physical address is kernel-managed.
    pai = ConvertPhysAddrToPai(phys)

    PVWalkARM(pai, config['verbosity'])
1027
@lldb_command('kvtophys')
def KVToPhys(cmd_args=None):
    """ Translate a kernel virtual address to the corresponding physical address.
        Assumes the virtual address falls within the kernel static region.
        Syntax: (lldb) kvtophys <kernel virtual address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to kvtophys.")
    if kern.arch.startswith('arm'):
        vaddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
        print("{:#x}".format(KVToPhysARM(vaddr)))
    elif kern.arch == 'x86_64':
        # x86_64: the physmap is a linear window, so subtract its base.
        vaddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
        print("{:#x}".format(vaddr - unsigned(kern.globals.physmap_base)))
1040
@lldb_command('phystokv')
def PhysToKV(cmd_args=None):
    """ Translate a physical address to the corresponding static kernel virtual address.
        Assumes the physical address corresponds to managed DRAM.
        Syntax: (lldb) phystokv <physical address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to phystokv.")
    paddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print("{:#x}".format(kern.PhysToKernelVirt(paddr)))
1050
def KVToPhysARM(addr):
    """ Translate a kernel virtual address to a physical address on ARM.

        addr: int - kernel virtual address to translate.
        returns: int - the physical address backing addr.
        raises: ValueError on SPTM systems when addr is not covered by any
                PAPT range.
    """
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        # PPL (or unprotected) systems: check the phys-to-virt ranges first;
        # fall through to the static-window calculation below on a miss.
        ptov_table = kern.globals.ptov_table
        for i in range(0, kern.globals.ptov_index):
            if (addr >= int(unsigned(ptov_table[i].va))) and (addr < (int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].len)))):
                return (addr - int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].pa)))
    else:
        # SPTM systems: search the physical aperture (PAPT) ranges; there is no
        # static-window fallback on this path.
        papt_table = kern.globals.libsptm_papt_ranges
        page_size = kern.globals.page_size
        for i in range(0, unsigned(dereference(kern.globals.libsptm_n_papt_ranges))):
            if (addr >= int(unsigned(papt_table[i].papt_start))) and (addr < (int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].num_mappings) * page_size))):
                return (addr - int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].paddr_start)))
        raise ValueError("VA {:#x} not found in physical region lookup table".format(addr))
    # Reached only on the PPL path: assume addr lies in the static region
    # described by gVirtBase/gPhysBase.
    return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase))
1065
1066
def GetPtDesc(paddr):
    """ Return the pt_desc_t pointer recorded in pv_head_table for a
        page-table page.

        paddr: physical address of the page-table page.
        raises: ValueError when the pv_head_table entry is not a PTD.
    """
    page_num = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size
    # Restore the pointer high bits the pmap keeps as flags.
    high_bits = PVH_HIGH_FLAGS_ARM64 if kern.arch.startswith('arm64') else PVH_HIGH_FLAGS_ARM32
    pvh = unsigned(kern.globals.pv_head_table[page_num]) | high_bits
    # The low two bits encode the entry type; 0x3 is a page-table descriptor.
    if (pvh & 0x3) != 0x3:
        raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh))
    return kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
1079
def PhysToFrameTableEntry(paddr):
    """ Look up the SPTM frame table entry describing a physical address.

        paddr: int - physical address (need not be page-aligned).
        returns: the frame_table entry for managed DRAM, the matching
                 io_frame_table entry for a registered I/O range, or the
                 catch-all xnu_io_fte otherwise.
    """
    first_phys = int(unsigned(kern.globals.sptm_first_phys))
    last_phys = int(unsigned(kern.globals.sptm_last_phys))
    # Managed DRAM is covered by the main frame table.  This must be a
    # conjunction: the original 'or' made the condition vacuously true for
    # every address, so the I/O range search below was unreachable.
    if first_phys <= paddr < last_phys:
        return kern.globals.frame_table[(paddr - first_phys) // kern.globals.page_size]
    # Use floor division so page_idx is an integer page index comparable to
    # the io_range page indices ('/' would produce a float).
    page_idx = paddr // kern.globals.page_size
    for i in range(0, kern.globals.sptm_n_io_ranges):
        base = kern.globals.io_frame_table[i].io_range.phys_page_idx
        end = base + kern.globals.io_frame_table[i].io_range.num_pages
        if page_idx >= base and page_idx < end:
            return kern.globals.io_frame_table[i]
    # Not DRAM and not a registered I/O range: return the generic entry.
    return kern.globals.xnu_io_fte
1090
@lldb_command('phystofte')
def PhysToFTE(cmd_args=None):
    """ Translate a physical address to the corresponding SPTM frame table entry pointer
        Syntax: (lldb) phystofte <physical address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to phystofte.")

    paddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print(repr(PhysToFrameTableEntry(paddr)))
1101
# Frame-type values compared against SPTM frame table entries' 'type' field in
# ShowPTEARM below.  Presumably these mirror the SPTM's frame type enum — keep
# in sync with the SPTM headers if those values change.
XNU_IOMMU = 23
XNU_PAGE_TABLE = 19
XNU_PAGE_TABLE_SHARED = 20
XNU_PAGE_TABLE_ROZONE = 21
XNU_PAGE_TABLE_COMMPAGE = 22
SPTM_PAGE_TABLE = 9
1108
def ShowPTEARM(pte, page_size, level):
    """ Display vital information about an ARM page table entry
        pte: kernel virtual address of the PTE.  page_size and level may be None,
        in which case we'll try to infer them from the page table descriptor.
        Inference of level may only work for L2 and L3 TTEs depending upon system
        configuration.
    """
    pt_index = 0
    stage2 = False
    def GetPageTableInfo(ptd, paddr):
        # 'stage2' must be in the nonlocal list: without it, the assignment
        # below would create a function-local shadow and the outer stage2
        # (used for the "maps IPA/VA" label and PTE decoding) would always
        # remain False, even for stage-2 pmaps.
        nonlocal pt_index, page_size, level, stage2
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            # First load ptd_info[0].refcnt so that we can check if this is an IOMMU page.
            # IOMMUs don't split PTDs across multiple 4K regions as CPU page tables sometimes
            # do, so the IOMMU refcnt token is always stored at index 0.  If this is not
            # an IOMMU page, we may end up using a different final value for pt_index below.
            refcnt = ptd.ptd_info[0].refcnt
            # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001.
            is_iommu_pte = (refcnt & 0x8000) == 0x8000
            if not is_iommu_pte and page_size is None and hasattr(ptd.pmap, 'pmap_pt_attr'):
                page_size = ptd.pmap.pmap_pt_attr.pta_page_size
            elif page_size is None:
                page_size = kern.globals.native_pt_attr.pta_page_size
            pt_index = (pte % kern.globals.page_size) // page_size
            refcnt = ptd.ptd_info[pt_index].refcnt
            if not is_iommu_pte and hasattr(ptd.pmap, 'pmap_pt_attr') and hasattr(ptd.pmap.pmap_pt_attr, 'stage2'):
                stage2 = ptd.pmap.pmap_pt_attr.stage2
            if level is None:
                # A refcnt of 0x4000 identifies a non-leaf (L2) table;
                # otherwise assume a leaf (L3) table.
                if refcnt == 0x4000:
                    level = 2
                else:
                    level = 3
            if is_iommu_pte:
                iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name)
                if unsigned(dereference(ptd.iommu).name) != 0:
                    iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name)
                info_str = "iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name)
            else:
                info_str = None
            return (int(unsigned(refcnt)), level, info_str)
        else:
            # SPTM systems: classify the page via its frame table entry.
            fte = PhysToFrameTableEntry(paddr)
            if fte.type == XNU_IOMMU:
                if page_size is None:
                    page_size = kern.globals.native_pt_attr.pta_page_size
                info_str = "PTD iommu token: {:#x} (ID {:#x} TSD {:#x})".format(ptd.iommu, fte.iommu_page.iommu_id, fte.iommu_page.iommu_tsd)
                return (int(unsigned(fte.iommu_page.iommu_refcnt._value)), 0, info_str)
            elif fte.type in [XNU_PAGE_TABLE, XNU_PAGE_TABLE_SHARED, XNU_PAGE_TABLE_ROZONE, XNU_PAGE_TABLE_COMMPAGE, SPTM_PAGE_TABLE]:
                if page_size is None:
                    if hasattr(ptd.pmap, 'pmap_pt_attr'):
                        page_size = ptd.pmap.pmap_pt_attr.pta_page_size
                    else:
                        page_size = kern.globals.native_pt_attr.pta_page_size
                return (int(unsigned(fte.cpu_page_table.mapping_refcnt._value)), int(unsigned(fte.cpu_page_table.level)), None)
            else:
                raise ValueError("Unrecognized FTE type {:#x}".format(fte.type))
    pte_paddr = KVToPhysARM(pte)
    ptd = GetPtDesc(pte_paddr)
    refcnt, level, info_str = GetPageTableInfo(ptd, pte_paddr)
    wiredcnt = ptd.ptd_info[pt_index].wiredcnt
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        va = ptd.va[pt_index]
    else:
        va = ptd.va
    print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x}, va: {:#x})".format(ptd, refcnt, wiredcnt, va))

    # The pmap/iommu field is a union, so only print the correct one.
    if info_str is not None:
        print(info_str)
    else:
        if ptd.pmap == kern.globals.kernel_pmap:
            pmap_str = "(kernel_pmap)"
        else:
            task = TaskForPmapHelper(ptd.pmap)
            pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>")
        print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str))
        nttes = page_size // 8
        granule = page_size * (nttes ** (3 - level))
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            pte_pgoff = pte % page_size
        else:
            pte_pgoff = pte % kern.globals.native_pt_attr.pta_page_size
        pte_pgoff = pte_pgoff // 8
        print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(va)) + (pte_pgoff * granule)))
        pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *'))))
        print("value: {:#x}".format(pteval))
        print("level: {:d}".format(level))
        PmapDecodeTTEARM64(pteval, level, stage2)
1198
@lldb_command('showpte')
def ShowPTE(cmd_args=None):
    """ Display vital information about the page table entry at VA <pte>
        Syntax: (lldb) showpte <pte_va> [level] [4k|16k|16k_s2]
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to showpte.")

    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("showpte does not support {0}".format(kern.arch))

    # Optional third argument selects the page size via translation attributes.
    page_size = None
    if len(cmd_args) >= 3:
        pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
        if pmap_pt_attr is None:
            raise ArgumentError("Invalid translation attribute type.")
        page_size = pmap_pt_attr.pta_page_size

    # Optional second argument gives the translation level explicitly.
    level = ArgumentStringToInt(cmd_args[1]) if len(cmd_args) >= 2 else None

    ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), page_size, level)
1220
def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action):
    """ Perform the specified action for all valid mappings in an ARM64 translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 2 or 3
        va: virtual address mapped by the first entry of tt
        action: callback invoked as action(pmap, level, type, tte_ptr, paddr,
                va, granule) for each valid TTE; returning True allows
                recursion into table-type entries.
    """
    # Obtain pmap attributes
    pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    for i in range(nttes):
        try:
            tte = tt[i]
            # Bit 0 clear means the entry is invalid; skip it.
            if tte & 0x1 == 0x0:
                continue

            tt_next = None
            paddr = unsigned(tte) & unsigned(page_base_mask)

            # Leaf entry: either a block mapping at an intermediate level or a
            # page entry at the max level.  (Local renamed from 'type' to avoid
            # shadowing the builtin.)
            if tte & 0x2 == 0x0 or level == max_level:
                entry_type = 'block' if level < max_level else 'entry'
                granule = PmapBlockOffsetMaskARM64(page_size, level) + 1
            else:
                # Table entry pointing to the next translation level.
                entry_type = 'table'
                granule = page_size
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')

            mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i)
            if action(pmap, level, entry_type, addressof(tt[i]), paddr, mapped_va, granule):
                if tt_next is not None:
                    FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action)

        except Exception as exc:
            # Best effort: the table page may be unreadable in the core file.
            print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))))
1262
def ScanPageTables(action, targetPmap=None):
    """ Perform the specified action for all valid mappings in all page tables,
        optionally restricted to a single pmap.
        action: callback passed through to FindMappingAtLevelARM64; also invoked
                once per pmap with type 'root' for the root table.
        targetPmap: pmap whose page table should be scanned.  If None, all pmaps on
                system will be scanned.
    """
    print("Scanning all available translation tables.  This may take a long time...")
    def ScanPmap(pmap, action):
        if kern.arch.startswith('arm64'):
            # Obtain pmap attributes
            pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
            granule = pmap_pt_attr.pta_page_size
            level = unsigned(pmap_pt_attr.pta_root_level)
            # Entry count of the root table, derived from the root level's
            # index mask and shift.
            root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
                unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1

        # NOTE(review): pmap_pt_attr/granule are only assigned in the arm64
        # branch above, so this line would raise NameError on other targets —
        # this helper appears to assume arm64 in practice; confirm callers.
        if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule):
            if kern.arch.startswith('arm64'):
                FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action)

    if targetPmap is not None:
        ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action)
    else:
        # No target given: walk every pmap registered on the system.
        for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
            ScanPmap(pmap, action)
1287
@lldb_command('showallmappings')
def ShowAllMappings(cmd_args=None):
    """ Find and display all available mappings on the system for
        <physical_address>.  Optionally only searches the pmap
        specified by [<pmap>]
        Syntax: (lldb) showallmappings <physical_address> [<pmap>]
        WARNING: this macro can take a long time (up to 30min.) to complete!
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to showallmappings.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showallmappings does not support {0}".format(kern.arch))

    pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    targetPmap = cmd_args[1] if len(cmd_args) > 1 else None

    def printMatchedMapping(pmap, level, type, tte, paddr, va, granule):
        # Report any translation entry whose physical range covers 'pa'; always
        # return True so the scan continues/recurses.
        if paddr <= pa < (paddr + granule):
            print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va))
        return True

    ScanPageTables(printMatchedMapping, targetPmap)
1309
@lldb_command('showptusage')
def ShowPTUsage(cmd_args=None):
    """ Display a summary of pagetable allocations for a given pmap.
        Syntax: (lldb) showptusage [<pmap>]
        WARNING: this macro can take a long time (> 1hr) to complete!
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showptusage does not support {0}".format(kern.arch))
    targetPmap = None
    # cmd_args defaults to None, so guard against that before calling len()
    # (the unguarded len() would raise TypeError).
    if cmd_args is not None and len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    # One-element lists act as mutable counters for the nested callback.
    lastPmap = [None]
    numTables = [0]
    numUnnested = [0]
    numPmaps = [0]
    def printValidTTE(pmap, level, type, tte, paddr, va, granule):
        """ ScanPageTables callback: count and print table entries per pmap. """
        unnested = ""
        nested_region_addr = int(unsigned(pmap.nested_region_addr))
        nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size))
        # Print a header line the first time each pmap is seen.
        if lastPmap[0] is None or (pmap != lastPmap[0]):
            lastPmap[0] = pmap
            numPmaps[0] = numPmaps[0] + 1
            print ("pmap {:#x}:".format(pmap))
        if type == 'root':
            return True
        # An L2 table inside the nested region whose PTD is owned by this pmap
        # (rather than a shared pmap) was likely unnested.
        if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end):
            ptd = GetPtDesc(paddr)
            if ptd.pmap != pmap:
                return False
            else:
                numUnnested[0] = numUnnested[0] + 1
                unnested = " (likely unnested)"
        numTables[0] = numTables[0] + 1
        print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested)
        # Stop descending at L2: leaf tables are not counted.
        if level == 2:
            return False
        else:
            return True
    ScanPageTables(printValidTTE, targetPmap)
    print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables[0], numUnnested[0], numPmaps[0]))
1350
def checkPVList(pmap, level, type, tte, paddr, va, granule):
    """ Checks an ARM physical-to-virtual mapping list for consistency errors.
        pmap: owner of the translation table
        level: translation table level.  PV lists will only be checked for L2 (arm32) or L3 (arm64) tables.
        type: unused
        tte: KVA of PTE to check for presence in PV list.  If None, presence check will be skipped.
        paddr: physical address whose PV list should be checked.  Need not be page-aligned.
        va: unused
        granule: unused
    """
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    page_size = kern.globals.page_size
    if kern.arch.startswith('arm64'):
        page_offset_mask = (page_size - 1)
        page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
        paddr = paddr & page_base_mask
        max_level = 3
        pvh_set_bits = PVH_HIGH_FLAGS_ARM64
    # NOTE(review): max_level/pvh_set_bits/page_base_mask are only assigned on
    # arm64; on other targets the next line raises NameError.  All callers in
    # this file gate on arm64 before reaching here.
    if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys:
        return True
    pn = (paddr - vm_first_phys) // page_size
    pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits
    pvh_type = pvh & 0x3
    if pmap is not None:
        pmap_str = "pmap: {:#x}: ".format(pmap)
    else:
        pmap_str = ''
    if tte is not None:
        tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr)
    else:
        tte_str = "paddr {:#x}: ".format(paddr)
    if pvh_type == 0 or pvh_type == 3:
        print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type))
    elif pvh_type == 2:
        # Single-PTE entry: the entry itself is the PTE pointer.
        ptep = pvh & ~0x3
        if tte is not None and ptep != unsigned(tte):
            print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep))
        try:
            pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
            if (pte != paddr):
                print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
        except Exception as exc:
            print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep))
    elif pvh_type == 1:
        # PVE list: each pv_entry_t holds two PTE slots.
        pvep = pvh & ~0x3
        tte_match = False
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")
            ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3
            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)
            if ptep == 0:
                continue
            if tte is not None and ptep == unsigned(tte):
                tte_match = True
            try:
                pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
                if (pte != paddr):
                    print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
            except Exception as exc:
                print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep))
        if tte is not None and not tte_match:
            # tte_str already carries the physical address; formatting the
            # integer paddr with '{:s}' here used to raise a ValueError right
            # when a missing PV entry should have been reported.
            print("{:s}{:s}not found in PV list".format(pmap_str, tte_str))
    return True
1418
@lldb_command('pv_check', 'P')
def PVCheck(cmd_args=None, cmd_options={}):
    """ Check the physical-to-virtual mapping for a given PTE or physical address
        Syntax: (lldb) pv_check <addr> [-P]
            -P        : Interpret <addr> as a physical address rather than a PTE
    """
    if cmd_args is None or len(cmd_args) == 0:
        raise ArgumentError("Too few arguments to pv_check.")
    # Only the arm64 PV-head-table layout is understood by checkPVList().
    if kern.arch.startswith('arm64'):
        level = 3
    else:
        raise NotImplementedError("pv_check does not support {0}".format(kern.arch))
    if "-P" in cmd_options:
        # Argument is a raw physical address; there is no PTE to match against
        # the PV list, so only the reverse mappings themselves are validated.
        pte = None
        pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long")))
    else:
        # Argument is a PTE pointer; its contents supply the physical address
        # whose PV list should contain this PTE.
        pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *')
        pa = int(unsigned(dereference(pte)))
    checkPVList(None, level, None, pte, pa, 0, None)
1438
@lldb_command('check_pmaps')
def CheckPmapIntegrity(cmd_args=None):
    """ Performs a system-wide integrity check of all PTEs and associated PV lists.
        Optionally only checks the pmap specified by [<pmap>]
        Syntax: (lldb) check_pmaps [<pmap>]
        WARNING: this macro can take a HUGE amount of time (several hours) if you do not
        specify [pmap] to limit it to a single pmap.  It will also give false positives
        for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM.
        Use of this macro without the [<pmap>] argument is heavily discouraged.
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch))
    targetPmap = None
    # cmd_args defaults to None; a bare len() here would raise TypeError when
    # the macro is invoked with no arguments, so test truthiness instead.
    if cmd_args:
        targetPmap = cmd_args[0]
    ScanPageTables(checkPVList, targetPmap)
1455
@lldb_command('pmapsforledger')
def PmapsForLedger(cmd_args=None):
    """ Find and display all pmaps currently using <ledger>.
        Syntax: (lldb) pmapsforledger <ledger>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmapsforledger.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch))
    target_ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t')
    # Walk the global list of pmaps, reporting every one that shares the ledger.
    for candidate in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if candidate.ledger == target_ledger:
            print("pmap: {:#x}".format(candidate))
1469
1470
def IsValidPai(pai):
    """ Return True when the given value is a valid physical address index
        (PAI), i.e. lies in [0, number of kernel-managed pages).

        The last possible PAI is derived from the span of kernel-managed
        physical memory.  All contemporary SoCs reserve the bottom part of
        the address space, so there shouldn't be any valid physical addresses
        between zero and the last PAI either.
    """
    first_phys = unsigned(kern.globals.vm_first_phys)
    last_phys = unsigned(kern.globals.vm_last_phys)
    managed_pages = (last_phys - first_phys) // unsigned(kern.globals.page_size)

    return 0 <= pai < managed_pages
1489
def ConvertPaiToPhysAddr(pai):
    """ Convert the given Physical Address Index (PAI) into a physical address.

        Inputs that are not valid PAIs (most likely already physical
        addresses) are returned unchanged.
    """
    # Values outside the PAI range pass straight through.
    if not IsValidPai(pai):
        return pai

    return (pai * unsigned(kern.globals.page_size)) + unsigned(kern.globals.vm_first_phys)
1503
def ConvertPhysAddrToPai(pa):
    """ Convert the given physical address into a Physical Address Index (PAI).

        Inputs that are already valid PAIs are returned unchanged.  A value
        that is neither a valid PAI nor a kernel-managed physical address
        raises ArgumentError.
    """
    # Already a PAI: nothing to do.
    if IsValidPai(pa):
        return pa

    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    if pa < vm_first_phys or pa >= vm_last_phys:
        raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, vm_first_phys, vm_last_phys))

    return (pa - vm_first_phys) // unsigned(kern.globals.page_size)
1521
@lldb_command('pmappaindex')
def PmapPaIndex(cmd_args=None):
    """ Display both a physical address and physical address index (PAI) when
        provided with only one of those values.

        Syntax: (lldb) pmappaindex <physical address | PAI>

        NOTE: This macro will throw an exception if the input isn't a valid PAI
              and is also not a kernel-managed physical address.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmappaindex.")

    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmappaindex is only supported on ARM devices.")

    value = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    if IsValidPai(value):
        # Input is a PAI; derive the corresponding physical address.
        pai = value
        phys_addr = ConvertPaiToPhysAddr(value)
    else:
        # Input is a physical address; derive the corresponding PAI.
        pai = ConvertPhysAddrToPai(value)
        phys_addr = value

    print("Physical Address: {:#x}".format(phys_addr))
    print("PAI: {:d}".format(pai))
1551
@lldb_command('pmapdumpsurts')
def PmapDumpSurts(cmd_args=None):
    """ Dump the SURT list.

        Syntax: (lldb) pmapdumpsurts
    """
    from scheduler import IterateBitmap

    if "surt_list" not in kern.globals:
        raise NotImplementedError("SURT is not supported on this device.")

    for i, surt_page in enumerate(IterateLinkageChain(kern.globals.surt_list, 'surt_page_t *', 'surt_chain')):
        print(f"SURT Page {i} at physical address {hex(surt_page.surt_page_pa)}")
        print('')
        print('Allocation status (O: free, X: allocated):')
        # Start out fully allocated ('X') and clear each bit the free bitmap
        # reports; bit 127 maps to index 0 so high bits render first.
        bitmap_visual = bytearray(b'X' * 128)
        for word_idx, bit_base in ((0, 0), (1, 64)):
            for free_bit in IterateBitmap(surt_page.surt_page_free_bitmap[word_idx]):
                bitmap_visual[127 - (free_bit + bit_base)] = ord('O')

        # Render eight bits per row, most-significant bits first.
        for j in range(0, 128, 8):
            print(f"{bitmap_visual[j:(j+8)].decode('ascii')} bit [{127 - j}:{120 - j}]")

        print('')
        print('SURT list structure raw:')
        print(dereference(surt_page))
        print('')
        print('')
1586
@lldb_command('showallpmaps')
def ShowAllPmaps(cmd_args=None):
    """ Dump all pmaps.

        Syntax: (lldb) showallpmaps
    """
    # Print each pmap structure on the global list, separated by blank lines.
    all_pmaps = IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps')
    for current_pmap in all_pmaps:
        print(dereference(current_pmap))
        print()
1596
@lldb_command('pmapforroottablepa')
def PmapForRootTablePa(cmd_args=None):
    """ Dump the pmap with matching root TTE physical address.

        Syntax: (lldb) pmapforroottablepa <pa>
    """
    if not cmd_args:
        raise ArgumentError('Invalid argument, expecting the physical address of a root translation table')

    root_table_pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    # Scan every pmap for a root translation table at the given address.
    for candidate in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if candidate.ttep != root_table_pa:
            continue
        print(dereference(candidate))
        print()
1611