# xref: /xnu-12377.1.9/tools/lldbmacros/pmap.py (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1from xnu import *
2import xnudefines
3from kdp import *
4from utils import *
5import struct
6from collections import namedtuple
7
def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None):
    """ Read an integer from physical memory.
        params:
            phys_addr : int - Physical address to read
            bitsize   : int - width of the read in bits (8/16/32/64); defaults to 64
            cpuval    : None (optional, currently unused)
        returns:
            int - value read from memory; 0xBAD10AD on failure.
    """
    if GetConnectionProtocol() == "kdp":
        return KDPReadPhysMEM(phys_addr, bitsize)

    # No KDP connection: translate the physical address to a kernel virtual
    # address and dereference it directly through lldb.
    kva = kern.PhysToKernelVirt(int(phys_addr))
    ctype_for_width = {
        64: 'uint64_t *',
        32: 'uint32_t *',
        16: 'uint16_t *',
        8:  'uint8_t *',
    }
    if kva and bitsize in ctype_for_width:
        sbval = kern.GetValueFromAddress(kva, ctype_for_width[bitsize]).GetSBValue()
        return sbval.Dereference().GetValueAsUnsigned()
    return 0xBAD10AD
32
@lldb_command('readphys')
def ReadPhys(cmd_args = None):
    """ Reads the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: readphys <nbits> <address>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
    """
    # Require both <nbits> and <address>.
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError()

    width_bits = ArgumentStringToInt(cmd_args[0])
    target_paddr = ArgumentStringToInt(cmd_args[1])
    print("{0: <#x}".format(ReadPhysInt(target_paddr, width_bits)))
    return True
50
# Width-specific conveniences: 'readphys<N> <address>' expands to
# 'readphys <N> <address>'.
lldb_alias('readphys8', 'readphys 8 ')
lldb_alias('readphys16', 'readphys 16 ')
lldb_alias('readphys32', 'readphys 32 ')
lldb_alias('readphys64', 'readphys 64 ')
55
def KDPReadPhysMEM(address, bits):
    """ Setup the state for READPHYSMEM64 commands for reading data via kdp
        params:
            address : int - address where to read the data from
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            int: read value from memory.
            0xBAD10AD: if failed to read data.
    """
    retval = 0xBAD10AD
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return retval

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # The pack(">")/unpack("<") pairs byte-swap the values into the
        # big-endian wire representation used in the request payload.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]
        # Payload layout: 64-bit address, 32-bit byte count, 16-bit zero field.
        packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # Command code 25 carries the READPHYSMEM64 request through the
        # lldb process plugin to the KDP server.
        ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            value = ret_obj.GetOutput()

            # Pick the struct formats matching the requested width.
            # NOTE(review): if bits is not one of 8/16/32/64, pack_fmt and
            # unpack_fmt are never bound and the unpack below raises NameError.
            if bits == 64 :
                pack_fmt = "<Q"
                unpack_fmt = ">Q"
            if bits == 32 :
                pack_fmt = "<I"
                unpack_fmt = ">I"
            if bits == 16 :
                pack_fmt = "<H"
                unpack_fmt = ">H"
            if bits == 8 :
                pack_fmt = "<B"
                unpack_fmt = ">B"

            # Parse the last (bits // 4) + 1 characters of the reply as hex
            # and byte-swap the value back to host order.
            # NOTE(review): the +1 presumably accounts for a trailing
            # character in the plugin output -- confirm against the server.
            retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0]

    else:
        # Classic KDP: hand-build a kdp_readphysmem64_req_t inside the
        # kernel's manual_pkt buffer and let the debugger stub service it.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))

        # Clear manual_pkt.input before staging the request (presumably the
        # "request pending" flag -- set to 1 below to submit).
        if not WriteInt32ToMemoryAddress(0, input_address):
            return retval

        kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize()
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return retval

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size)

        # Populate header, physical address, byte count and target cpu fields.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            if WriteInt32ToMemoryAddress(1, input_address):
                # now read data from the kdp packet
                data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data))
                if bits == 64 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 32 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 16 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 8 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()

    return retval
134
135
def KDPWritePhysMEM(address, intval, bits):
    """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp
        params:
            address : int - address where to save the data
            intval : int - integer value to be stored in memory
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            boolean: True if the write succeeded.
    """
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return False

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # Byte-swap address and length into the big-endian wire representation.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]

        # Pick the struct formats matching the requested width.
        # NOTE(review): if bits is not one of 8/16/32/64, pack_fmt and
        # unpack_fmt are never bound and the unpack below raises NameError.
        if bits == 64 :
            pack_fmt = ">Q"
            unpack_fmt = "<Q"
        if bits == 32 :
            pack_fmt = ">I"
            unpack_fmt = "<I"
        if bits == 16 :
            pack_fmt = ">H"
            unpack_fmt = "<H"
        if bits == 8 :
            pack_fmt = ">B"
            unpack_fmt = "<B"

        # Byte-swap the value to write into wire order.
        data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0]

        # Payload layout: 64-bit address, 32-bit byte count, 16-bit zero
        # field, then the 64-bit data value.
        packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # Command code 26 carries the WRITEPHYSMEM64 request through the
        # lldb process plugin to the KDP server.
        ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            return True
        else:
            return False

    else:
        # Classic KDP: hand-build a kdp_writephysmem64_req_t inside the
        # kernel's manual_pkt buffer and let the debugger stub service it.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))
        # Clear manual_pkt.input before staging the request (presumably the
        # "request pending" flag -- set to 1 below to submit).
        if not WriteInt32ToMemoryAddress(0, input_address):
            return False

        # The request carries the payload inline, so add the data size to the
        # fixed request-structure size.
        kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8)
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return False

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size)

        # Populate header, physical address, byte count and target cpu,
        # then the payload at the width requested.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            if bits == 8:
                if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 16:
                if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 32:
                if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 64:
                if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if WriteInt32ToMemoryAddress(1, input_address):
                return True
        return False
217
218
def WritePhysInt(phys_addr, int_val, bitsize = 64):
    """ Write an integer into physical memory.
        params:
            phys_addr : int - Physical address to write to
            int_val   : int - value to store
            bitsize   : int - width of the write in bits; defaults to 64
        returns:
            bool - True if the write was successful.
    """
    # Physical-memory writes are only implemented over a KDP connection.
    if GetConnectionProtocol() != "kdp":
        print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol())
        return False

    if KDPWritePhysMEM(phys_addr, int_val, bitsize):
        return True
    print("Failed to write via KDP.")
    return False
236
@lldb_command('writephys')
def WritePhys(cmd_args=None):
    """ writes to the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: writephys <nbits> <address> <value>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
        value: int value to be written
        ex. (lldb)writephys 16 0x12345abcd 0x25
    """
    # Require <nbits>, <address> and <value>.
    if cmd_args is None or len(cmd_args) < 3:
        raise ArgumentError()

    width_bits = ArgumentStringToInt(cmd_args[0])
    target_paddr = ArgumentStringToInt(cmd_args[1])
    value_to_write = ArgumentStringToInt(cmd_args[2])
    print(WritePhysInt(target_paddr, value_to_write, width_bits))
256
257
# Width-specific conveniences: 'writephys<N> <address> <value>' expands to
# 'writephys <N> <address> <value>'.
lldb_alias('writephys8', 'writephys 8 ')
lldb_alias('writephys16', 'writephys 16 ')
lldb_alias('writephys32', 'writephys 32 ')
lldb_alias('writephys64', 'writephys 64 ')
262
263
def _PT_Step(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each entry is 8 bytes wide; read the 64-bit PTE at this index.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex-dump the full 512-entry table this entry belongs to.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Mask off the top 12 bits and the low 12 flag/offset bits to get the
    # next table's address; a large (2MB) page clears the low 21 bits instead.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet path: only decode the present bit (bit 0) and page-size
        # bit (bit 7); print nothing.
        if entry & 0x1 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose path: decode and pretty-print each attribute bit.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x1:
            out_string += " valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += " invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed swap entry in an otherwise-invalid PTE.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " read-only"

        if entry & (0x1 << 2):
            out_string += " user"
        else:
            out_string += " supervisor"

        if entry & (0x1 << 3):
            out_string += " PWT"

        if entry & (0x1 << 4):
            out_string += " PCD"

        if entry & (0x1 << 5):
            out_string += " accessed"

        if entry & (0x1 << 6):
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False

        if entry & (0x1 << 8):
            out_string += " global"

        # Bits 9-10: available-for-software bits.
        if entry & (0x3 << 9):
            out_string += " avail:{0:x}".format((entry >> 9) & 0x3)

        # Bit 63: execute-disable.
        if entry & (0x1 << 63):
            out_string += " noexec"
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
352
def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes for EPT pmap
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each entry is 8 bytes wide; read the 64-bit EPT entry at this index.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex-dump the full 512-entry table this entry belongs to.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Mask off the top 12 bits and the low 12 flag/offset bits to get the
    # next table's address; a large (2MB) page clears the low 21 bits instead.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet path: an EPT entry is present if any of the R/W/X bits
        # (bits 0-2) are set; bit 7 marks a large page.
        if entry & 0x7 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose path: decode and pretty-print each attribute bit.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x7:
            out_string += "valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += "invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed swap entry in an otherwise-invalid PTE.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        # Bits 0-2: read/write/execute permissions.
        if entry & 0x1:
            out_string += " readable"
        else:
            out_string += " no read"
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " no write"

        if entry & (0x1 << 2):
            out_string += " executable"
        else:
            out_string += " no exec"

        # Bits 3-5: memory type of the mapping.
        ctype = entry & 0x38
        if ctype == 0x30:
            out_string += " cache-WB"
        elif ctype == 0x28:
            out_string += " cache-WP"
        elif ctype == 0x20:
            out_string += " cache-WT"
        elif ctype == 0x8:
            out_string += " cache-WC"
        else:
            out_string += " cache-NC"

        if (entry & 0x40) == 0x40:
            out_string += " Ignore-PTA"

        if (entry & 0x100) == 0x100:
            out_string += " accessed"

        if (entry & 0x200) == 0x200:
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
445
def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT):
    """ Walk the l4 pmap entry.
        params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t
        vaddr : int - virtual address to walk
        ept_pmap : non-zero to decode entries in EPT format via _PT_StepEPT
        verbose_level : vHUMAN/vSCRIPT/vDETAIL output verbosity
        returns: int - translated physical address, or 0 if there is no translation
    """
    pt_paddr = unsigned(pmap_addr_val)
    pt_valid = (unsigned(pmap_addr_val) != 0)
    pt_large = 0
    pframe_offset = 0
    if pt_valid:
        # Lookup bits 47:39 of linear address in PML4T
        pt_index = (vaddr >> 39) & 0x1ff
        pframe_offset = vaddr & 0x7fffffffff
        if verbose_level > vHUMAN :
            print("pml4 (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid:
        # Lookup bits 38:30 of the linear address in PDPT
        pt_index = (vaddr >> 30) & 0x1ff
        pframe_offset = vaddr & 0x3fffffff
        if verbose_level > vHUMAN:
            print("pdpt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    # Skip this level if the PDPT entry mapped a 1GB large page.
    if pt_valid and not pt_large:
        # Lookup bits 29:21 of the linear address in PDT
        pt_index = (vaddr >> 21) & 0x1ff
        pframe_offset = vaddr & 0x1fffff
        if verbose_level > vHUMAN:
            print("pdt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    # Skip this level if the PDT entry mapped a 2MB large page.
    if pt_valid and not pt_large:
        # Lookup bits 20:12 of the linear address in PT
        pt_index = (vaddr >> 12) & 0x1ff
        pframe_offset = vaddr & 0xfff
        if verbose_level > vHUMAN:
            print("pt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    paddr = 0
    paddr_isvalid = False
    if pt_valid:
        # Combine the final frame address with the in-frame offset.
        paddr = pt_paddr + pframe_offset
        paddr_isvalid = True

    if verbose_level > vHUMAN:
        if paddr_isvalid:
            pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self)
            print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue))
        else:
            print("no translation")

    return paddr
509
def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT):
    """ Walk the 4-level x86_64 page tables to translate a virtual address.
        params:
            pmapval       - core.value representing pmap_t in kernel
            vaddr         : int - int representing virtual address to walk
            verbose_level - vHUMAN/vSCRIPT/vDETAIL output verbosity
        returns:
            int - translated physical address, or 0 if no translation.
    """
    if pmapval.pm_cr3 != 0:
        if verbose_level > vHUMAN:
            print("Using normal Intel PMAP from pm_cr3\n")
        # Bug fix: honor the caller-supplied verbose_level rather than
        # unconditionally re-reading the global config['verbosity'], which
        # ignored the parameter this function accepts.
        return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, verbose_level)
    else:
        # pm_cr3 == 0 means the pmap is EPT-based (a nested/guest pmap).
        if verbose_level > vHUMAN:
            print("Using EPT pmap from pm_eptp\n")
        return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, verbose_level)
523
def assert_64bit(val):
    """ Assert that val fits in an unsigned 64-bit quantity. """
    assert val < (1 << 64)
526
# Geometry of an ARM64 translation table entry.
ARM64_TTE_SIZE = 8        # bytes per translation table entry
ARM64_TTE_SHIFT = 3       # log2(ARM64_TTE_SIZE)
ARM64_VMADDR_BITS = 48    # virtual address width in bits

def PmapBlockOffsetMaskARM64(page_size, level):
    """ Return the mask covering the byte offset within a block mapping at
        the given translation level (0..3) for the given granule size. """
    assert 0 <= level <= 3
    entries_per_table = page_size // ARM64_TTE_SIZE
    # Each level down multiplies coverage by the number of entries per table.
    return (entries_per_table ** (3 - level)) * page_size - 1

def PmapBlockBaseMaskARM64(page_size, level):
    """ Return the mask selecting the block base-address bits at the given
        translation level (0..3) for the given granule size. """
    assert 0 <= level <= 3
    vaddr_mask = (1 << ARM64_VMADDR_BITS) - 1
    return vaddr_mask & ~PmapBlockOffsetMaskARM64(page_size, level)
539
# Record for one TTE observed during a table walk: its level, raw 64-bit
# value, and whether it came from a stage-2 translation regime.
PmapTTEARM64 = namedtuple('PmapTTEARM64', ['level', 'value', 'stage2'])
541
def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False):
    """ Display the bits of an ARM64 translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level.  Valid values are 1, 2, or 3.
        stage2: True to decode the entry as a stage-2 descriptor,
                False (default) for stage 1.
        is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise.
    """
    # NOTE(review): `numbers` is not imported in this file directly; it is
    # presumably re-exported by `from xnu import *` -- confirm.
    assert(isinstance(level, numbers.Integral))
    assert_64bit(tte)

    # Bit 0 is the valid bit for every descriptor type.
    if tte & 0x1 == 0x0:
        print("Invalid.")
        return

    # Bit 1 set (below the final level) means this entry points at a
    # next-level table rather than mapping a block.
    if (tte & 0x2 == 0x2) and (level != 0x3):
        print("Type       = Table pointer.")
        print("Table addr = {:#x}.".format(tte & 0xfffffffff000))

        # Stage-2 table descriptors have no hierarchical attribute bits.
        if not stage2:
            print("PXN        = {:#x}.".format((tte >> 59) & 0x1))
            print("XN         = {:#x}.".format((tte >> 60) & 0x1))
            print("AP         = {:#x}.".format((tte >> 61) & 0x3))
            print("NS         = {:#x}.".format(tte >> 63))
    else:
        print("Type       = Block.")

        if stage2:
            print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf))
        else:
            # Bits 2-4 index into MAIR; the strings mirror xnu's MAIR layout.
            attr_index = (tte >> 2) & 0x7
            attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU',
                3: 'CACHE DISABLE',
                4: 'RESERVED'
                ,
                5: 'POSTED (DISABLE_XS if FEAT_XS supported)',
                6: 'POSTED_REORDERED (POSTED_COMBINED_REORDERED if FEAT_XS supported)',
                7: 'POSTED_COMBINED_REORDERED (POSTED_COMBINED_REORDERED_XS if FEAT_XS supported)' }

            # Only show the string version of the AttrIdx for CPU mappings since
            # these values don't apply to IOMMU mappings.
            if is_iommu_tte:
                print("AttrIdx    = {:#x}.".format(attr_index))
            else:
                print("AttrIdx    = {:#x} ({:s}).".format(attr_index, attr_string[attr_index]))
            print("NS         = {:#x}.".format((tte >> 5) & 0x1))

        if stage2:
            print("S2AP       = {:#x}.".format((tte >> 6) & 0x3))
        else:
            print("AP         = {:#x}.".format((tte >> 6) & 0x3))

        print("SH         = {:#x}.".format((tte >> 8) & 0x3))
        print("AF         = {:#x}.".format((tte >> 10) & 0x1))

        if not stage2:
            print("nG         = {:#x}.".format((tte >> 11) & 0x1))

        # Bit 52: contiguous hint.
        print("HINT       = {:#x}.".format((tte >> 52) & 0x1))

        if stage2:
            print("S2XN       = {:#x}.".format((tte >> 53) & 0x3))
        else:
            print("PXN        = {:#x}.".format((tte >> 53) & 0x1))
            print("XN         = {:#x}.".format((tte >> 54) & 0x1))

        # Bits 55-58: reserved for software use.
        print("SW Use     = {:#x}.".format((tte >> 55) & 0xf))

    return
610
def PmapTTnIndexARM64(vaddr, pmap_pt_attr):
    """ Return the translation-table indices for vaddr, one per level from
        the root (index 0) through pta_max_level, using the per-level index
        mask and shift from the page-table attribute structure. """
    level_count = unsigned(pmap_pt_attr.pta_max_level) + 1
    return [
        (vaddr & unsigned(pmap_pt_attr.pta_level_info[lvl].index_mask))
            >> unsigned(pmap_pt_attr.pta_level_info[lvl].shift)
        for lvl in range(level_count)
    ]
620
def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN, extra=None):
    """ Walk the ARM64 translation tables to translate a virtual address.
        params:
            pmap_pt_attr  - page-table attribute structure describing the
                            geometry (page size, root/max level, per-level
                            index masks and shifts)
            root_tte      - root translation table (indexable TTE array)
            vaddr         : int - virtual address to translate
            verbose_level - vHUMAN/vSCRIPT/vDETAIL output verbosity
            extra         - optional dict; populated with walk details:
                            'page_size', 'page_mask', 'paddr', 'is_valid',
                            'is_leaf', and 'tte' (list of PmapTTEARM64)
        returns:
            int or None - translated physical address, or None if the walk
                          hit an invalid entry.
    """
    # Bug fix: this was `assert(type(vaddr) in (int, int))` -- a py2->py3
    # conversion leftover of the (int, long) pair with a duplicated member.
    assert isinstance(vaddr, int)
    assert_64bit(vaddr)
    assert_64bit(root_tte)

    # Obtain pmap attributes.
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr)
    # Stage-2 (guest) attribute structures carry a 'stage2' member; older
    # kernels do not, so default to stage 1.
    stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False)

    # The pmap starts at a page table level that is defined by register
    # values; the root level can be obtained from the attributes structure.
    # (An unused root_pgtable_num_ttes computation was removed here.)
    level = unsigned(pmap_pt_attr.pta_root_level)
    tte = int(unsigned(root_tte[tt_index[level]]))

    # Walk the page tables.
    paddr = None
    max_level = unsigned(pmap_pt_attr.pta_max_level)
    is_valid = True
    is_leaf = False

    if extra is not None:
        extra['page_size'] = page_size
        extra['page_mask'] = page_size - 1
        extra['paddr']     = None
        extra['is_valid']  = True
        extra['is_leaf']   = False
        extra['tte']       = []

    while (level <= max_level):
        if extra is not None:
            extra['tte'].append(PmapTTEARM64(level=level, value=tte, stage2=stage2))

        if verbose_level >= vSCRIPT:
            print("L{} entry: {:#x}".format(level, tte))
        if verbose_level >= vDETAIL:
            PmapDecodeTTEARM64(tte, level, stage2)

        # Bit 0 clear => invalid descriptor: abort the walk.
        if tte & 0x1 == 0x0:
            if verbose_level >= vHUMAN:
                print("L{} entry invalid: {:#x}\n".format(level, tte))

            if extra is not None:
                extra['is_valid'] = False
            is_valid = False
            break

        # Handle leaf entry: a block descriptor (bit 1 clear), or any entry
        # at the final level (where bit 1 set means "page").
        if tte & 0x2 == 0x0 or level == max_level:
            base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level)
            offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level)
            paddr = tte & base_mask
            paddr = paddr | (vaddr & offset_mask)

            if level != max_level:
                # NOTE(review): this prints for block mappings regardless of
                # verbose_level -- confirm whether it should be gated on
                # verbosity like the rest of the output.
                print("phys: {:#x}".format(paddr))

            if extra is not None:
                extra['is_leaf'] = True
                extra['paddr'] = paddr
            is_leaf = True
            break
        else:
            # Handle page table entry: follow the pointer to the next-level
            # table and fetch the TTE for this vaddr at that level.
            next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1])
            assert(isinstance(next_phys, numbers.Integral))

            next_virt = kern.PhysToKernelVirt(next_phys)
            assert(isinstance(next_virt, numbers.Integral))

            if verbose_level >= vDETAIL:
                print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt))

            ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*")
            tte = int(unsigned(dereference(ttep)))
            assert(isinstance(tte, numbers.Integral))

        # We've parsed one level, so go to the next level.
        assert(level <= 3)
        level = level + 1

    if verbose_level >= vHUMAN:
        # NOTE(review): a legitimate translation to physical address 0 would
        # also print "(no translation)" since only a truthy paddr is reported.
        if paddr:
            print("Translation of {:#x} is {:#x}.".format(vaddr, paddr))
        else:
            print("(no translation)")

    return paddr
716
def PmapWalk(pmap, vaddr, verbose_level = vHUMAN):
    """ Dispatch a page-table walk for vaddr to the walker matching the
        target architecture. Raises NotImplementedError otherwise. """
    arch = kern.arch
    if arch == 'x86_64':
        return PmapWalkX86_64(pmap, vaddr, verbose_level)
    if arch.startswith('arm64'):
        # Obtain pmap attributes from pmap structure; older kernels lack the
        # pmap_pt_attr member, so fall back to the native attributes global.
        attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
        return PmapWalkARM64(attr, pmap.tte, vaddr, verbose_level)
    raise NotImplementedError("PmapWalk does not support {0}".format(kern.arch))
726
@lldb_command('pmap_walk')
def PmapWalkHelper(cmd_args=None):
    """ Perform a page-table walk in <pmap> for <virtual_address>.
        Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e]
            Multiple -v's can be specified for increased verbosity
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to pmap_walk.")

    target_pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t')
    walk_vaddr = ArgumentStringToInt(cmd_args[1])
    PmapWalk(target_pmap, walk_vaddr, config['verbosity'])
740
def GetMemoryAttributesFromUser(requested_type):
    """ Translate a user-supplied granule string into the matching kernel
        page-table attribute global.
        requested_type: '4k', '16k' or '16k_s2' (case-insensitive)
        returns: the pmap_pt_attr global, or None if unrecognized/unavailable.
    """
    attr_by_name = {
        '4k' : kern.globals.pmap_pt_attr_4k,
        '16k' : kern.globals.pmap_pt_attr_16k,
        # Stage-2 attributes only exist on hypervisor-capable kernels.
        '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2') else None,
    }
    return attr_by_name.get(requested_type.lower())
753
@lldb_command('ttep_walk')
def TTEPWalkPHelper(cmd_args=None):
    """ Perform a page-table walk in <root_ttep> for <virtual_address>.
        Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e]
        Multiple -v's can be specified for increased verbosity
        """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to ttep_walk.")

    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch))

    # Convert the physical root table pointer into an indexable KVA array.
    root_ttep = ArgumentStringToInt(cmd_args[0])
    walk_vaddr = ArgumentStringToInt(cmd_args[1])
    root_tte = kern.GetValueFromAddress(kern.PhysToKernelVirt(root_ttep), 'unsigned long *')

    # Default to the native page-table geometry unless one was requested.
    if len(cmd_args) < 3:
        pt_attr = kern.globals.native_pt_attr
    else:
        pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
    if pt_attr is None:
        raise ArgumentError("Invalid translation attribute type.")

    return PmapWalkARM64(pt_attr, root_tte, walk_vaddr, config['verbosity'])
774
@lldb_command('decode_tte')
def DecodeTTE(cmd_args=None):
    """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2]
        Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2]
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to decode_tte.")
    # Validate the optional stage argument before dispatching.
    if len(cmd_args) > 2 and cmd_args[2] not in ["s1", "s2"]:
        raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2]))
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("decode_tte does not support {0}".format(kern.arch))
    stage2 = (len(cmd_args) > 2 and cmd_args[2] == "s2")
    PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2)
789
# Masks of the flag bits kept in the upper bits of a pv_head_table entry
# (bits 54-62 on arm64, bit 31 on arm32; see PVWalkARM for the per-bit
# meanings on arm64). LLDB tends to set the high bits of pointer values it
# hands back, so these masks are OR'd into an entry before it is used as a
# pointer, making the result independent of the stored flag bits.
PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) | (1 << 58) | (1 << 57) | (1 << 56) | (1 << 55) | (1 << 54)
PVH_HIGH_FLAGS_ARM32 = (1 << 31)
792
def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN):
    """ Dump information about a single mapping retrieved by the pv_head_table.

        pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP,
              or None if type PVH_TYPE_PTEP.
        ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with
              the high flags already set (the type bits don't need to be cleared).
              For type PVH_TYPE_PVEP this will be the value retrieved from the
              pve_ptep[] array.
        verbose_level: at vDETAIL or higher, an extra line describing who owns
              the mapping (pmap/task or IOMMU) is also printed.
    """
    # On arm64, bit 2 of the PTEP marks an IOMMU (rather than CPU) mapping and
    # bit 63 distinguishes an IOMMU table pointer from an IOMMU state pointer.
    if kern.arch.startswith('arm64'):
        iommu_flag = 0x4
        iommu_table_flag = 1 << 63
    else:
        iommu_flag = 0
        iommu_table_flag = 0

    # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries.
    if pvep is not None and (ptep & 0x1):
        # Note: It's not possible for IOMMU mappings to be marked as alt acct so
        # setting this string is mutually exclusive with setting the IOMMU strings.
        pte_str = ' (alt acct)'
    else:
        pte_str = ''

    if pvep is not None:
        pve_str = 'PVEP {:#x}, '.format(pvep)
    else:
        pve_str = ''

    # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this
    # either does nothing or clears out the AltAcct bit.
    ptep = ptep & ~0x3

    # When printing with extra verbosity, print an extra newline that describes
    # who owns the mapping.
    extra_str = ''

    if ptep & iommu_flag:
        # The mapping is an IOMMU Mapping
        ptep = ptep & ~iommu_flag

        # Due to LLDB automatically setting all the high bits of pointers, when
        # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set
        # the iommu_table_flag, which means this check only works for PVH entries
        # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH
        # entry which has the right casting applied to avoid this issue).
        #
        # Why don't we just do the same casting for pve_ptep[] you ask? Well not
        # for a lack of trying, that's for sure. If you can figure out how to
        # cast that array correctly, then be my guest.
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            if ptep & iommu_table_flag:
                pte_str = ' (IOMMU table), entry'
                ptd = GetPtDesc(KVToPhysARM(ptep))
                iommu = dereference(ptd.iommu)
            else:
                # Instead of dumping the PTE (since we don't have that), dump the
                # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc).
                #
                # This works because later on when the "ptep" is dereferenced as a
                # PTE pointer (uint64_t pointer), the descriptor pointer will be
                # dumped as that's the first 64-bit value in the IOMMU state object.
                pte_str = ' (IOMMU state), descriptor'
                ptep = ptep | iommu_table_flag
                iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *'))

            # For IOMMU mappings, dump who owns the mapping as the extra string.
            extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name)
            if unsigned(iommu.name) != 0:
                extra_str += '/{:s}'.format(iommu.name)
            extra_str += ' (iommu state: {:x})'.format(addressof(iommu))
        else:
            ptd = GetPtDesc(KVToPhysARM(ptep))
            extra_str = 'Mapped by IOMMU {:x}'.format(ptd.iommu)
    else:
        # The mapping is a CPU Mapping
        pte_str += ', entry'
        ptd = GetPtDesc(KVToPhysARM(ptep))
        if ptd.pmap == kern.globals.kernel_pmap:
            extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap)
        elif verbose_level >= vDETAIL:
            task = TaskForPmapHelper(ptd.pmap)
            extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>")
    # Dump the PTE pointer and its current value; fall back to a placeholder
    # when the PTE can't be read from the target.
    try:
        print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))))
    except:
        print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str))

    if verbose_level >= vDETAIL:
        print("    |-- {:s}".format(extra_str))
884
def PVWalkARM(pai, verbose_level = vHUMAN):
    """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap.

        pai: physical address index (PAI) corresponding to the pv_head_table
             entry to walk.
        verbose_level: Set to vSCRIPT or higher to print extra info around the
                       the pv_head_table/pp_attr_table flags and to dump the
                       pt_desc_t object if the type is a PTD.
    """
    # LLDB will automatically try to make pointer values dereferencable by
    # setting the upper bits if they aren't set. We need to parse the flags
    # stored in the upper bits later, so cast the pv_head_table to an array of
    # integers to get around this "feature". We'll add the upper bits back
    # manually before deref'ing anything.
    pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*")
    pvh_raw = unsigned(pv_head_table[pai])
    pvh = pvh_raw
    # The low two bits of a PVH entry encode its type:
    # 0 = NULL, 1 = PVE list, 2 = single PTE, 3 = page-table descriptor.
    pvh_type = pvh & 0x3

    print("PVH raw value: {:#x}".format(pvh_raw))
    if kern.arch.startswith('arm64'):
        pvh = pvh | PVH_HIGH_FLAGS_ARM64
    else:
        pvh = pvh | PVH_HIGH_FLAGS_ARM32

    if pvh_type == 0:
        print("PVH type: NULL")
    elif pvh_type == 3:
        print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3))
    elif pvh_type == 2:
        print("PVH type: single PTE")
        PVDumpPTE(None, pvh, verbose_level)
    elif pvh_type == 1:
        pvep = pvh & ~0x3
        print("PVH type: PTE list")
        # Each pv_entry_t holds up to two PTE pointers (pve_ptep[0..1]);
        # advance to the next PVE in the chain once both slots were examined.
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")

            if pve.pve_ptep[pve_ptep_idx] != 0:
                PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level)

            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)

    if verbose_level >= vDETAIL:
        if (pvh_type == 1) or (pvh_type == 2):
            # Dump pv_head_table flags when there's a valid mapping.
            pvh_flags = []

            if pvh_raw & (1 << 62):
                pvh_flags.append("CPU")
            if pvh_raw & (1 << 60):
                pvh_flags.append("EXEC")
            if pvh_raw & (1 << 59):
                pvh_flags.append("LOCKDOWN_KC")
            if pvh_raw & (1 << 58):
                pvh_flags.append("HASHED")
            if pvh_raw & (1 << 57):
                pvh_flags.append("LOCKDOWN_CS")
            if pvh_raw & (1 << 56):
                pvh_flags.append("LOCKDOWN_RO")
            if pvh_raw & (1 << 55):
                pvh_flags.append("RETIRED")
            if pvh_raw & (1 << 54):
                # Bit 54's meaning depends on the page protection model in use.
                if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
                    pvh_flags.append("SECURE_FLUSH_NEEDED")
                else:
                    pvh_flags.append("SLEEPABLE_LOCK")
            if kern.arch.startswith('arm64') and pvh_raw & (1 << 61):
                pvh_flags.append("LOCK")

            print("PVH Flags: {}".format(pvh_flags))

        # Always dump pp_attr_table flags (these can be updated even if there aren't mappings).
        ppattr = unsigned(kern.globals.pp_attr_table[pai])
        print("PPATTR raw value: {:#x}".format(ppattr))

        ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)]
        if ppattr & 0x40:
            ppattr_flags.append("REFERENCED")
        if ppattr & 0x80:
            ppattr_flags.append("MODIFIED")
        if ppattr & 0x100:
            ppattr_flags.append("INTERNAL")
        if ppattr & 0x200:
            ppattr_flags.append("REUSABLE")
        if ppattr & 0x400:
            ppattr_flags.append("ALTACCT")
        if ppattr & 0x800:
            ppattr_flags.append("NOENCRYPT")
        if ppattr & 0x1000:
            ppattr_flags.append("REFFAULT")
        if ppattr & 0x2000:
            ppattr_flags.append("MODFAULT")
        if ppattr & 0x4000:
            ppattr_flags.append("MONITOR")
        if ppattr & 0x8000:
            ppattr_flags.append("NO_MONITOR")

        print("PPATTR Flags: {}".format(ppattr_flags))

        if pvh_type == 3:
            def RunLldbCmdHelper(command):
                """Helper for dumping an LLDB command right before executing it
                and printing the results.
                command: The LLDB command (as a string) to run.

                Example input: "p/x kernel_pmap".
                """
                print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command)))
            # Dump the page table descriptor object
            ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
            RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd))

            # Depending on the system, more than one ptd_info can be associated
            # with a single PTD. Only dump the first PTD info and assume the
            # user knows to dump the rest if they're on one of those systems.
            RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]")
1006
@lldb_command('pv_walk')
def PVWalk(cmd_args=None):
    """ Show mappings for <physical_address | PAI> tracked in the PV list.
        Syntax: (lldb) pv_walk <physical_address | PAI> [-vv]

        Extra verbosity will pretty print the pv_head_table/pp_attr_table flags
        as well as dump the page table descriptor (PTD) struct if the entry is a
        PTD.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pv_walk.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pv_walk does not support {0}".format(kern.arch))

    phys = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    # ConvertPhysAddrToPai() passes an already-converted PAI through unchanged
    # and verifies the physical address is kernel-managed.
    PVWalkARM(ConvertPhysAddrToPai(phys), config['verbosity'])
1028
@lldb_command('kvtophys')
def KVToPhys(cmd_args=None):
    """ Translate a kernel virtual address to the corresponding physical address.
        Assumes the virtual address falls within the kernel static region.
        Syntax: (lldb) kvtophys <kernel virtual address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to kvtophys.")
    if kern.arch.startswith('arm'):
        vaddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
        print("{:#x}".format(KVToPhysARM(vaddr)))
    elif kern.arch == 'x86_64':
        # On x86_64 the physmap is a simple linear offset.
        vaddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
        print("{:#x}".format(vaddr - unsigned(kern.globals.physmap_base)))
1041
@lldb_command('phystokv')
def PhysToKV(cmd_args=None):
    """ Translate a physical address to the corresponding static kernel virtual address.
        Assumes the physical address corresponds to managed DRAM.
        Syntax: (lldb) phystokv <physical address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to phystokv.")
    paddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print("{:#x}".format(kern.PhysToKernelVirt(paddr)))
1051
def KVToPhysARM(addr):
    """ Translate a kernel virtual address to a physical address on ARM.

        addr: int - kernel virtual address to translate.
        returns: int - the corresponding physical address.

        On PPL-or-lower systems the ptov_table is searched first; a VA that
        matches no entry falls through to the static-region translation
        (gVirtBase/gPhysBase offset) at the bottom. On SPTM systems the libsptm
        PAPT ranges are searched instead, and a ValueError is raised when no
        range covers the address (the static fallback is unreachable there).
    """
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        ptov_table = kern.globals.ptov_table
        for i in range(0, kern.globals.ptov_index):
            if (addr >= int(unsigned(ptov_table[i].va))) and (addr < (int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].len)))):
                return (addr - int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].pa)))
    else:
        papt_table = kern.globals.libsptm_papt_ranges
        page_size = kern.globals.page_size
        for i in range(0, kern.globals.libsptm_n_papt_ranges):
            # Range length is given in pages (num_mappings), not bytes.
            if (addr >= int(unsigned(papt_table[i].papt_start))) and (addr < (int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].num_mappings) * page_size))):
                return (addr - int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].paddr_start)))
        raise ValueError("VA {:#x} not found in physical region lookup table".format(addr))
    return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase))
1066
1067
def GetPtDesc(paddr):
    """ Look up the page-table descriptor (pt_desc_t) for a physical page.

        paddr: physical address of a page-table page.
        returns: pt_desc_t pointer from the page's pv_head_table entry.
        raises: ValueError when the PVH entry is not of PTD type (low two
                bits != 0x3).
    """
    page_num = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size
    # OR the high flag bits back in so the value is usable as a pointer
    # regardless of which flags happen to be stored in the entry.
    high_bits = PVH_HIGH_FLAGS_ARM64 if kern.arch.startswith('arm64') else PVH_HIGH_FLAGS_ARM32
    pvh = unsigned(kern.globals.pv_head_table[page_num]) | high_bits
    if (pvh & 0x3) != 0x3:
        raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh))
    return kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
1080
def PhysToFrameTableEntry(paddr):
    """ Return the SPTM frame table entry describing a physical address.

        paddr: int - physical address to look up.
        returns: the frame_table entry for SPTM-managed DRAM, the matching
                 io_frame_table entry for a registered I/O range, or the
                 xnu_io_fte placeholder when no range matches.
    """
    sptm_first_phys = int(unsigned(kern.globals.sptm_first_phys))
    sptm_last_phys = int(unsigned(kern.globals.sptm_last_phys))
    # Managed DRAM: index directly into the main frame table. This must be a
    # conjunctive range check ('and'); the previous 'or' was a tautology that
    # made the I/O-range lookup below unreachable.
    if paddr >= sptm_first_phys and paddr < sptm_last_phys:
        return kern.globals.frame_table[(paddr - sptm_first_phys) // kern.globals.page_size]
    # Outside DRAM: search the registered I/O ranges by page index. Use floor
    # division so page_idx stays an exact integer.
    page_idx = paddr // kern.globals.page_size
    for i in range(0, kern.globals.sptm_n_io_ranges):
        base = kern.globals.io_frame_table[i].io_range.phys_page_idx
        end = base + kern.globals.io_frame_table[i].io_range.num_pages
        if page_idx >= base and page_idx < end:
            return kern.globals.io_frame_table[i]
    return kern.globals.xnu_io_fte
1091
@lldb_command('phystofte')
def PhysToFTE(cmd_args=None):
    """ Translate a physical address to the corresponding SPTM frame table entry pointer
        Syntax: (lldb) phystofte <physical address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to phystofte.")

    paddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print(repr(PhysToFrameTableEntry(paddr)))
1102
# Frame-table entry type codes, compared against fte.type in ShowPTEARM.
# NOTE(review): these presumably mirror the SPTM frame type enum -- keep the
# values in sync with the corresponding kernel/SPTM definitions.
XNU_IOMMU = 23
XNU_PAGE_TABLE = 19
XNU_PAGE_TABLE_SHARED = 20
XNU_PAGE_TABLE_ROZONE = 21
XNU_PAGE_TABLE_COMMPAGE = 22
SPTM_PAGE_TABLE = 9
1109
def ShowPTEARM(pte, page_size, level):
    """ Display vital information about an ARM page table entry
        pte: kernel virtual address of the PTE.  page_size and level may be None,
        in which case we'll try to infer them from the page table descriptor.
        Inference of level may only work for L2 and L3 TTEs depending upon system
        configuration.
    """
    pt_index = 0
    stage2 = False
    def GetPageTableInfo(ptd, paddr):
        """ Return (refcnt, level, info_str) for the page-table page owning pte.
            info_str is non-None only for IOMMU pages.
        """
        # Bug fix: 'stage2' must be declared nonlocal as well. Without it, the
        # assignment below created a new local binding, so the outer stage2
        # stayed False and stage-2 PTEs were always decoded/printed as stage 1.
        nonlocal pt_index, page_size, level, stage2
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            # First load ptd_info[0].refcnt so that we can check if this is an IOMMU page.
            # IOMMUs don't split PTDs across multiple 4K regions as CPU page tables sometimes
            # do, so the IOMMU refcnt token is always stored at index 0.  If this is not
            # an IOMMU page, we may end up using a different final value for pt_index below.
            refcnt = ptd.ptd_info[0].refcnt
            # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001.
            is_iommu_pte = (refcnt & 0x8000) == 0x8000
            if not is_iommu_pte and page_size is None and hasattr(ptd.pmap, 'pmap_pt_attr'):
                page_size = ptd.pmap.pmap_pt_attr.pta_page_size
            elif page_size is None:
                page_size = kern.globals.native_pt_attr.pta_page_size
            pt_index = (pte % kern.globals.page_size) // page_size
            refcnt = ptd.ptd_info[pt_index].refcnt
            if not is_iommu_pte and hasattr(ptd.pmap, 'pmap_pt_attr') and hasattr(ptd.pmap.pmap_pt_attr, 'stage2'):
                stage2 = ptd.pmap.pmap_pt_attr.stage2
            if level is None:
                # A refcnt of 0x4000 identifies an L2 table page; everything
                # else is assumed to be a leaf (L3) table.
                if refcnt == 0x4000:
                    level = 2
                else:
                    level = 3
            if is_iommu_pte:
                iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name)
                if unsigned(dereference(ptd.iommu).name) != 0:
                    iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name)
                info_str = "iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name)
            else:
                info_str = None
            return (int(unsigned(refcnt)), level, info_str)
        else:
            # SPTM systems: consult the frame table entry for this page.
            fte = PhysToFrameTableEntry(paddr)
            if fte.type == XNU_IOMMU:
                if page_size is None:
                    page_size = kern.globals.native_pt_attr.pta_page_size
                info_str = "PTD iommu token: {:#x} (ID {:#x} TSD {:#x})".format(ptd.iommu, fte.iommu_page.iommu_id, fte.iommu_page.iommu_tsd)
                return (int(unsigned(fte.iommu_page.iommu_refcnt._value)), 0, info_str)
            elif fte.type in [XNU_PAGE_TABLE, XNU_PAGE_TABLE_SHARED, XNU_PAGE_TABLE_ROZONE, XNU_PAGE_TABLE_COMMPAGE, SPTM_PAGE_TABLE]:
                if page_size is None:
                    if hasattr(ptd.pmap, 'pmap_pt_attr'):
                        page_size = ptd.pmap.pmap_pt_attr.pta_page_size
                    else:
                        page_size = kern.globals.native_pt_attr.pta_page_size
                return (int(unsigned(fte.cpu_page_table.mapping_refcnt._value)), int(unsigned(fte.cpu_page_table.level)), None)
            else:
                raise ValueError("Unrecognized FTE type {:#x}".format(fte.type))
    pte_paddr = KVToPhysARM(pte)
    ptd = GetPtDesc(pte_paddr)
    refcnt, level, info_str = GetPageTableInfo(ptd, pte_paddr)
    wiredcnt = ptd.ptd_info[pt_index].wiredcnt
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        va = ptd.va[pt_index]
    else:
        va = ptd.va
    print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x}, va: {:#x})".format(ptd, refcnt, wiredcnt, va))

    # The pmap/iommu field is a union, so only print the correct one.
    if info_str is not None:
        print(info_str)
    else:
        if ptd.pmap == kern.globals.kernel_pmap:
            pmap_str = "(kernel_pmap)"
        else:
            task = TaskForPmapHelper(ptd.pmap)
            pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>")
        print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str))
        # Compute the VA granule covered by one entry at this level, then the
        # VA this particular PTE maps within the table page.
        nttes = page_size // 8
        granule = page_size * (nttes ** (3 - level))
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            pte_pgoff = pte % page_size
        else:
            pte_pgoff = pte % kern.globals.native_pt_attr.pta_page_size
        pte_pgoff = pte_pgoff // 8
        print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(va)) + (pte_pgoff * granule)))
        pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *'))))
        print("value: {:#x}".format(pteval))
        print("level: {:d}".format(level))
        PmapDecodeTTEARM64(pteval, level, stage2)
1199
@lldb_command('showpte')
def ShowPTE(cmd_args=None):
    """ Display vital information about the page table entry at VA <pte>
        Syntax: (lldb) showpte <pte_va> [level] [4k|16k|16k_s2]
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to showpte.")
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("showpte does not support {0}".format(kern.arch))

    # Optional third argument selects the page-table geometry; otherwise let
    # ShowPTEARM infer the page size from the page table descriptor.
    page_size = None
    if len(cmd_args) >= 3:
        pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
        if pmap_pt_attr is None:
            raise ArgumentError("Invalid translation attribute type.")
        page_size = pmap_pt_attr.pta_page_size

    # Optional second argument pins the translation level.
    level = None
    if len(cmd_args) >= 2:
        level = ArgumentStringToInt(cmd_args[1])

    ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), page_size, level)
1221
def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action):
    """ Perform the specified action for all valid mappings in an ARM64 translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 2 or 3
        va: virtual address mapped by the first entry of tt
        action: callback for each valid TTE, invoked as
                action(pmap, level, type, tte_ptr, paddr, mapped_va, granule);
                a truthy return value recurses into the next level when the
                entry is a table pointer
    """
    # Obtain pmap attributes
    pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    for i in range(nttes):
        try:
            tte = tt[i]
            # Bit 0 is the valid bit; skip invalid entries entirely.
            if tte & 0x1 == 0x0:
                continue

            tt_next = None
            paddr = unsigned(tte) & unsigned(page_base_mask)

            # Handle leaf entry
            # (bit 1 clear means block mapping at a non-leaf level; at
            # max_level every valid entry is a leaf page entry)
            if tte & 0x2 == 0x0 or level == max_level:
                type = 'block' if level < max_level else 'entry'
                granule = PmapBlockOffsetMaskARM64(page_size, level) + 1
            else:
            # Handle page table entry
                type = 'table'
                granule = page_size
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')

            # VA mapped by entry i = base VA of this table + i granules.
            mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i)
            if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule):
                if tt_next is not None:
                    FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action)

        except Exception as exc:
            # Best-effort walk: report entries that can't be read and move on.
            print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))))
1263
def ScanPageTables(action, targetPmap=None):
    """ Perform the specified action for all valid mappings in all page tables,
        optionally restricted to a single pmap.
        action: callback with the same signature as FindMappingAtLevelARM64's
                action parameter; it is first invoked for the root table, and a
                truthy return value continues the walk into lower levels.
        targetPmap: pmap whose page table should be scanned.  If None, all pmaps on system will be scanned.
    """
    print("Scanning all available translation tables.  This may take a long time...")
    def ScanPmap(pmap, action):
        if kern.arch.startswith('arm64'):
            # Obtain pmap attributes
            pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
            granule = pmap_pt_attr.pta_page_size
            level = unsigned(pmap_pt_attr.pta_root_level)
            # Number of entries in the root table, derived from the root
            # level's index mask and shift.
            root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
                unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1

        # NOTE(review): pmap_pt_attr/granule/level are only bound in the arm64
        # branch above, so this line would raise NameError on any other
        # architecture. Callers currently gate on kern.arch before getting
        # here -- confirm before reusing this helper elsewhere.
        if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule):
            if kern.arch.startswith('arm64'):
                FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action)

    if targetPmap is not None:
        ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action)
    else:
        for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
            ScanPmap(pmap, action)
1288
@lldb_command('showallmappings')
def ShowAllMappings(cmd_args=None):
    """ Find and display all available mappings on the system for
        <physical_address>.  Optionally only searches the pmap
        specified by [<pmap>]
        Syntax: (lldb) showallmappings <physical_address> [<pmap>]
        WARNING: this macro can take a long time (up to 30min.) to complete!
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to showallmappings.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showallmappings does not support {0}".format(kern.arch))

    pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    targetPmap = cmd_args[1] if len(cmd_args) > 1 else None

    def printMatchedMapping(pmap, level, type, tte, paddr, va, granule):
        # Report any translation entry whose physical range covers pa, and
        # always keep walking (return True) so nested tables are visited.
        if paddr <= pa < (paddr + granule):
            print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va))
        return True

    ScanPageTables(printMatchedMapping, targetPmap)
1310
@lldb_command('showptusage')
def ShowPTUsage(cmd_args=None):
    """ Display a summary of pagetable allocations for a given pmap.
        Syntax: (lldb) showptusage [<pmap>]
        WARNING: this macro can take a long time (> 1hr) to complete!
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showptusage does not support {0}".format(kern.arch))
    # The pmap argument is optional; guard against cmd_args being None (the
    # decorator default) before calling len(), which previously raised
    # TypeError when the command was invoked with no arguments.
    targetPmap = None
    if cmd_args is not None and len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    # Running counters for the scan; updated by the closure below (nonlocal
    # replaces the previous single-element-list workaround).
    lastPmap = None
    numTables = 0
    numUnnested = 0
    numPmaps = 0
    def printValidTTE(pmap, level, type, tte, paddr, va, granule):
        """ ScanPageTables action: count/print page tables, flagging likely
            unnested L2 tables inside the nested (shared) region. """
        nonlocal lastPmap, numTables, numUnnested, numPmaps
        unnested = ""
        nested_region_addr = int(unsigned(pmap.nested_region_addr))
        nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size))
        if lastPmap is None or (pmap != lastPmap):
            # Print a header the first time we see each pmap.
            lastPmap = pmap
            numPmaps += 1
            print("pmap {:#x}:".format(pmap))
        if type == 'root':
            return True
        if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end):
            ptd = GetPtDesc(paddr)
            if ptd.pmap != pmap:
                # Table belongs to a different pmap (still nested/shared):
                # don't count it and don't descend into it.
                return False
            else:
                numUnnested += 1
                unnested = " (likely unnested)"
        numTables += 1
        print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested)
        # Stop descending below L2 -- only table pages are being counted.
        return level != 2
    ScanPageTables(printValidTTE, targetPmap)
    print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables, numUnnested, numPmaps))
1351
def checkPVList(pmap, level, type, tte, paddr, va, granule):
    """ Checks an ARM physical-to-virtual mapping list for consistency errors.
        pmap: owner of the translation table
        level: translation table level.  PV lists will only be checked for L2 (arm32) or L3 (arm64) tables.
        type: unused
        tte: KVA of PTE to check for presence in PV list.  If None, presence check will be skipped.
        paddr: physical address whose PV list should be checked.  Need not be page-aligned.
        va: unused
        granule: unused
        Always returns True, so it can also serve as a ScanPageTables() action.
    """
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    page_size = kern.globals.page_size
    if kern.arch.startswith('arm64'):
        page_offset_mask = (page_size - 1)
        page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
        paddr = paddr & page_base_mask
        max_level = 3
        pvh_set_bits = PVH_HIGH_FLAGS_ARM64
    # Only leaf-level mappings of kernel-managed pages have PV lists to check.
    if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys:
        return True
    pn = (paddr - vm_first_phys) // page_size
    pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits
    pvh_type = pvh & 0x3
    if pmap is not None:
        pmap_str = "pmap: {:#x}: ".format(pmap)
    else:
        pmap_str = ''
    if tte is not None:
        tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr)
    else:
        tte_str = "paddr {:#x}: ".format(paddr)
    if pvh_type == 0 or pvh_type == 3:
        # NULL or PTD-type entries shouldn't back a leaf mapping.
        print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type))
    elif pvh_type == 2:
        # Single-PTE entry: it must point back at the checked PTE, and that
        # PTE must map this physical page.
        ptep = pvh & ~0x3
        if tte is not None and ptep != unsigned(tte):
            print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep))
        try:
            pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
            if (pte != paddr):
                print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
        except Exception as exc:
            print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep))
    elif pvh_type == 1:
        # PVE list: walk every pve_ptep slot (two per pv_entry_t), verifying
        # each recorded PTE maps this page, and that the checked PTE appears.
        pvep = pvh & ~0x3
        tte_match = False
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")
            ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3
            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)
            if ptep == 0:
                continue
            if tte is not None and ptep == unsigned(tte):
                tte_match = True
            try:
                pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
                if (pte != paddr):
                    print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
            except Exception as exc:
                print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep))
        if tte is not None and not tte_match:
            # Bug fix: the old message fed the integer paddr to a '{:s}'
            # conversion, which raised ValueError exactly when this diagnostic
            # should have printed. tte_str already identifies the PTE/paddr.
            print("{:s}{:s}not found in PV list".format(pmap_str, tte_str))
    return True
1419
@lldb_command('pv_check', 'P')
def PVCheck(cmd_args=None, cmd_options={}):
    """ Check the physical-to-virtual mapping for a given PTE or physical address
        Syntax: (lldb) pv_check <addr> [-P]
            -P        : Interpret <addr> as a physical address rather than a PTE
    """
    if cmd_args is None or len(cmd_args) == 0:
        raise ArgumentError("Too few arguments to pv_check.")
    # Only arm64 has the PV-list layout this macro understands; level 3 means
    # "leaf PTE" for checkPVList's level gate.
    if kern.arch.startswith('arm64'):
        level = 3
    else:
        raise NotImplementedError("pv_check does not support {0}".format(kern.arch))
    if "-P" in cmd_options:
        # <addr> is a raw physical address; there is no PTE to cross-check.
        pte = None
        pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long")))
    else:
        # <addr> is a PTE pointer; dereference it to get the mapped physical address.
        pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *')
        pa = int(unsigned(dereference(pte)))
    checkPVList(None, level, None, pte, pa, 0, None)
1439
@lldb_command('check_pmaps')
def CheckPmapIntegrity(cmd_args=None):
    """ Performs a system-wide integrity check of all PTEs and associated PV lists.
        Optionally only checks the pmap specified by [<pmap>]
        Syntax: (lldb) check_pmaps [<pmap>]
        WARNING: this macro can take a HUGE amount of time (several hours) if you do not
        specify [pmap] to limit it to a single pmap.  It will also give false positives
        for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM.
        Use of this macro without the [<pmap>] argument is heavily discouraged.
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch))
    targetPmap = None
    # cmd_args defaults to None when the macro is invoked with no arguments;
    # guard before calling len() to avoid a TypeError.
    if cmd_args is not None and len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    ScanPageTables(checkPVList, targetPmap)
1456
@lldb_command('pmapsforledger')
def PmapsForLedger(cmd_args=None):
    """ Find and display all pmaps currently using <ledger>.
        Syntax: (lldb) pmapsforledger <ledger>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmapsforledger.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch))
    target_ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t')
    # Walk the global pmap list and report every pmap charged to this ledger.
    for candidate in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if candidate.ledger == target_ledger:
            print("pmap: {:#x}".format(candidate))
1470
1471
def IsValidPai(pai):
    """ Return True iff the given unsigned value is a valid physical address
        index (PAI), i.e. it falls in [0, last PAI) where the last PAI is
        derived from the kernel-managed physical range.

        All contemporary SoCs reserve the bottom part of the address space, so
        there shouldn't be any valid physical addresses between zero and the
        last PAI either.
    """
    managed_bytes = unsigned(kern.globals.vm_last_phys) - unsigned(kern.globals.vm_first_phys)
    last_pai = managed_bytes // unsigned(kern.globals.page_size)
    return 0 <= pai < last_pai
1490
def ConvertPaiToPhysAddr(pai):
    """ Convert the given Physical Address Index (PAI) into a physical address.

        Inputs that are not valid PAIs (most likely already physical
        addresses) are passed through unchanged.
    """
    if not IsValidPai(pai):
        return pai
    # PAI N corresponds to the Nth managed page above vm_first_phys.
    return (pai * unsigned(kern.globals.page_size)) + unsigned(kern.globals.vm_first_phys)
1504
def ConvertPhysAddrToPai(pa):
    """ Convert the given physical address into a Physical Address Index (PAI).

        Inputs that are already valid PAIs are returned unchanged; inputs that
        are neither a valid PAI nor a kernel-managed physical address raise
        ArgumentError.
    """
    first_phys = unsigned(kern.globals.vm_first_phys)
    last_phys = unsigned(kern.globals.vm_last_phys)

    if IsValidPai(pa):
        # Already a PAI; nothing to do.
        return pa
    if pa < first_phys or pa >= last_phys:
        raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, first_phys, last_phys))
    # Kernel-managed physical address: index of its page above vm_first_phys.
    return (pa - first_phys) // unsigned(kern.globals.page_size)
1522
@lldb_command('pmappaindex')
def PmapPaIndex(cmd_args=None):
    """ Display both a physical address and physical address index (PAI) when
        provided with only one of those values.

        Syntax: (lldb) pmappaindex <physical address | PAI>

        NOTE: This macro will throw an exception if the input isn't a valid PAI
              and is also not a kernel-managed physical address.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmappaindex.")

    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmappaindex is only supported on ARM devices.")

    value = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    if IsValidPai(value):
        # Input is a PAI; derive the physical address from it.
        pai, phys_addr = value, ConvertPaiToPhysAddr(value)
    else:
        # Input is a physical address; derive the PAI from it.
        pai, phys_addr = ConvertPhysAddrToPai(value), value

    print("Physical Address: {:#x}".format(phys_addr))
    print("PAI: {:d}".format(pai))
1552
@lldb_command('pmapdumpsurts')
def PmapDumpSurts(cmd_args=None):
    """ Dump the SURT list.

        Syntax: (lldb) pmapdumpsurts
    """
    from scheduler import IterateBitmap

    if "surt_list" not in kern.globals:
        raise NotImplementedError("SURT is not supported on this device.")

    for idx, page in enumerate(IterateLinkageChain(kern.globals.surt_list, 'surt_page_t *', 'surt_chain')):
        print(f"SURT Page {idx} at physical address {hex(page.surt_page_pa)}")
        print('')
        print('Allocation status (O: free, X: allocated):')
        # Start with everything marked allocated, then clear each bit that the
        # two 64-bit free-bitmap words report as free.  Bit 127 renders first.
        visual = bytearray(b'X' * 128)
        for word, base in ((0, 0), (1, 64)):
            for free_bit in IterateBitmap(page.surt_page_free_bitmap[word]):
                visual[127 - (free_bit + base)] = ord('O')

        # Render 8 bits per row, highest bits first.
        for row in range(0, 128, 8):
            print(f"{visual[row:(row+8)].decode('ascii')} bit [{127 - row}:{120 - row}]")

        print('')
        print('SURT list structure raw:')
        print(dereference(page))
        print('')
        print('')
1587
@lldb_command('showallpmaps')
def ShowAllPmaps(cmd_args=None):
    """ Dump all pmaps.

        Syntax: (lldb) showallpmaps
    """
    # Print every registered pmap structure, blank-line separated.
    for cur_pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        print(dereference(cur_pmap))
        print()
1597
@lldb_command('pmapforroottablepa')
def PmapForRootTablePa(cmd_args=None):
    """ Dump the pmap with matching root TTE physical address.

        Syntax: (lldb) pmapforroottablepa <pa>
    """
    if not cmd_args:
        raise ArgumentError('Invalid argument, expecting the physical address of a root translation table')

    root_pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    # Walk every registered pmap and print the ones whose root translation
    # table (ttep) lives at the requested physical address.
    for candidate in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if candidate.ttep == root_pa:
            print(dereference(candidate))
            print()
1612