xref: /xnu-11417.121.6/tools/lldbmacros/pmap.py (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1from xnu import *
2import xnudefines
3from kdp import *
4from utils import *
5import struct
6from collections import namedtuple
7
def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None):
    """ Read an integer from physical memory.
        params:
            phys_addr : int - Physical address to read
            bitsize   : int - width of the read in bits (8/16/32/64). defaults to 64 bit
            cpuval    : None (optional, currently unused)
        returns:
            int - value read from memory. in case of failure 0xBAD10AD is returned.
    """
    if "kdp" == GetConnectionProtocol():
        return KDPReadPhysMEM(phys_addr, bitsize)

    # No KDP connection: map the physical address through the kernel's
    # physical aperture and dereference it as the requested width.
    ctype_for_width = {
        64: 'uint64_t *',
        32: 'uint32_t *',
        16: 'uint16_t *',
        8:  'uint8_t *',
    }
    paddr_in_kva = kern.PhysToKernelVirt(int(phys_addr))
    if paddr_in_kva and bitsize in ctype_for_width:
        sbv = kern.GetValueFromAddress(paddr_in_kva, ctype_for_width[bitsize]).GetSBValue()
        return sbv.Dereference().GetValueAsUnsigned()
    return 0xBAD10AD
32
@lldb_command('readphys')
def ReadPhys(cmd_args = None):
    """ Reads the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: readphys <nbits> <address>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
    """
    # Require both the width and the address.
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError()

    nbits = ArgumentStringToInt(cmd_args[0])
    phys_addr = ArgumentStringToInt(cmd_args[1])
    print("{0: <#x}".format(ReadPhysInt(phys_addr, nbits)))
    return True
50
# Width-specific shorthands: readphysN <address> reads an N-bit value.
lldb_alias('readphys8', 'readphys 8 ')
lldb_alias('readphys16', 'readphys 16 ')
lldb_alias('readphys32', 'readphys 32 ')
lldb_alias('readphys64', 'readphys 64 ')
55
def KDPReadPhysMEM(address, bits):
    """ Setup the state for READPHYSMEM64 commands for reading data via kdp
        params:
            address : int - address where to read the data from
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            int: read value from memory.
            0xBAD10AD: if failed to read data.
    """
    retval = 0xBAD10AD
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return retval

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # The pack(">")/unpack("<") pairs byte-swap the values so the hex payload
        # below comes out in the byte order the probe tool expects.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]
        # Payload layout: 64-bit address, 32-bit byte count, 16-bit trailing field (zero).
        packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # Custom packet code 25 — presumably the READPHYSMEM64 request; confirm
        # against the KDP server implementation.
        ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            value = ret_obj.GetOutput()

            # Choose the struct formats matching the requested width; the reply
            # is byte-swapped back into host order below.
            if bits == 64 :
                pack_fmt = "<Q"
                unpack_fmt = ">Q"
            if bits == 32 :
                pack_fmt = "<I"
                unpack_fmt = ">I"
            if bits == 16 :
                pack_fmt = "<H"
                unpack_fmt = ">H"
            if bits == 8 :
                pack_fmt = "<B"
                unpack_fmt = ">B"

            # Take the trailing bits//4 hex digits of the output (the +1 accounts
            # for a trailing character in the command output — likely a newline)
            # and byte-swap the parsed value into host order.
            retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0]

    else:
        # Classic KDP: drive the in-kernel manual_pkt mailbox directly.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))

        # Clear the 'input' flag so the kernel does not consume a half-built request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return retval

        kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize()
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return retval

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size)

        # Fill in the request: header, physical address, byte count, cpu number.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Setting 'input' to 1 hands the packet to the kernel for processing.
            if WriteInt32ToMemoryAddress(1, input_address):
                # now read data from the kdp packet
                data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data))
                if bits == 64 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 32 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 16 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 8 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()

    return retval
134
135
def KDPWritePhysMEM(address, intval, bits):
    """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp
        params:
            address : int - address where to save the data
            intval : int - integer value to be stored in memory
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            boolean: True if the write succeeded.
    """
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return False

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # The pack(">")/unpack("<") pairs byte-swap the values so the hex payload
        # below comes out in the byte order the probe tool expects.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]

        # Choose the struct formats matching the requested width for the
        # byte-swap of the data value below.
        if bits == 64 :
            pack_fmt = ">Q"
            unpack_fmt = "<Q"
        if bits == 32 :
            pack_fmt = ">I"
            unpack_fmt = "<I"
        if bits == 16 :
            pack_fmt = ">H"
            unpack_fmt = "<H"
        if bits == 8 :
            pack_fmt = ">B"
            unpack_fmt = "<B"

        data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0]

        # Payload layout: 64-bit address, 32-bit byte count, 16-bit trailing
        # field (zero), then the 64-bit data value.
        packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # Custom packet code 26 — presumably the WRITEPHYSMEM64 request; confirm
        # against the KDP server implementation.
        ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            return True
        else:
            return False

    else:
        # Classic KDP: drive the in-kernel manual_pkt mailbox directly.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))
        # Clear the 'input' flag so the kernel does not consume a half-built request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return False

        # The request carries the payload bytes after the fixed header.
        kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8)
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return False

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size)

        # Fill in the request: header, physical address, byte count, cpu number.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Store the payload at the width the caller requested.
            if bits == 8:
                if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 16:
                if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 32:
                if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 64:
                if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            # Setting 'input' to 1 hands the packet to the kernel for processing.
            if WriteInt32ToMemoryAddress(1, input_address):
                return True
        return False
217
218
def WritePhysInt(phys_addr, int_val, bitsize = 64):
    """ Write an integer value into physical memory.
        params:
            phys_addr : int - Physical address to write to
            int_val   : int - value to store
            bitsize   : int - width of the write in bits. defaults to 64 bit
        returns:
            bool - True if write was successful.
    """
    # Physical writes are only possible over a KDP connection.
    if "kdp" != GetConnectionProtocol():
        print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol())
        return False

    if KDPWritePhysMEM(phys_addr, int_val, bitsize):
        return True
    print("Failed to write via KDP.")
    return False
236
@lldb_command('writephys')
def WritePhys(cmd_args=None):
    """ writes to the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: writephys <nbits> <address> <value>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
        value: int value to be written
        ex. (lldb)writephys 16 0x12345abcd 0x25
    """
    # Require width, address and value.
    if cmd_args is None or len(cmd_args) < 3:
        raise ArgumentError()

    nbits = ArgumentStringToInt(cmd_args[0])
    phys_addr = ArgumentStringToInt(cmd_args[1])
    int_value = ArgumentStringToInt(cmd_args[2])
    print(WritePhysInt(phys_addr, int_value, nbits))
256
257
# Width-specific shorthands: writephysN <address> <value> writes an N-bit value.
lldb_alias('writephys8', 'writephys 8 ')
lldb_alias('writephys16', 'writephys 16 ')
lldb_alias('writephys32', 'writephys 32 ')
lldb_alias('writephys64', 'writephys 64 ')
262
263
def _PT_Step(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each entry is 8 bytes.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex-dump all 512 entries of the current table.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Mask keeping bits 51:12 — 4KB-aligned next-level table / page frame.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    # Mask keeping bits 51:21 — 2MB-aligned large-page frame.
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet mode: decode only the bits needed to continue the walk.
        if entry & 0x1 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            # Bit 7 (PS) marks a large-page mapping.
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose mode: additionally decode the attribute bits into text.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x1:
            out_string += " valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += " invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed swap entry.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " read-only"

        if entry & (0x1 << 2):
            out_string += " user"
        else:
            out_string += " supervisor"

        if entry & (0x1 << 3):
            out_string += " PWT"

        if entry & (0x1 << 4):
            out_string += " PCD"

        if entry & (0x1 << 5):
            out_string += " accessed"

        if entry & (0x1 << 6):
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False

        if entry & (0x1 << 8):
            out_string += " global"

        # Bits 10:9 are available for software use.
        if entry & (0x3 << 9):
            out_string += " avail:{0:x}".format((entry >> 9) & 0x3)

        # Bit 63 is the no-execute (NX) bit.
        if entry & (0x1 << 63):
            out_string += " noexec"
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
352
def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes for EPT pmap
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each entry is 8 bytes.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex-dump all 512 entries of the current table.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Mask keeping bits 51:12 — 4KB-aligned next-level table / page frame.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    # Mask keeping bits 51:21 — 2MB-aligned large-page frame.
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet mode: an EPT entry is usable if any of the R/W/X bits (2:0) is set.
        if entry & 0x7 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            # Bit 7 marks a large-page mapping.
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose mode: additionally decode the attribute bits into text.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x7:
            out_string += "valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += "invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed swap entry.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        # EPT permission bits: 0 = read, 1 = write, 2 = execute.
        if entry & 0x1:
            out_string += " readable"
        else:
            out_string += " no read"
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " no write"

        if entry & (0x1 << 2):
            out_string += " executable"
        else:
            out_string += " no exec"

        # Bits 5:3 encode the EPT memory type.
        ctype = entry & 0x38
        if ctype == 0x30:
            out_string += " cache-WB"
        elif ctype == 0x28:
            out_string += " cache-WP"
        elif ctype == 0x20:
            out_string += " cache-WT"
        elif ctype == 0x8:
            out_string += " cache-WC"
        else:
            out_string += " cache-NC"

        if (entry & 0x40) == 0x40:
            out_string += " Ignore-PTA"

        if (entry & 0x100) == 0x100:
            out_string += " accessed"

        if (entry & 0x200) == 0x200:
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
445
def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT):
    """ Walk the l4 pmap entry.
        params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t
        vaddr : int - virtual address to walk
        ept_pmap : if non-zero, decode entries with EPT semantics (_PT_StepEPT)
        verbose_level : vHUMAN / vSCRIPT / vDETAIL (passed through to the steppers)
        returns: int - physical address of the mapping, or 0 if there is none
    """
    pt_paddr = unsigned(pmap_addr_val)
    pt_valid = (unsigned(pmap_addr_val) != 0)
    pt_large = 0
    # pframe_offset holds the low VA bits not yet translated at the current level;
    # it is re-narrowed at each level and added to the final frame address.
    pframe_offset = 0
    if pt_valid:
        # Lookup bits 47:39 of linear address in PML4T
        pt_index = (vaddr >> 39) & 0x1ff
        pframe_offset = vaddr & 0x7fffffffff
        if verbose_level > vHUMAN :
            print("pml4 (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid:
        # Lookup bits 38:30 of the linear address in PDPT
        pt_index = (vaddr >> 30) & 0x1ff
        pframe_offset = vaddr & 0x3fffffff
        if verbose_level > vHUMAN:
            print("pdpt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        # Lookup bits 29:21 of the linear address in the PD
        pt_index = (vaddr >> 21) & 0x1ff
        pframe_offset = vaddr & 0x1fffff
        if verbose_level > vHUMAN:
            print("pdt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        # Lookup bits 20:12 of the linear address in the PT
        pt_index = (vaddr >> 12) & 0x1ff
        pframe_offset = vaddr & 0xfff
        if verbose_level > vHUMAN:
            print("pt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    paddr = 0
    paddr_isvalid = False
    if pt_valid:
        # Frame address plus the untranslated low bits of the VA.
        paddr = pt_paddr + pframe_offset
        paddr_isvalid = True

    if verbose_level > vHUMAN:
        if paddr_isvalid:
            # Sanity read of the translated location.
            pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self)
            print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue))
        else:
            print("no translation")

    return paddr
509
def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT):
    """ Translate a virtual address through an x86_64 pmap.
        params:
            pmapval - core.value representing pmap_t in kernel
            vaddr:  int     - int representing virtual address to walk
            verbose_level: vHUMAN / vSCRIPT / vDETAIL
        returns:
            int - physical address of the mapping, or 0 if there is none
    """
    if pmapval.pm_cr3 != 0:
        if verbose_level > vHUMAN:
            print("Using normal Intel PMAP from pm_cr3\n")
        # Honor the caller's verbosity; previously this ignored verbose_level
        # and passed config['verbosity'] to the walker.
        return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, verbose_level)
    else:
        # pm_cr3 == 0 means this pmap is backed by EPT (nested) page tables.
        if verbose_level > vHUMAN:
            print("Using EPT pmap from pm_eptp\n")
        return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, verbose_level)
523
def assert_64bit(val):
    """ Assert that val fits in an unsigned 64-bit quantity. """
    assert val < (1 << 64)
526
ARM64_TTE_SIZE = 8       # bytes per translation table entry
ARM64_TTE_SHIFT = 3      # log2(ARM64_TTE_SIZE)
ARM64_VMADDR_BITS = 48   # width of a virtual address in bits
530
def PmapBlockOffsetMaskARM64(page_size, level):
    """ Return the mask of VA bits that fall within a block mapping at 'level'
        for the given translation granule size.
    """
    assert 0 <= level <= 3
    entries_per_table = page_size >> ARM64_TTE_SHIFT
    return page_size * (entries_per_table ** (3 - level)) - 1
535
def PmapBlockBaseMaskARM64(page_size, level):
    """ Return the mask selecting the output-address bits of a block mapping
        at 'level' for the given translation granule size.
    """
    assert 0 <= level <= 3
    va_mask = (1 << ARM64_VMADDR_BITS) - 1
    return va_mask & ~PmapBlockOffsetMaskARM64(page_size, level)
539
# One recorded step of an ARM64 page-table walk: the table level, the raw TTE
# value, and whether the entry came from a stage-2 table.
PmapTTEARM64 = namedtuple('PmapTTEARM64', ['level', 'value', 'stage2'])
541
def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False):
    """ Display the bits of an ARM64 translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level.  Valid values are 1, 2, or 3.
        stage2: True to decode as a stage-2 (guest) entry, False for stage 1.
        is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise.
        returns: None (output is printed)
    """
    assert(isinstance(level, numbers.Integral))
    assert_64bit(tte)

    # Bit 0 is the valid bit for both table and block/page descriptors.
    if tte & 0x1 == 0x0:
        print("Invalid.")
        return

    # Bit 1 distinguishes a table pointer from a block mapping, except at the
    # final level (3) where it marks a page descriptor.
    if (tte & 0x2 == 0x2) and (level != 0x3):
        print("Type       = Table pointer.")
        print("Table addr = {:#x}.".format(tte & 0xfffffffff000))

        # Hierarchical attribute bits exist only in stage-1 table descriptors.
        if not stage2:
            print("PXN        = {:#x}.".format((tte >> 59) & 0x1))
            print("XN         = {:#x}.".format((tte >> 60) & 0x1))
            print("AP         = {:#x}.".format((tte >> 61) & 0x3))
            print("NS         = {:#x}.".format(tte >> 63))
    else:
        print("Type       = Block.")

        if stage2:
            print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf))
        else:
            # Stage-1 AttrIdx (bits 4:2) indexes MAIR; the strings below name
            # the xnu memory-attribute assignments for each index.
            attr_index = (tte >> 2) & 0x7
            attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU',
                3: 'CACHE DISABLE',
                4: 'RESERVED'
                ,
                5: 'POSTED (DISABLE_XS if FEAT_XS supported)',
                6: 'POSTED_REORDERED (POSTED_COMBINED_REORDERED if FEAT_XS supported)',
                7: 'POSTED_COMBINED_REORDERED (POSTED_COMBINED_REORDERED_XS if FEAT_XS supported)' }

            # Only show the string version of the AttrIdx for CPU mappings since
            # these values don't apply to IOMMU mappings.
            if is_iommu_tte:
                print("AttrIdx    = {:#x}.".format(attr_index))
            else:
                print("AttrIdx    = {:#x} ({:s}).".format(attr_index, attr_string[attr_index]))
            print("NS         = {:#x}.".format((tte >> 5) & 0x1))

        # Access permissions: S2AP for stage 2, AP for stage 1 (bits 7:6).
        if stage2:
            print("S2AP       = {:#x}.".format((tte >> 6) & 0x3))
        else:
            print("AP         = {:#x}.".format((tte >> 6) & 0x3))

        print("SH         = {:#x}.".format((tte >> 8) & 0x3))
        print("AF         = {:#x}.".format((tte >> 10) & 0x1))

        if not stage2:
            print("nG         = {:#x}.".format((tte >> 11) & 0x1))

        # Bit 52 is the contiguous hint.
        print("HINT       = {:#x}.".format((tte >> 52) & 0x1))

        # Execute-never bits: XN[1:0] at 54:53 for stage 2, PXN/XN for stage 1.
        if stage2:
            print("S2XN       = {:#x}.".format((tte >> 53) & 0x3))
        else:
            print("PXN        = {:#x}.".format((tte >> 53) & 0x1))
            print("XN         = {:#x}.".format((tte >> 54) & 0x1))

        # Bits 58:55 are reserved for software use.
        print("SW Use     = {:#x}.".format((tte >> 55) & 0xf))

    return
610
def PmapTTnIndexARM64(vaddr, pmap_pt_attr):
    """ Compute the translation-table index of vaddr at each level described
        by pmap_pt_attr.
        returns: list of per-level indices, one entry per level 0..pta_max_level.
    """
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    indices = []
    for lvl in range(max_level + 1):
        level_info = pmap_pt_attr.pta_level_info[lvl]
        masked = vaddr & unsigned(level_info.index_mask)
        indices.append(masked >> unsigned(level_info.shift))

    return indices
620
def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN, extra=None):
    """ Walk the ARM64 translation tables rooted at root_tte and translate vaddr.
        params:
            pmap_pt_attr: core.value - page table attributes (granule size,
                          per-level index masks/shifts, root and max level)
            root_tte: core.value - root translation table (indexable as TTEs)
            vaddr: int - virtual address to translate
            verbose_level: vHUMAN / vSCRIPT / vDETAIL
            extra: optional dict; if provided it is populated with
                   'page_size', 'page_mask', 'paddr', 'is_valid', 'is_leaf'
                   and 'tte' (a list of PmapTTEARM64, one per level visited)
        returns:
            int - translated physical address, or None if there is no valid mapping
    """
    # Was "type(vaddr) in (int, int)" — a Python-2 (int, long) leftover.
    assert isinstance(vaddr, numbers.Integral)
    assert_64bit(vaddr)
    assert_64bit(root_tte)

    # Obtain pmap attributes
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr)
    stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False)

    # The pmap starts at a page table level that is defined by register
    # values; the root level can be obtained from the attributes structure
    level = unsigned(pmap_pt_attr.pta_root_level)

    tte = int(unsigned(root_tte[tt_index[level]]))

    # Walk the page tables
    paddr = None
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    if extra is not None:
        extra['page_size'] = page_size
        extra['page_mask'] = page_size - 1
        extra['paddr']     = None
        extra['is_valid']  = True
        extra['is_leaf']   = False
        extra['tte']       = []

    while (level <= max_level):
        if extra is not None:
            extra['tte'].append(PmapTTEARM64(level=level, value=tte, stage2=stage2))

        if verbose_level >= vSCRIPT:
            print("L{} entry: {:#x}".format(level, tte))
        if verbose_level >= vDETAIL:
            PmapDecodeTTEARM64(tte, level, stage2)

        # Bit 0 clear means the entry is invalid: stop the walk.
        if tte & 0x1 == 0x0:
            if verbose_level >= vHUMAN:
                print("L{} entry invalid: {:#x}\n".format(level, tte))

            if extra is not None:
                extra['is_valid'] = False
            break

        # Handle leaf entry: a block descriptor (bit 1 clear) or any entry at
        # the final level.
        if tte & 0x2 == 0x0 or level == max_level:
            base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level)
            offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level)
            paddr = tte & base_mask
            paddr = paddr | (vaddr & offset_mask)

            if level != max_level:
                # NOTE(review): printed unconditionally for block (non-page)
                # mappings regardless of verbose_level — preserved as-is.
                print("phys: {:#x}".format(paddr))

            if extra is not None:
                extra['is_leaf'] = True
                extra['paddr'] = paddr
            break
        else:
            # Handle page table entry: follow the pointer to the next table.
            next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1])
            assert(isinstance(next_phys, numbers.Integral))

            next_virt = kern.PhysToKernelVirt(next_phys)
            assert(isinstance(next_virt, numbers.Integral))

            if verbose_level >= vDETAIL:
                print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt))

            ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*")
            tte = int(unsigned(dereference(ttep)))
            assert(isinstance(tte, numbers.Integral))

        # We've parsed one level, so go to the next level
        assert(level <= 3)
        level = level + 1

    if verbose_level >= vHUMAN:
        if paddr:
            print("Translation of {:#x} is {:#x}.".format(vaddr, paddr))
        else:
            print("(no translation)")

    return paddr
716
def PmapWalk(pmap, vaddr, verbose_level = vHUMAN):
    """ Dispatch a page-table walk for vaddr to the architecture-specific
        walker for the current target.
    """
    arch = kern.arch
    if arch == 'x86_64':
        return PmapWalkX86_64(pmap, vaddr, verbose_level)
    if arch.startswith('arm64'):
        # Obtain pmap attributes from pmap structure; older cores lack the
        # pmap_pt_attr field, in which case the native attributes apply.
        pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
        return PmapWalkARM64(pmap_pt_attr, pmap.tte, vaddr, verbose_level)
    raise NotImplementedError("PmapWalk does not support {0}".format(kern.arch))
726
@lldb_command('pmap_walk')
def PmapWalkHelper(cmd_args=None):
    """ Perform a page-table walk in <pmap> for <virtual_address>.
        Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e]
            Multiple -v's can be specified for increased verbosity
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to pmap_walk.")

    target_pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t')
    target_va = ArgumentStringToInt(cmd_args[1])
    PmapWalk(target_pmap, target_va, config['verbosity'])
740
def GetMemoryAttributesFromUser(requested_type):
    """ Map a user-supplied granule string ('4k', '16k', '16k_s2', case
        insensitive) to the matching pmap page-table attributes global.
        returns: core.value of the attributes, or None if unrecognized or
                 (for '16k_s2') unavailable in this kernel.
    """
    has_16k_stage2 = hasattr(kern.globals, 'pmap_pt_attr_16k_stage2')
    pmap_attr_dict = {
        '4k' : kern.globals.pmap_pt_attr_4k,
        '16k' : kern.globals.pmap_pt_attr_16k,
        '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if has_16k_stage2 else None,
    }

    return pmap_attr_dict.get(requested_type.lower(), None)
753
@lldb_command('ttep_walk')
def TTEPWalkPHelper(cmd_args=None):
    """ Perform a page-table walk in <root_ttep> for <virtual_address>.
        Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e]
        Multiple -v's can be specified for increased verbosity
        """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to ttep_walk.")

    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch))

    # The root table is given as a physical address; map it into the kernel's
    # physical aperture so it can be indexed.
    root_kva = kern.PhysToKernelVirt(ArgumentStringToInt(cmd_args[0]))
    root_tte = kern.GetValueFromAddress(root_kva, 'unsigned long *')
    target_va = ArgumentStringToInt(cmd_args[1])

    if len(cmd_args) < 3:
        pmap_pt_attr = kern.globals.native_pt_attr
    else:
        pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
    if pmap_pt_attr is None:
        raise ArgumentError("Invalid translation attribute type.")

    return PmapWalkARM64(pmap_pt_attr, root_tte, target_va, config['verbosity'])
774
@lldb_command('decode_tte')
def DecodeTTE(cmd_args=None):
    """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2]
        Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2]
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to decode_tte.")
    if len(cmd_args) > 2 and cmd_args[2] not in ["s1", "s2"]:
        raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2]))
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("decode_tte does not support {0}".format(kern.arch))
    # Stage-1 decoding is the default; decode as stage-2 only when requested.
    stage2 = len(cmd_args) > 2 and cmd_args[2] == "s2"
    PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2)
789
# Flag bits stored in the upper bits of a pv_head_table entry (see the flag
# names decoded in PVWalkARM: CPU/LOCK/EXEC/LOCKDOWN_*/HASHED/RETIRED/...).
# LLDB auto-sets high pointer bits, so these masks are OR'd back into raw
# entries before they are interpreted or dereferenced.
PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) | (1 << 58) | (1 << 57) | (1 << 56) | (1 << 55) | (1 << 54)
# arm32 keeps a single flag in the top bit of the entry.
PVH_HIGH_FLAGS_ARM32 = (1 << 31)
792
def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN):
    """ Dump information about a single mapping retrieved by the pv_head_table.

        pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP,
              or None if type PVH_TYPE_PTEP.
        ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with
              the high flags already set (the type bits don't need to be cleared).
              For type PVH_TYPE_PVEP this will be the value retrieved from the
              pve_ptep[] array.
        verbose_level: at vDETAIL and above, an extra line describing the owner
              of the mapping (pmap/task or IOMMU state) is also printed.
    """
    # Bits that tag a PTEP as an IOMMU mapping; only arm64 uses this encoding.
    if kern.arch.startswith('arm64'):
        iommu_flag = 0x4
        iommu_table_flag = 1 << 63
    else:
        iommu_flag = 0
        iommu_table_flag = 0

    # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries.
    if pvep is not None and (ptep & 0x1):
        # Note: It's not possible for IOMMU mappings to be marked as alt acct so
        # setting this string is mutually exclusive with setting the IOMMU strings.
        pte_str = ' (alt acct)'
    else:
        pte_str = ''

    # Only list-type entries have a PVE pointer worth printing.
    if pvep is not None:
        pve_str = 'PVEP {:#x}, '.format(pvep)
    else:
        pve_str = ''

    # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this
    # either does nothing or clears out the AltAcct bit.
    ptep = ptep & ~0x3

    # When printing with extra verbosity, print an extra newline that describes
    # who owns the mapping.
    extra_str = ''

    if ptep & iommu_flag:
        # The mapping is an IOMMU Mapping
        ptep = ptep & ~iommu_flag

        # Due to LLDB automatically setting all the high bits of pointers, when
        # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set
        # the iommu_table_flag, which means this check only works for PVH entries
        # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH
        # entry which has the right casting applied to avoid this issue).
        #
        # Why don't we just do the same casting for pve_ptep[] you ask? Well not
        # for a lack of trying, that's for sure. If you can figure out how to
        # cast that array correctly, then be my guest.
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            if ptep & iommu_table_flag:
                pte_str = ' (IOMMU table), entry'
                ptd = GetPtDesc(KVToPhysARM(ptep))
                iommu = dereference(ptd.iommu)
            else:
                # Instead of dumping the PTE (since we don't have that), dump the
                # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc).
                #
                # This works because later on when the "ptep" is dereferenced as a
                # PTE pointer (uint64_t pointer), the descriptor pointer will be
                # dumped as that's the first 64-bit value in the IOMMU state object.
                pte_str = ' (IOMMU state), descriptor'
                ptep = ptep | iommu_table_flag
                iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *'))

            # For IOMMU mappings, dump who owns the mapping as the extra string.
            extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name)
            if unsigned(iommu.name) != 0:
                extra_str += '/{:s}'.format(iommu.name)
            extra_str += ' (iommu state: {:x})'.format(addressof(iommu))
        else:
            # Non-PPL (SPTM) systems: only the PTD's iommu field is available.
            ptd = GetPtDesc(KVToPhysARM(ptep))
            extra_str = 'Mapped by IOMMU {:x}'.format(ptd.iommu)
    else:
        # The mapping is a CPU Mapping
        pte_str += ', entry'
        ptd = GetPtDesc(KVToPhysARM(ptep))
        if ptd.pmap == kern.globals.kernel_pmap:
            extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap)
        elif verbose_level >= vDETAIL:
            task = TaskForPmapHelper(ptd.pmap)
            extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>")
    # The PTE itself may be unmapped in a corefile; degrade gracefully.
    try:
        print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))))
    except:
        print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str))

    if verbose_level >= vDETAIL:
        print("    |-- {:s}".format(extra_str))
884
def PVWalkARM(pai, verbose_level = vHUMAN):
    """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap.

        pai: physical address index (PAI) corresponding to the pv_head_table
             entry to walk.
        verbose_level: Set to vSCRIPT or higher to print extra info around the
                       the pv_head_table/pp_attr_table flags and to dump the
                       pt_desc_t object if the type is a PTD.
    """
    # LLDB will automatically try to make pointer values dereferencable by
    # setting the upper bits if they aren't set. We need to parse the flags
    # stored in the upper bits later, so cast the pv_head_table to an array of
    # integers to get around this "feature". We'll add the upper bits back
    # manually before deref'ing anything.
    pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*")
    pvh_raw = unsigned(pv_head_table[pai])
    pvh = pvh_raw
    # The low two bits of the entry encode its type (NULL/PVEP/PTEP/PTD).
    pvh_type = pvh & 0x3

    print("PVH raw value: {:#x}".format(pvh_raw))
    if kern.arch.startswith('arm64'):
        pvh = pvh | PVH_HIGH_FLAGS_ARM64
    else:
        pvh = pvh | PVH_HIGH_FLAGS_ARM32

    if pvh_type == 0:
        print("PVH type: NULL")
    elif pvh_type == 3:
        print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3))
    elif pvh_type == 2:
        print("PVH type: single PTE")
        PVDumpPTE(None, pvh, verbose_level)
    elif pvh_type == 1:
        pvep = pvh & ~0x3
        print("PVH type: PTE list")
        # Each pv_entry carries two PTE slots (pve_ptep[0..1]); visit both
        # before following pve_next to the next entry in the chain.
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")

            if pve.pve_ptep[pve_ptep_idx] != 0:
                PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level)

            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)

    if verbose_level >= vDETAIL:
        if (pvh_type == 1) or (pvh_type == 2):
            # Dump pv_head_table flags when there's a valid mapping.
            pvh_flags = []

            if pvh_raw & (1 << 62):
                pvh_flags.append("CPU")
            if pvh_raw & (1 << 60):
                pvh_flags.append("EXEC")
            if pvh_raw & (1 << 59):
                pvh_flags.append("LOCKDOWN_KC")
            if pvh_raw & (1 << 58):
                pvh_flags.append("HASHED")
            if pvh_raw & (1 << 57):
                pvh_flags.append("LOCKDOWN_CS")
            if pvh_raw & (1 << 56):
                pvh_flags.append("LOCKDOWN_RO")
            if pvh_raw & (1 << 55):
                pvh_flags.append("RETIRED")
            # Bits 54 and 52 have meanings that depend on the page protection
            # type (PPL vs SPTM), so gate them accordingly.
            if pvh_raw & (1 << 54):
                if kern.globals.page_protection_type > kern.PAGE_PROTECTION_TYPE_PPL:
                    pvh_flags.append("SLEEPABLE_LOCK")
            if pvh_raw & (1 << 52):
                if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
                    pvh_flags.append("SECURE_FLUSH_NEEDED")
            if kern.arch.startswith('arm64') and pvh_raw & (1 << 61):
                pvh_flags.append("LOCK")

            print("PVH Flags: {}".format(pvh_flags))

        # Always dump pp_attr_table flags (these can be updated even if there aren't mappings).
        ppattr = unsigned(kern.globals.pp_attr_table[pai])
        print("PPATTR raw value: {:#x}".format(ppattr))

        # Low 6 bits hold the WIMG attributes; the remaining bits are
        # individual per-page status flags.
        ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)]
        if ppattr & 0x40:
            ppattr_flags.append("REFERENCED")
        if ppattr & 0x80:
            ppattr_flags.append("MODIFIED")
        if ppattr & 0x100:
            ppattr_flags.append("INTERNAL")
        if ppattr & 0x200:
            ppattr_flags.append("REUSABLE")
        if ppattr & 0x400:
            ppattr_flags.append("ALTACCT")
        if ppattr & 0x800:
            ppattr_flags.append("NOENCRYPT")
        if ppattr & 0x1000:
            ppattr_flags.append("REFFAULT")
        if ppattr & 0x2000:
            ppattr_flags.append("MODFAULT")
        if ppattr & 0x4000:
            ppattr_flags.append("MONITOR")
        if ppattr & 0x8000:
            ppattr_flags.append("NO_MONITOR")

        print("PPATTR Flags: {}".format(ppattr_flags))

        if pvh_type == 3:
            def RunLldbCmdHelper(command):
                """Helper for dumping an LLDB command right before executing it
                and printing the results.
                command: The LLDB command (as a string) to run.

                Example input: "p/x kernel_pmap".
                """
                print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command)))
            # Dump the page table descriptor object
            ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
            RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd))

            # Depending on the system, more than one ptd_info can be associated
            # with a single PTD. Only dump the first PTD info and assume the
            # user knows to dump the rest if they're on one of those systems.
            RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]")
1007
@lldb_command('pv_walk')
def PVWalk(cmd_args=None):
    """ Show mappings for <physical_address | PAI> tracked in the PV list.
        Syntax: (lldb) pv_walk <physical_address | PAI> [-vv]

        Extra verbosity will pretty print the pv_head_table/pp_attr_table flags
        as well as dump the page table descriptor (PTD) struct if the entry is a
        PTD.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pv_walk.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pv_walk does not support {0}".format(kern.arch))

    phys = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    # ConvertPhysAddrToPai() accepts either form: an existing PAI passes
    # through unchanged, while a physical address is checked to be
    # kernel-managed and converted.
    PVWalkARM(ConvertPhysAddrToPai(phys), config['verbosity'])
1029
@lldb_command('kvtophys')
def KVToPhys(cmd_args=None):
    """ Translate a kernel virtual address to the corresponding physical address.
        Assumes the virtual address falls within the kernel static region.
        Syntax: (lldb) kvtophys <kernel virtual address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to kvtophys.")
    vaddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    if kern.arch.startswith('arm'):
        print("{:#x}".format(KVToPhysARM(vaddr)))
    elif kern.arch == 'x86_64':
        # On x86_64 the physmap is a simple linear offset from the VA.
        print("{:#x}".format(vaddr - unsigned(kern.globals.physmap_base)))
1042
@lldb_command('phystokv')
def PhysToKV(cmd_args=None):
    """ Translate a physical address to the corresponding static kernel virtual address.
        Assumes the physical address corresponds to managed DRAM.
        Syntax: (lldb) phystokv <physical address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to phystokv.")
    paddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print("{:#x}".format(kern.PhysToKernelVirt(paddr)))
1052
def KVToPhysARM(addr):
    """ Translate a kernel virtual address to its physical address on ARM.

        On PPL systems (page_protection_type <= PPL) the ptov_table is searched
        first, and addresses not covered by it fall back to the static
        gVirtBase/gPhysBase linear translation.  On SPTM systems the libsptm
        PAPT range table is authoritative and a miss raises ValueError.
    """
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        # Look for a covering range in the phys-to-virt table.
        ptov_table = kern.globals.ptov_table
        for i in range(0, kern.globals.ptov_index):
            if (addr >= int(unsigned(ptov_table[i].va))) and (addr < (int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].len)))):
                return (addr - int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].pa)))
    else:
        # SPTM systems: search the libsptm PAPT (physical aperture) ranges;
        # range lengths are expressed in pages, hence the page_size scaling.
        papt_table = kern.globals.libsptm_papt_ranges
        page_size = kern.globals.page_size
        for i in range(0, kern.globals.libsptm_n_papt_ranges):
            if (addr >= int(unsigned(papt_table[i].papt_start))) and (addr < (int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].num_mappings) * page_size))):
                return (addr - int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].paddr_start)))
        raise ValueError("VA {:#x} not found in physical region lookup table".format(addr))
    # PPL fall-through only: assume the static linear mapping.
    return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase))
1067
1068
def GetPtDesc(paddr):
    """ Return the pt_desc_t pointer tracking the page-table page at <paddr>.

        Raises ValueError when the page's pv_head_table entry is not of type
        page-table descriptor (type bits != 0x3).
    """
    page_num = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size
    pvh = unsigned(kern.globals.pv_head_table[page_num])
    # Restore the high flag bits that LLDB may have stripped from the entry.
    if kern.arch.startswith('arm64'):
        pvh |= PVH_HIGH_FLAGS_ARM64
    else:
        pvh |= PVH_HIGH_FLAGS_ARM32
    if (pvh & 0x3) != 0x3:
        raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh))
    return kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
1081
def PhysToFrameTableEntry(paddr):
    """ Return the SPTM frame table entry describing physical address <paddr>.

        Managed DRAM ([sptm_first_phys, sptm_last_phys)) is looked up directly
        in the frame table.  Otherwise the IO frame table ranges are scanned,
        falling back to the generic xnu_io_fte entry when no IO range covers
        the address.
    """
    first_phys = int(unsigned(kern.globals.sptm_first_phys))
    last_phys = int(unsigned(kern.globals.sptm_last_phys))
    # Fixed: this range test previously used 'or' (paddr >= first or
    # paddr < last), which is true for essentially every address and made the
    # IO-range scan below unreachable.
    if first_phys <= paddr < last_phys:
        return kern.globals.frame_table[(paddr - first_phys) // kern.globals.page_size]
    # Fixed: use floor division -- '/' would produce a float page index.
    page_idx = paddr // kern.globals.page_size
    for i in range(0, kern.globals.sptm_n_io_ranges):
        base = kern.globals.io_frame_table[i].io_range.phys_page_idx
        end = base + kern.globals.io_frame_table[i].io_range.num_pages
        if base <= page_idx < end:
            return kern.globals.io_frame_table[i]
    return kern.globals.xnu_io_fte
1092
@lldb_command('phystofte')
def PhysToFTE(cmd_args=None):
    """ Translate a physical address to the corresponding SPTM frame table entry pointer
        Syntax: (lldb) phystofte <physical address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to phystofte.")

    paddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print(repr(PhysToFrameTableEntry(paddr)))
1103
# Frame-table entry type codes, compared against fte.type in ShowPTEARM to
# distinguish IOMMU-owned pages from the various flavors of CPU page-table
# pages.  NOTE(review): these presumably mirror the SPTM frame_type enum and
# must be kept in sync with it -- confirm against the SPTM headers.
XNU_IOMMU = 23
XNU_PAGE_TABLE = 19
XNU_PAGE_TABLE_SHARED = 20
XNU_PAGE_TABLE_ROZONE = 21
XNU_PAGE_TABLE_COMMPAGE = 22
SPTM_PAGE_TABLE = 9
1110
def ShowPTEARM(pte, page_size, level):
    """ Display vital information about an ARM page table entry
        pte: kernel virtual address of the PTE.  page_size and level may be None,
        in which case we'll try to infer them from the page table descriptor.
        Inference of level may only work for L2 and L3 TTEs depending upon system
        configuration.
    """
    pt_index = 0
    stage2 = False
    def GetPageTableInfo(ptd, paddr):
        """ Return (refcnt, level, info_str) for the table page described by
            <ptd>/<paddr>, inferring page_size/level/stage2 in the enclosing
            scope when they weren't supplied.  info_str is non-None only for
            IOMMU-owned pages.
        """
        # Fixed: 'stage2' was missing from this nonlocal list, so the
        # assignment below created a function-local variable and the outer
        # stage2 flag (used for the IPA/VA label and PmapDecodeTTEARM64) was
        # never updated for stage-2 pmaps.
        nonlocal pt_index, page_size, level, stage2
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            # First load ptd_info[0].refcnt so that we can check if this is an IOMMU page.
            # IOMMUs don't split PTDs across multiple 4K regions as CPU page tables sometimes
            # do, so the IOMMU refcnt token is always stored at index 0.  If this is not
            # an IOMMU page, we may end up using a different final value for pt_index below.
            refcnt = ptd.ptd_info[0].refcnt
            # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001.
            is_iommu_pte = (refcnt & 0x8000) == 0x8000
            if not is_iommu_pte and page_size is None and hasattr(ptd.pmap, 'pmap_pt_attr'):
                page_size = ptd.pmap.pmap_pt_attr.pta_page_size
            elif page_size is None:
                page_size = kern.globals.native_pt_attr.pta_page_size
            pt_index = (pte % kern.globals.page_size) // page_size
            refcnt = ptd.ptd_info[pt_index].refcnt
            if not is_iommu_pte and hasattr(ptd.pmap, 'pmap_pt_attr') and hasattr(ptd.pmap.pmap_pt_attr, 'stage2'):
                stage2 = ptd.pmap.pmap_pt_attr.stage2
            if level is None:
                # A refcnt of exactly 0x4000 marks a non-leaf (L2) table page.
                if refcnt == 0x4000:
                    level = 2
                else:
                    level = 3
            if is_iommu_pte:
                iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name)
                if unsigned(dereference(ptd.iommu).name) != 0:
                    iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name)
                info_str = "iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name)
            else:
                info_str = None
            return (int(unsigned(refcnt)), level, info_str)
        else:
            # SPTM systems: consult the frame table for page type and refcounts.
            fte = PhysToFrameTableEntry(paddr)
            if fte.type == XNU_IOMMU:
                if page_size is None:
                    page_size = kern.globals.native_pt_attr.pta_page_size
                info_str = "PTD iommu token: {:#x} (ID {:#x} TSD {:#x})".format(ptd.iommu, fte.iommu_page.iommu_id, fte.iommu_page.iommu_tsd)
                return (int(unsigned(fte.iommu_page.iommu_refcnt._value)), 0, info_str)
            elif fte.type in [XNU_PAGE_TABLE, XNU_PAGE_TABLE_SHARED, XNU_PAGE_TABLE_ROZONE, XNU_PAGE_TABLE_COMMPAGE, SPTM_PAGE_TABLE]:
                if page_size is None:
                    if hasattr(ptd.pmap, 'pmap_pt_attr'):
                        page_size = ptd.pmap.pmap_pt_attr.pta_page_size
                    else:
                        page_size = kern.globals.native_pt_attr.pta_page_size
                return (int(unsigned(fte.cpu_page_table.mapping_refcnt._value)), int(unsigned(fte.cpu_page_table.level)), None)
            else:
                raise ValueError("Unrecognized FTE type {:#x}".format(fte.type))
    pte_paddr = KVToPhysARM(pte)
    ptd = GetPtDesc(pte_paddr)
    refcnt, level, info_str = GetPageTableInfo(ptd, pte_paddr)
    wiredcnt = ptd.ptd_info[pt_index].wiredcnt
    # The 'va' field is indexed per sub-page region on PPL systems, but is a
    # scalar on SPTM systems.
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        va = ptd.va[pt_index]
    else:
        va = ptd.va
    print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x}, va: {:#x})".format(ptd, refcnt, wiredcnt, va))

    # The pmap/iommu field is a union, so only print the correct one.
    if info_str is not None:
        print(info_str)
    else:
        if ptd.pmap == kern.globals.kernel_pmap:
            pmap_str = "(kernel_pmap)"
        else:
            task = TaskForPmapHelper(ptd.pmap)
            pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>")
        print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str))
        # granule == span of VA covered by one entry at this level.
        nttes = page_size // 8
        granule = page_size * (nttes ** (3 - level))
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            pte_pgoff = pte % page_size
        else:
            pte_pgoff = pte % kern.globals.native_pt_attr.pta_page_size
        pte_pgoff = pte_pgoff // 8
        print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(va)) + (pte_pgoff * granule)))
        pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *'))))
        print("value: {:#x}".format(pteval))
        print("level: {:d}".format(level))
        PmapDecodeTTEARM64(pteval, level, stage2)
1200
@lldb_command('showpte')
def ShowPTE(cmd_args=None):
    """ Display vital information about the page table entry at VA <pte>
        Syntax: (lldb) showpte <pte_va> [level] [4k|16k|16k_s2]
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to showpte.")
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("showpte does not support {0}".format(kern.arch))

    # Optional third argument selects an explicit translation geometry;
    # otherwise it is inferred from the page table descriptor.
    page_size = None
    if len(cmd_args) >= 3:
        pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
        if pmap_pt_attr is None:
            raise ArgumentError("Invalid translation attribute type.")
        page_size = pmap_pt_attr.pta_page_size

    # Optional second argument pins the table level instead of inferring it.
    level = None
    if len(cmd_args) >= 2:
        level = ArgumentStringToInt(cmd_args[1])

    ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), page_size, level)
1222
def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action):
    """ Perform the specified action for all valid mappings in an ARM64 translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 2 or 3
        va: virtual address mapped by the first entry of tt
        action: callback for each valid TTE; returning True requests descent
                into the entry's child table (when it has one)
    """
    # Obtain pmap attributes
    pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    for i in range(nttes):
        try:
            tte = tt[i]
            # Bit 0 is the valid bit; skip invalid entries entirely.
            if tte & 0x1 == 0x0:
                continue

            tt_next = None
            paddr = unsigned(tte) & unsigned(page_base_mask)

            # Handle leaf entry
            # Bit 1 clear means a block mapping; at the maximum level every
            # valid entry is a leaf.
            if tte & 0x2 == 0x0 or level == max_level:
                type = 'block' if level < max_level else 'entry'
                granule = PmapBlockOffsetMaskARM64(page_size, level) + 1
            else:
            # Handle page table entry
                type = 'table'
                granule = page_size
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')

            # VA covered by entry i = base VA of this table + i entry spans.
            mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i)
            if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule):
                if tt_next is not None:
                    FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action)

        except Exception as exc:
            # Keep scanning the rest of the table even if one entry is
            # unreadable (e.g. not present in the corefile).
            print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))))
1264
def ScanPageTables(action, targetPmap=None):
    """ Perform the specified action for all valid mappings in all page tables,
        optionally restricted to a single pmap.
        action: callback invoked for the root table and, if it returns True,
                recursively for each valid TTE beneath it.
        targetPmap: pmap whose page table should be scanned.  If None, all pmaps on system will be scanned.
    """
    print("Scanning all available translation tables.  This may take a long time...")
    def ScanPmap(pmap, action):
        # NOTE(review): pmap_pt_attr/granule/level/root_pgtable_num_ttes are
        # only assigned on arm64, so on any other architecture the calls below
        # would raise NameError.  Callers appear to be arm-gated commands --
        # confirm before reusing this elsewhere.
        if kern.arch.startswith('arm64'):
            # Obtain pmap attributes
            pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
            granule = pmap_pt_attr.pta_page_size
            level = unsigned(pmap_pt_attr.pta_root_level)
            # Number of entries in the root table, derived from the root
            # level's index mask and shift.
            root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
                unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1

        if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule):
            if kern.arch.startswith('arm64'):
                FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action)

    if targetPmap is not None:
        ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action)
    else:
        for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
            ScanPmap(pmap, action)
1289
@lldb_command('showallmappings')
def ShowAllMappings(cmd_args=None):
    """ Find and display all available mappings on the system for
        <physical_address>.  Optionally only searches the pmap
        specified by [<pmap>]
        Syntax: (lldb) showallmappings <physical_address> [<pmap>]
        WARNING: this macro can take a long time (up to 30min.) to complete!
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to showallmappings.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showallmappings does not support {0}".format(kern.arch))

    pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    targetPmap = cmd_args[1] if len(cmd_args) > 1 else None

    def printMatchedMapping(pmap, level, type, tte, paddr, va, granule):
        # Report any translation entry whose span covers the target PA, and
        # always return True so the scan keeps descending into child tables.
        if paddr <= pa < (paddr + granule):
            print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va))
        return True

    ScanPageTables(printMatchedMapping, targetPmap)
1311
@lldb_command('showptusage')
def ShowPTUsage(cmd_args=None):
    """ Display a summary of pagetable allocations for a given pmap.
        Syntax: (lldb) showptusage [<pmap>]
        WARNING: this macro can take a long time (> 1hr) to complete!
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showptusage does not support {0}".format(kern.arch))
    # Fixed: guard against cmd_args being None before calling len() on it --
    # the pmap argument is optional for this command.
    targetPmap = None
    if cmd_args is not None and len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    # Running totals shared with the per-TTE callback via nonlocal.
    lastPmap = None
    numTables = 0
    numUnnested = 0
    numPmaps = 0
    def printValidTTE(pmap, level, type, tte, paddr, va, granule):
        nonlocal lastPmap, numTables, numUnnested, numPmaps
        unnested = ""
        nested_region_addr = int(unsigned(pmap.nested_region_addr))
        nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size))
        # Print a header the first time each pmap is encountered.
        if lastPmap is None or (pmap != lastPmap):
            lastPmap = pmap
            numPmaps += 1
            print ("pmap {:#x}:".format(pmap))
        if type == 'root':
            return True
        # An L2 table in the nested region owned by this pmap itself (rather
        # than the shared nested pmap) was most likely unnested.
        if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end):
            ptd = GetPtDesc(paddr)
            if ptd.pmap != pmap:
                return False
            else:
                numUnnested += 1
                unnested = " (likely unnested)"
        numTables += 1
        print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested)
        # Don't descend past L2; only table allocations are being counted.
        return level != 2
    ScanPageTables(printValidTTE, targetPmap)
    print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables, numUnnested, numPmaps))
1352
def checkPVList(pmap, level, type, tte, paddr, va, granule):
    """ Checks an ARM physical-to-virtual mapping list for consistency errors.
        pmap: owner of the translation table
        level: translation table level.  PV lists will only be checked for L2 (arm32) or L3 (arm64) tables.
        type: unused
        tte: KVA of PTE to check for presence in PV list.  If None, presence check will be skipped.
        paddr: physical address whose PV list should be checked.  Need not be page-aligned.
        va: unused
        granule: unused
        Always returns True so it can double as a ScanPageTables() action.
    """
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    page_size = kern.globals.page_size
    # NOTE: the masks/levels below are only initialized on arm64; callers
    # (e.g. pv_check) gate on arm64 before reaching this helper.
    if kern.arch.startswith('arm64'):
        page_offset_mask = (page_size - 1)
        page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
        paddr = paddr & page_base_mask
        max_level = 3
        pvh_set_bits = PVH_HIGH_FLAGS_ARM64
    # Only leaf-level mappings of kernel-managed pages have PV list entries.
    if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys:
        return True
    pn = (paddr - vm_first_phys) // page_size
    pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits
    pvh_type = pvh & 0x3
    if pmap is not None:
        pmap_str = "pmap: {:#x}: ".format(pmap)
    else:
        pmap_str = ''
    if tte is not None:
        tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr)
    else:
        tte_str = "paddr {:#x}: ".format(paddr)
    if pvh_type == 0 or pvh_type == 3:
        print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type))
    elif pvh_type == 2:
        # Single-PTE entry: the PVH itself holds the PTE pointer.
        ptep = pvh & ~0x3
        if tte is not None and ptep != unsigned(tte):
            print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep))
        try:
            pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
            if (pte != paddr):
                print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
        except Exception as exc:
            print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep))
    elif pvh_type == 1:
        # PVE list: walk every entry, each carrying up to two PTE pointers.
        pvep = pvh & ~0x3
        tte_match = False
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")
            ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3
            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)
            if ptep == 0:
                continue
            if tte is not None and ptep == unsigned(tte):
                tte_match = True
            try:
                pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
                if (pte != paddr):
                    print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
            except Exception as exc:
                print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep))
        if tte is not None and not tte_match:
            # Fixed: paddr was previously formatted with '{:s}', which raises
            # ValueError for an int and prevented this diagnostic from ever
            # printing.
            print("{:s}{:s}{:#x} not found in PV list".format(pmap_str, tte_str, paddr))
    return True
1420
@lldb_command('pv_check', 'P')
def PVCheck(cmd_args=None, cmd_options={}):
    """ Check the physical-to-virtual mapping for a given PTE or physical address
        Syntax: (lldb) pv_check <addr> [-P]
            -P        : Interpret <addr> as a physical address rather than a PTE
    """
    if cmd_args is None or len(cmd_args) == 0:
        raise ArgumentError("Too few arguments to pv_check.")
    # Leaf page-table entries live at level 3 on arm64; checkPVList skips
    # any entry whose level is below the max, so pass the max here.
    if kern.arch.startswith('arm64'):
        level = 3
    else:
        raise NotImplementedError("pv_check does not support {0}".format(kern.arch))
    if "-P" in cmd_options:
        # Input is a physical address; there is no PTE to cross-check, so
        # checkPVList will only validate the PV list for the page itself.
        pte = None
        pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long")))
    else:
        # Input is the KVA of a PTE; the physical page it maps is obtained
        # by dereferencing it (checkPVList masks off the non-address bits).
        pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *')
        pa = int(unsigned(dereference(pte)))
    checkPVList(None, level, None, pte, pa, 0, None)
1440
@lldb_command('check_pmaps')
def CheckPmapIntegrity(cmd_args=None):
    """ Performs a system-wide integrity check of all PTEs and associated PV lists.
        Optionally only checks the pmap specified by [<pmap>]
        Syntax: (lldb) check_pmaps [<pmap>]
        WARNING: this macro can take a HUGE amount of time (several hours) if you do not
        specify [pmap] to limit it to a single pmap.  It will also give false positives
        for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM.
        Use of this macro without the [<pmap>] argument is heavily discouraged.
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch))
    targetPmap = None
    # The [<pmap>] argument is optional; guard against cmd_args being None
    # before taking its length (len(None) would raise TypeError).
    if cmd_args is not None and len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    ScanPageTables(checkPVList, targetPmap)
1457
@lldb_command('pmapsforledger')
def PmapsForLedger(cmd_args=None):
    """ Find and display all pmaps currently using <ledger>.
        Syntax: (lldb) pmapsforledger <ledger>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmapsforledger.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch))
    target_ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t')
    # Walk every pmap in the system and report the ones sharing this ledger.
    for candidate in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if candidate.ledger == target_ledger:
            print("pmap: {:#x}".format(candidate))
1471
1472
def IsValidPai(pai):
    """ Given an unsigned value, decide whether it is a valid physical
        address index (PAI) by comparing it against the last possible PAI
        derived from the kernel-managed physical range.

        All contemporary SoCs reserve the bottom part of the address space,
        so there shouldn't be any valid physical addresses between zero and
        the last PAI either.
    """
    bytes_per_page = unsigned(kern.globals.page_size)
    first_managed = unsigned(kern.globals.vm_first_phys)
    last_managed = unsigned(kern.globals.vm_last_phys)

    # One PAI per managed page: valid indices lie in [0, last_pai).
    last_pai = (last_managed - first_managed) // bytes_per_page
    return 0 <= pai < last_pai
1491
def ConvertPaiToPhysAddr(pai):
    """ Convert the given Physical Address Index (PAI) into a physical address.

        If the input isn't a valid PAI (it's most likely already a physical
        address), then just return back the input unchanged.
    """
    if not IsValidPai(pai):
        # Not a PAI -- assume the caller already passed a physical address.
        return pai

    # pa = base of the managed range + (index * page size)
    return unsigned(kern.globals.vm_first_phys) + pai * unsigned(kern.globals.page_size)
1505
def ConvertPhysAddrToPai(pa):
    """ Convert the given physical address into a Physical Address Index (PAI).

        If the input is already a valid PAI, then just return back the input
        unchanged.

        raises: ArgumentError if the input is neither a valid PAI nor a
                kernel-managed physical address.
    """
    managed_base = unsigned(kern.globals.vm_first_phys)
    managed_limit = unsigned(kern.globals.vm_last_phys)

    # Already a PAI: nothing to do.
    if IsValidPai(pa):
        return pa

    # A kernel-managed physical address: index = page offset into the range.
    if managed_base <= pa < managed_limit:
        return (pa - managed_base) // unsigned(kern.globals.page_size)

    raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, managed_base, managed_limit))
1523
@lldb_command('pmappaindex')
def PmapPaIndex(cmd_args=None):
    """ Display both a physical address and physical address index (PAI) when
        provided with only one of those values.

        Syntax: (lldb) pmappaindex <physical address | PAI>

        NOTE: This macro will throw an exception if the input isn't a valid PAI
              and is also not a kernel-managed physical address.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmappaindex.")

    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmappaindex is only supported on ARM devices.")

    value = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    # Whichever form the input takes, derive the other one from it.
    if IsValidPai(value):
        pai = value
        phys_addr = ConvertPaiToPhysAddr(value)
    else:
        phys_addr = value
        pai = ConvertPhysAddrToPai(value)

    print("Physical Address: {:#x}".format(phys_addr))
    print("PAI: {:d}".format(pai))
1553