xref: /xnu-10002.81.5/tools/lldbmacros/pmap.py (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1from __future__ import absolute_import, division, print_function
2
3from builtins import hex
4from builtins import range
5
6from xnu import *
7import xnudefines
8from kdp import *
9from utils import *
10import struct
11
def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None):
    """ Read an integer from physical memory.
        params:
            phys_addr : int - Physical address to read
            bitsize   : int - width of the read in bits (8/16/32/64). defaults to 64 bit
            cpuval    : None (optional)
        returns:
            int - int value read from memory. in case of failure 0xBAD10AD is returned.
    """
    # Prefer the KDP protocol when the target is connected over it.
    if "kdp" == GetConnectionProtocol():
        return KDPReadPhysMEM(phys_addr, bitsize)

    # No KDP: translate the physical address into the kernel's virtual window
    # and read through the normal virtual-memory path.
    paddr_in_kva = kern.PhysToKernelVirt(int(phys_addr))
    if paddr_in_kva:
        ctype_for_width = {
            64: 'uint64_t *',
            32: 'uint32_t *',
            16: 'uint16_t *',
            8:  'uint8_t *',
        }.get(bitsize)
        if ctype_for_width is not None:
            target = kern.GetValueFromAddress(paddr_in_kva, ctype_for_width)
            return target.GetSBValue().Dereference().GetValueAsUnsigned()
    # Unknown width or untranslatable address.
    return 0xBAD10AD
36
@lldb_command('readphys')
def ReadPhys(cmd_args = None):
    """ Reads the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: readphys <nbits> <address>
        nbits: 8,16,32,64
        address: 1234 or 0x1234
    """
    # Guard clause: both <nbits> and <address> are required.
    if cmd_args is None or len(cmd_args) < 2:
        print("Insufficient arguments.", ReadPhys.__doc__)
        return False
    nbits, phys_addr = ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1])
    print("{0: <#x}".format(ReadPhysInt(phys_addr, nbits)))
    return True
54
# Width-specific shorthands: e.g. 'readphys8 <addr>' expands to 'readphys 8 <addr>'.
lldb_alias('readphys8', 'readphys 8 ')
lldb_alias('readphys16', 'readphys 16 ')
lldb_alias('readphys32', 'readphys 32 ')
lldb_alias('readphys64', 'readphys 64 ')
59
def KDPReadPhysMEM(address, bits):
    """ Setup the state for READPHYSMEM64 commands for reading data via kdp
        params:
            address : int - address where to read the data from
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            int: read value from memory.
            0xBAD10AD: if failed to read data.
    """
    retval = 0xBAD10AD
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return retval

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # The pack/unpack round trips byte-swap the address and byte count
        # into the wire representation expected by the tool.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]
        # Payload layout: 16 hex digits of address, 8 of byte count, 4 of padding.
        packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # Command code 25 carries the physical-memory-read request.
        ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            value = ret_obj.GetOutput()

            # Select byte-swapping pack/unpack formats for the requested width.
            # NOTE(review): if bits is not one of 8/16/32/64 these stay unbound
            # and the unpack below raises NameError.
            if bits == 64 :
                pack_fmt = "<Q"
                unpack_fmt = ">Q"
            if bits == 32 :
                pack_fmt = "<I"
                unpack_fmt = ">I"
            if bits == 16 :
                pack_fmt = "<H"
                unpack_fmt = ">H"
            if bits == 8 :
                pack_fmt = "<B"
                unpack_fmt = ">B"

            # Parse the last bits//4 hex digits of the command output (the +1
            # presumably absorbs a trailing newline in GetOutput — verify
            # against the tool's reply format) and byte-swap to host order.
            retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0]

    else:
        # Drive the kernel's manual_pkt interface directly: stage a
        # kdp_readphysmem64_req_t in the shared buffer, then flag it as input.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))

        # Clear the input flag before staging the request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return retval

        kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize()
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return retval

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size)

        # Stage the request header and arguments in the packet buffer.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Writing 1 to the input flag hands the staged packet to the kernel.
            if WriteInt32ToMemoryAddress(1, input_address):
                # now read data from the kdp packet
                data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data))
                if bits == 64 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 32 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 16 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 8 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()

    return retval
138
139
def KDPWritePhysMEM(address, intval, bits):
    """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp
        params:
            address : int - address where to save the data
            intval : int - integer value to be stored in memory
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            boolean: True if the write succeeded.
    """
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return False

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # Byte-swap the address and byte count into the wire representation.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]

        # Select byte-swapping pack/unpack formats for the requested width.
        # NOTE(review): if bits is not one of 8/16/32/64 these stay unbound
        # and the unpack below raises NameError.
        if bits == 64 :
            pack_fmt = ">Q"
            unpack_fmt = "<Q"
        if bits == 32 :
            pack_fmt = ">I"
            unpack_fmt = "<I"
        if bits == 16 :
            pack_fmt = ">H"
            unpack_fmt = "<H"
        if bits == 8 :
            pack_fmt = ">B"
            unpack_fmt = "<B"

        # Byte-swap the value to be written.
        data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0]

        # Payload layout: 16 hex digits of address, 8 of byte count, 4 of
        # padding, then 16 of data.
        packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # Command code 26 carries the physical-memory-write request.
        ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            return True
        else:
            return False

    else:
        # Drive the kernel's manual_pkt interface directly: stage a
        # kdp_writephysmem64_req_t in the shared buffer, then flag it as input.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))
        # Clear the input flag before staging the request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return False

        # The request length includes the variable-size payload (bits // 8 bytes).
        kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8)
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return False

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size)

        # Stage the request header and arguments in the packet buffer.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Copy the payload into the packet at the width-appropriate size.
            if bits == 8:
                if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 16:
                if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 32:
                if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 64:
                if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            # Writing 1 to the input flag hands the staged packet to the kernel.
            if WriteInt32ToMemoryAddress(1, input_address):
                return True
        return False
221
222
def WritePhysInt(phys_addr, int_val, bitsize = 64):
    """ Write an integer into physical memory at the given address.
        params:
            phys_addr : int - Physical address to write to
            int_val   : int - int value to write in memory
            bitsize   : int - width of the write in bits. defaults to 64 bit
        returns:
            bool - True if write was successful.
    """
    # Physical writes are only possible over a KDP connection.
    if GetConnectionProtocol() != "kdp":
        print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol())
        return False
    if KDPWritePhysMEM(phys_addr, int_val, bitsize):
        return True
    print("Failed to write via KDP.")
    return False
240
@lldb_command('writephys')
def WritePhys(cmd_args=None):
    """ writes to the specified untranslated address
        The first argument selects the width of the write, the second is
        interpreted as a physical address, and the value is written there.
        usage: writephys <nbits> <address> <value>
        nbits: 8,16,32,64
        address: 1234 or 0x1234
        value: int value to be written
        ex. (lldb)writephys 16 0x12345abcd 0x25
    """
    # Guard clause: <nbits>, <address> and <value> are all required.
    if cmd_args is None or len(cmd_args) < 3:
        print("Invalid arguments.", WritePhys.__doc__)
        # Return False on bad arguments, consistent with ReadPhys above.
        return False
    nbits = ArgumentStringToInt(cmd_args[0])
    phys_addr = ArgumentStringToInt(cmd_args[1])
    int_value = ArgumentStringToInt(cmd_args[2])
    print(WritePhysInt(phys_addr, int_value, nbits))
    return True
259
260
# Width-specific shorthands: e.g. 'writephys8 <addr> <val>' expands to 'writephys 8 <addr> <val>'.
lldb_alias('writephys8', 'writephys 8 ')
lldb_alias('writephys16', 'writephys 16 ')
lldb_alias('writephys32', 'writephys 32 ')
lldb_alias('writephys64', 'writephys 64 ')
265
266
def _PT_Step(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each x86-64 page table holds 512 8-byte entries; fetch the one at 'index'.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex-dump the entire 512-entry table.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Strip the low attribute bits and the high (bits 52+) bits to recover the
    # 4KB-aligned frame address; large-page frames are 2MB-aligned.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet mode: decode only what the walk needs (present, large, frame).
        if entry & 0x1 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            # Bit 7 set means this entry maps a large page directly.
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose mode: decode and name each attribute bit of the entry.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x1:
            out_string += " valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += " invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed (swapped-out) mapping.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " read-only"

        if entry & (0x1 << 2):
            out_string += " user"
        else:
            out_string += " supervisor"

        if entry & (0x1 << 3):
            out_string += " PWT"

        if entry & (0x1 << 4):
            out_string += " PCD"

        if entry & (0x1 << 5):
            out_string += " accessed"

        if entry & (0x1 << 6):
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False

        if entry & (0x1 << 8):
            out_string += " global"

        # Bits 9-10: available-to-software field.
        if entry & (0x3 << 9):
            out_string += " avail:{0:x}".format((entry >> 9) & 0x3)

        # Bit 63: execute-disable.
        if entry & (0x1 << 63):
            out_string += " noexec"
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
355
def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes for EPT pmap
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each EPT table holds 512 8-byte entries; fetch the one at 'index'.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex-dump the entire 512-entry table.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Strip the low attribute bits and the high (bits 52+) bits to recover the
    # 4KB-aligned frame address; large-page frames are 2MB-aligned.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet mode: an EPT entry is usable if any of the R/W/X permission
        # bits (bits 0-2) is set.
        if entry & 0x7 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            # Bit 7 set means this entry maps a large page directly.
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose mode: decode and name each attribute bit of the entry.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x7:
            out_string += "valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += "invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed (swapped-out) mapping.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        # Bits 0-2: read/write/execute permissions.
        if entry & 0x1:
            out_string += " readable"
        else:
            out_string += " no read"
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " no write"

        if entry & (0x1 << 2):
            out_string += " executable"
        else:
            out_string += " no exec"

        # Bits 3-5: memory (cache) type.
        ctype = entry & 0x38
        if ctype == 0x30:
            out_string += " cache-WB"
        elif ctype == 0x28:
            out_string += " cache-WP"
        elif ctype == 0x20:
            out_string += " cache-WT"
        elif ctype == 0x8:
            out_string += " cache-WC"
        else:
            out_string += " cache-NC"

        if (entry & 0x40) == 0x40:
            out_string += " Ignore-PTA"

        if (entry & 0x100) == 0x100:
            out_string += " accessed"

        if (entry & 0x200) == 0x200:
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
448
def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT):
    """ Walk the 4-level (IA-32e) page table hierarchy for a virtual address.
        params:
            pmap_addr_val - core.value representing kernel data of type pmap_addr_t
                            (physical address of the top-level PML4 table)
            vaddr : int   - virtual address to walk
            ept_pmap      - non-zero to decode entries in EPT format
            verbose_level - vHUMAN / vSCRIPT / vDETAIL
        returns:
            int - physical address vaddr translates to, 0 if there is no
                  valid translation.
    """
    step_fn = _PT_StepEPT if ept_pmap else _PT_Step
    # One row per paging level:
    # (name, VA shift of the index field, frame-offset mask, stop if the
    #  previous level already mapped a large page).  PML4/PDPT entries are
    # never treated as large by this walker; a PD entry may map a 2MB page.
    levels = [
        ('pml4', 39, 0x7fffffffff, False),  # bits 47:39
        ('pdpt', 30, 0x3fffffff, False),    # bits 38:30
        ('pdt', 21, 0x1fffff, True),        # bits 29:21
        ('pt', 12, 0xfff, True),            # bits 20:12
    ]
    pt_paddr = unsigned(pmap_addr_val)
    pt_valid = (unsigned(pmap_addr_val) != 0)
    pt_large = 0
    pframe_offset = 0
    for (name, shift, offset_mask, check_large) in levels:
        # Stop on an invalid entry, or once a large page terminated the walk.
        if not pt_valid or (check_large and pt_large):
            break
        pt_index = (vaddr >> shift) & 0x1ff
        pframe_offset = vaddr & offset_mask
        if verbose_level > vHUMAN:
            print("{0:s} (index {1:d}):".format(name, pt_index))
        (pt_paddr, pt_valid, pt_large) = step_fn(pt_paddr, pt_index, verbose_level)
    paddr = 0
    paddr_isvalid = False
    if pt_valid:
        # Combine the frame address with the offset bits of the final level
        # reached (page or large-page offset).
        paddr = pt_paddr + pframe_offset
        paddr_isvalid = True

    if verbose_level > vHUMAN:
        if paddr_isvalid:
            pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self)
            print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue))
        else:
            print("no translation")

    return paddr
512
def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT):
    """ Walk an x86_64 pmap for a virtual address.
        params: pmapval - core.value representing pmap_t in kernel
        vaddr:  int     - int representing virtual address to walk
        verbose_level   - vHUMAN / vSCRIPT / vDETAIL
        returns: int    - the translated physical address
    """
    # Bug fix: honor the caller-supplied verbose_level instead of always
    # rereading the global config['verbosity'], which silently ignored the
    # parameter.
    if pmapval.pm_cr3 != 0:
        if verbose_level > vHUMAN:
            print("Using normal Intel PMAP from pm_cr3\n")
        return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, verbose_level)
    else:
        # A zero pm_cr3 means this pmap is managed through EPT.
        if verbose_level > vHUMAN:
            print("Using EPT pmap from pm_eptp\n")
        return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, verbose_level)
526
def assert_64bit(val):
    """ Sanity-check that val fits in an unsigned 64-bit quantity. """
    assert val < (1 << 64)
529
ARM64_TTE_SIZE = 8        # bytes per translation table entry
ARM64_TTE_SHIFT = 3       # log2(ARM64_TTE_SIZE)
ARM64_VMADDR_BITS = 48    # width of the translated virtual address space

def PmapBlockOffsetMaskARM64(page_size, level):
    """ Mask covering the byte offset within a block mapping at the given
        translation level (level 3 corresponds to a single page). """
    assert 0 <= level <= 3
    entries_per_table = page_size // ARM64_TTE_SIZE
    block_span = page_size * (entries_per_table ** (3 - level))
    return block_span - 1

def PmapBlockBaseMaskARM64(page_size, level):
    """ Mask selecting the block base-address bits: the complement of the
        offset mask, limited to the addressable VA width. """
    assert 0 <= level <= 3
    va_mask = (1 << ARM64_VMADDR_BITS) - 1
    return va_mask & ~PmapBlockOffsetMaskARM64(page_size, level)
542
def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False):
    """ Display the bits of an ARM64 translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level.  Valid values are 1, 2, or 3.
        stage2: True to decode the entry as a stage-2 descriptor, False for
                stage-1 (different attribute field layouts).
        is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise.
    """
    assert(isinstance(level, numbers.Integral))
    assert_64bit(tte)

    # Bit 0 clear: the entry is invalid; nothing else to decode.
    if tte & 0x1 == 0x0:
        print("Invalid.")
        return

    # Bit 1 set on a non-leaf level: the entry points to a next-level table.
    if (tte & 0x2 == 0x2) and (level != 0x3):
        print("Type       = Table pointer.")
        print("Table addr = {:#x}.".format(tte & 0xfffffffff000))

        # Stage-1 table descriptors carry hierarchical permission bits.
        if not stage2:
            print("PXN        = {:#x}.".format((tte >> 59) & 0x1))
            print("XN         = {:#x}.".format((tte >> 60) & 0x1))
            print("AP         = {:#x}.".format((tte >> 61) & 0x3))
            print("NS         = {:#x}.".format(tte >> 63))
    else:
        # Block (or level-3 page) descriptor: decode the attribute fields.
        print("Type       = Block.")

        if stage2:
            print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf))
        else:
            # Bits 4:2 index the MAIR attribute table.
            attr_index = (tte >> 2) & 0x7
            attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU',
                3: 'CACHE DISABLE', 4: 'INNERWRITEBACK', 5: 'POSTED',
                6: 'POSTED_REORDERED', 7: 'POSTED_COMBINED_REORDERED' }

            # Only show the string version of the AttrIdx for CPU mappings since
            # these values don't apply to IOMMU mappings.
            if is_iommu_tte:
                print("AttrIdx    = {:#x}.".format(attr_index))
            else:
                print("AttrIdx    = {:#x} ({:s}).".format(attr_index, attr_string[attr_index]))
            print("NS         = {:#x}.".format((tte >> 5) & 0x1))

        # Access permissions live in different fields for stage 1 vs stage 2.
        if stage2:
            print("S2AP       = {:#x}.".format((tte >> 6) & 0x3))
        else:
            print("AP         = {:#x}.".format((tte >> 6) & 0x3))

        print("SH         = {:#x}.".format((tte >> 8) & 0x3))
        print("AF         = {:#x}.".format((tte >> 10) & 0x1))

        if not stage2:
            print("nG         = {:#x}.".format((tte >> 11) & 0x1))

        print("HINT       = {:#x}.".format((tte >> 52) & 0x1))

        # Execute-never bits: stage 2 uses a 2-bit S2XN field, stage 1 has
        # separate privileged/unprivileged bits.
        if stage2:
            print("S2XN       = {:#x}.".format((tte >> 53) & 0x3))
        else:
            print("PXN        = {:#x}.".format((tte >> 53) & 0x1))
            print("XN         = {:#x}.".format((tte >> 54) & 0x1))

        print("SW Use     = {:#x}.".format((tte >> 55) & 0xf))

    return
607
def PmapTTnIndexARM64(vaddr, pmap_pt_attr):
    """ Return the translation-table index of vaddr at every level described
        by pmap_pt_attr, ordered from level 0 through the max level. """
    level_info = pmap_pt_attr.pta_level_info
    num_levels = unsigned(pmap_pt_attr.pta_max_level) + 1
    return [(vaddr & unsigned(level_info[lvl].index_mask)) >> unsigned(level_info[lvl].shift)
            for lvl in range(num_levels)]
617
def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN):
    """ Walk an ARM64 translation table hierarchy for a virtual address.
        params:
            pmap_pt_attr  - page-table attribute structure describing the
                            geometry (page size, root/max levels, per-level
                            index masks and shifts) of this walk
            root_tte      - value for the root translation table (TTE array)
            vaddr : int   - virtual address to translate
            verbose_level - vHUMAN / vSCRIPT / vDETAIL
        returns:
            int - translated physical address, or -1 if the walk reached an
                  invalid entry.
    """
    # Accept any integral type; replaces the py2-era `type(vaddr) in (int, int)`
    # (a redundant tuple) and matches the isinstance asserts used elsewhere here.
    assert(isinstance(vaddr, numbers.Integral))
    assert_64bit(vaddr)
    assert_64bit(root_tte)

    # Obtain pmap attributes
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr)
    stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False)

    # The pmap starts at a page table level that is defined by register
    # values; the root level can be obtained from the attributes structure
    level = unsigned(pmap_pt_attr.pta_root_level)
    tte = int(unsigned(root_tte[tt_index[level]]))

    # Walk the page tables
    paddr = -1
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    while (level <= max_level):
        if verbose_level >= vSCRIPT:
            print("L{} entry: {:#x}".format(level, tte))
        if verbose_level >= vDETAIL:
            PmapDecodeTTEARM64(tte, level, stage2)

        # Valid bit clear: translation fault; abort the walk.
        if tte & 0x1 == 0x0:
            if verbose_level >= vHUMAN:
                print("L{} entry invalid: {:#x}\n".format(level, tte))
            break

        # Handle leaf entry (block mapping, or any entry at the last level)
        if tte & 0x2 == 0x0 or level == max_level:
            base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level)
            offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level)
            paddr = tte & base_mask
            paddr = paddr | (vaddr & offset_mask)

            # NOTE(review): block mappings print unconditionally, regardless
            # of verbose_level — presumably intentional; confirm.
            if level != max_level:
                print("phys: {:#x}".format(paddr))

            break
        else:
        # Handle page table entry
            next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1])
            assert(isinstance(next_phys, numbers.Integral))

            next_virt = kern.PhysToKernelVirt(next_phys)
            assert(isinstance(next_virt, numbers.Integral))

            if verbose_level >= vDETAIL:
                print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt))

            ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*")
            tte = int(unsigned(dereference(ttep)))
            assert(isinstance(tte, numbers.Integral))

        # We've parsed one level, so go to the next level
        assert(level <= 3)
        level = level + 1


    if verbose_level >= vHUMAN:
        # -1 is the "walk failed" sentinel.  Compare against it explicitly:
        # the old truthiness test reported -1 as a translation and a
        # legitimate physical address 0 as "(no translation)".
        if paddr != -1:
            print("Translation of {:#x} is {:#x}.".format(vaddr, paddr))
        else:
            print("(no translation)")

    return paddr
697
def PmapWalk(pmap, vaddr, verbose_level = vHUMAN):
    """ Dispatch a page-table walk to the architecture-specific walker. """
    arch = kern.arch
    if arch == 'x86_64':
        return PmapWalkX86_64(pmap, vaddr, verbose_level)
    if arch.startswith('arm64'):
        # Use the pmap's own attributes when present; otherwise fall back to
        # the kernel's native page-table attributes.
        if hasattr(pmap, 'pmap_pt_attr'):
            pmap_pt_attr = pmap.pmap_pt_attr
        else:
            pmap_pt_attr = kern.globals.native_pt_attr
        return PmapWalkARM64(pmap_pt_attr, pmap.tte, vaddr, verbose_level)
    raise NotImplementedError("PmapWalk does not support {0}".format(arch))
707
@lldb_command('pmap_walk')
def PmapWalkHelper(cmd_args=None):
    """ Perform a page-table walk in <pmap> for <virtual_address>.
        Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e]
            Multiple -v's can be specified for increased verbosity
    """
    # Guard clause: both <pmap> and <virtual_address> are required.
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to pmap_walk.")

    target_pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t')
    target_va = ArgumentStringToInt(cmd_args[1])
    PmapWalk(target_pmap, target_va, config['verbosity'])
721
def GetMemoryAttributesFromUser(requested_type):
    """ Map a user-supplied page-table geometry name ('4k', '16k', '16k_s2',
        case-insensitive) to the matching kernel pmap attribute global.
        Returns None for unrecognized names (or when the stage-2 attributes
        are not present in this kernel). """
    attrs_by_name = {
        '4k' : kern.globals.pmap_pt_attr_4k,
        '16k' : kern.globals.pmap_pt_attr_16k,
        '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2') else None,
    }
    return attrs_by_name.get(requested_type.lower())
734
@lldb_command('ttep_walk')
def TTEPWalkPHelper(cmd_args=None):
    """ Perform a page-table walk in <root_ttep> for <virtual_address>.
        Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e]
        Multiple -v's can be specified for increased verbosity
        """
    # Guard clauses: need both arguments, and this is arm64-only.
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to ttep_walk.")
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch))

    # The root table is given as a physical address; map it into the kernel
    # virtual window so it can be indexed as a TTE array.
    root_ttep = ArgumentStringToInt(cmd_args[0])
    root_tte = kern.GetValueFromAddress(kern.PhysToKernelVirt(root_ttep), 'unsigned long *')
    target_va = ArgumentStringToInt(cmd_args[1])

    # Optional third argument selects the page-table geometry.
    if len(cmd_args) < 3:
        pmap_pt_attr = kern.globals.native_pt_attr
    else:
        pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
    if pmap_pt_attr is None:
        raise ArgumentError("Invalid translation attribute type.")

    return PmapWalkARM64(pmap_pt_attr, root_tte, target_va, config['verbosity'])
755
@lldb_command('decode_tte')
def DecodeTTE(cmd_args=None):
    """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2]
        Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2]
    """
    # Guard clauses: validate argument count, stage token, and architecture.
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to decode_tte.")
    if len(cmd_args) > 2 and cmd_args[2] not in ["s1", "s2"]:
        raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2]))
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("decode_tte does not support {0}".format(kern.arch))
    stage2 = (len(cmd_args) > 2 and cmd_args[2] == "s2")
    PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2)
770
# High bits of a pv_head_table entry that carry flag/lock information.  They
# are OR'd back into raw entries before the pointer bits are interpreted
# (see PVWalkARM/GetPtDesc), since LLDB-visible pointers have these bits set.
PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) | (1 << 58) | (1 << 57) | (1 << 56) | (1 << 55) | (1 << 54)
PVH_HIGH_FLAGS_ARM32 = (1 << 31)
773
def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN):
    """ Dump information about a single mapping retrieved by the pv_head_table.

        pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP,
              or None if type PVH_TYPE_PTEP.
        ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with
              the high flags already set (the type bits don't need to be cleared).
              For type PVH_TYPE_PVEP this will be the value retrieved from the
              pve_ptep[] array.
        verbose_level: at vDETAIL or higher, an extra line naming the owner of
              the mapping is printed after the PTE line.
    """
    # Only arm64 encodes IOMMU markers in the PTEP value; zeroed flags on other
    # arm targets make the IOMMU branches below unreachable.
    if kern.arch.startswith('arm64'):
        iommu_flag = 0x4
        iommu_table_flag = 1 << 63
    else:
        iommu_flag = 0
        iommu_table_flag = 0

    # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries.
    if pvep is not None and (ptep & 0x1):
        # Note: It's not possible for IOMMU mappings to be marked as alt acct so
        # setting this string is mutually exclusive with setting the IOMMU strings.
        pte_str = ' (alt acct)'
    else:
        pte_str = ''

    if pvep is not None:
        pve_str = 'PVEP {:#x}, '.format(pvep)
    else:
        pve_str = ''

    # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this
    # either does nothing or clears out the AltAcct bit.
    ptep = ptep & ~0x3

    # When printing with extra verbosity, print an extra newline that describes
    # who owns the mapping.
    extra_str = ''

    if ptep & iommu_flag:
        # The mapping is an IOMMU Mapping
        ptep = ptep & ~iommu_flag

        # Due to LLDB automatically setting all the high bits of pointers, when
        # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set
        # the iommu_table_flag, which means this check only works for PVH entries
        # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH
        # entry which has the right casting applied to avoid this issue).
        #
        # Why don't we just do the same casting for pve_ptep[] you ask? Well not
        # for a lack of trying, that's for sure. If you can figure out how to
        # cast that array correctly, then be my guest.
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            if ptep & iommu_table_flag:
                pte_str = ' (IOMMU table), entry'
                ptd = GetPtDesc(KVToPhysARM(ptep))
                iommu = dereference(ptd.iommu)
            else:
                # Instead of dumping the PTE (since we don't have that), dump the
                # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc).
                #
                # This works because later on when the "ptep" is dereferenced as a
                # PTE pointer (uint64_t pointer), the descriptor pointer will be
                # dumped as that's the first 64-bit value in the IOMMU state object.
                pte_str = ' (IOMMU state), descriptor'
                ptep = ptep | iommu_table_flag
                iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *'))

            # For IOMMU mappings, dump who owns the mapping as the extra string.
            extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name)
            if unsigned(iommu.name) != 0:
                extra_str += '/{:s}'.format(iommu.name)
            extra_str += ' (iommu state: {:x})'.format(addressof(iommu))
        else:
            # Higher page-protection types: only the owning IOMMU pointer from
            # the page-table descriptor is available here.
            ptd = GetPtDesc(KVToPhysARM(ptep))
            extra_str = 'Mapped by IOMMU {:x}'.format(ptd.iommu)
    else:
        # The mapping is a CPU Mapping
        pte_str += ', entry'
        ptd = GetPtDesc(KVToPhysARM(ptep))
        if ptd.pmap == kern.globals.kernel_pmap:
            extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap)
        elif verbose_level >= vDETAIL:
            task = TaskForPmapHelper(ptd.pmap)
            extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>")
    # Reading the PTE value can fail (e.g. page not accessible to the
    # debugger); fall back to a placeholder rather than aborting the walk.
    try:
        print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))))
    except:
        print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str))

    if verbose_level >= vDETAIL:
        print("    |-- {:s}".format(extra_str))
865
def PVWalkARM(pai, verbose_level = vHUMAN):
    """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap.

        pai: physical address index (PAI) corresponding to the pv_head_table
             entry to walk.
        verbose_level: Set to vSCRIPT or higher to print extra info around the
                       the pv_head_table/pp_attr_table flags and to dump the
                       pt_desc_t object if the type is a PTD.
    """
    # LLDB will automatically try to make pointer values dereferencable by
    # setting the upper bits if they aren't set. We need to parse the flags
    # stored in the upper bits later, so cast the pv_head_table to an array of
    # integers to get around this "feature". We'll add the upper bits back
    # manually before deref'ing anything.
    pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*")
    pvh_raw = unsigned(pv_head_table[pai])
    pvh = pvh_raw
    # The low two bits of a PVH entry encode its type.
    pvh_type = pvh & 0x3

    print("PVH raw value: {:#x}".format(pvh_raw))
    if kern.arch.startswith('arm64'):
        pvh = pvh | PVH_HIGH_FLAGS_ARM64
    else:
        pvh = pvh | PVH_HIGH_FLAGS_ARM32

    if pvh_type == 0:
        print("PVH type: NULL")
    elif pvh_type == 3:
        print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3))
    elif pvh_type == 2:
        print("PVH type: single PTE")
        PVDumpPTE(None, pvh, verbose_level)
    elif pvh_type == 1:
        pvep = pvh & ~0x3
        print("PVH type: PTE list")
        # Each pv_entry_t carries two PTE slots (pve_ptep[0..1]); advance to
        # the next PVE in the chain only after both slots have been visited.
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")

            if pve.pve_ptep[pve_ptep_idx] != 0:
                PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level)

            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)

    if verbose_level >= vDETAIL:
        if (pvh_type == 1) or (pvh_type == 2):
            # Dump pv_head_table flags when there's a valid mapping.
            pvh_flags = []

            if pvh_raw & (1 << 62):
                pvh_flags.append("CPU")
            if pvh_raw & (1 << 60):
                pvh_flags.append("EXEC")
            if pvh_raw & (1 << 59):
                pvh_flags.append("LOCKDOWN_KC")
            if pvh_raw & (1 << 58):
                pvh_flags.append("HASHED")
            if pvh_raw & (1 << 57):
                pvh_flags.append("LOCKDOWN_CS")
            if pvh_raw & (1 << 56):
                pvh_flags.append("LOCKDOWN_RO")
            # Bit 61 (the LOCK flag) is only meaningful on arm64.
            if kern.arch.startswith('arm64') and pvh_raw & (1 << 61):
                pvh_flags.append("LOCK")

            print("PVH Flags: {}".format(pvh_flags))

        # Always dump pp_attr_table flags (these can be updated even if there aren't mappings).
        ppattr = unsigned(kern.globals.pp_attr_table[pai])
        print("PPATTR raw value: {:#x}".format(ppattr))

        # The low 6 bits hold the WIMG attributes; the rest are single flags.
        ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)]
        if ppattr & 0x40:
            ppattr_flags.append("REFERENCED")
        if ppattr & 0x80:
            ppattr_flags.append("MODIFIED")
        if ppattr & 0x100:
            ppattr_flags.append("INTERNAL")
        if ppattr & 0x200:
            ppattr_flags.append("REUSABLE")
        if ppattr & 0x400:
            ppattr_flags.append("ALTACCT")
        if ppattr & 0x800:
            ppattr_flags.append("NOENCRYPT")
        if ppattr & 0x1000:
            ppattr_flags.append("REFFAULT")
        if ppattr & 0x2000:
            ppattr_flags.append("MODFAULT")
        if ppattr & 0x4000:
            ppattr_flags.append("MONITOR")
        if ppattr & 0x8000:
            ppattr_flags.append("NO_MONITOR")

        print("PPATTR Flags: {}".format(ppattr_flags))

        if pvh_type == 3:
            def RunLldbCmdHelper(command):
                """Helper for dumping an LLDB command right before executing it
                and printing the results.
                command: The LLDB command (as a string) to run.

                Example input: "p/x kernel_pmap".
                """
                print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command)))
            # Dump the page table descriptor object
            ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
            RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd))

            # Depending on the system, more than one ptd_info can be associated
            # with a single PTD. Only dump the first PTD info and assume the
            # user knows to dump the rest if they're on one of those systems.
            RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]")
980
@lldb_command('pv_walk')
def PVWalk(cmd_args=None):
    """ Show mappings for <physical_address | PAI> tracked in the PV list.
        Syntax: (lldb) pv_walk <physical_address | PAI> [-vv]

        Extra verbosity will pretty print the pv_head_table/pp_attr_table flags
        as well as dump the page table descriptor (PTD) struct if the entry is a
        PTD.
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pv_walk.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pv_walk does not support {0}".format(kern.arch))

    phys = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    # ConvertPhysAddrToPai accepts either a PAI (returned unchanged) or a
    # kernel-managed physical address (converted, with validation).
    PVWalkARM(ConvertPhysAddrToPai(phys), config['verbosity'])
1002
@lldb_command('kvtophys')
def KVToPhys(cmd_args=None):
    """ Translate a kernel virtual address to the corresponding physical address.
        Assumes the virtual address falls within the kernel static region.
        Syntax: (lldb) kvtophys <kernel virtual address>
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to kvtophys.")
    va = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    if kern.arch.startswith('arm'):
        print("{:#x}".format(KVToPhysARM(va)))
    elif kern.arch == 'x86_64':
        # The x86_64 physmap is a single linear window offset by physmap_base.
        print("{:#x}".format(va - unsigned(kern.globals.physmap_base)))
1015
@lldb_command('phystokv')
def PhysToKV(cmd_args=None):
    """ Translate a physical address to the corresponding static kernel virtual address.
        Assumes the physical address corresponds to managed DRAM.
        Syntax: (lldb) phystokv <physical address>
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to phystokv.")
    pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print("{:#x}".format(kern.PhysToKernelVirt(pa)))
1025
def KVToPhysARM(addr):
    """ Translate a kernel virtual address inside the physical aperture to its
        physical address.

        addr: int - kernel virtual address
        returns: int - physical address
        raises: ValueError on configurations where the ptov_table path is
                unavailable (see note below).
    """
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        # Scan the phys-to-virt region table for a region covering addr.
        ptov_table = kern.globals.ptov_table
        for i in range(0, kern.globals.ptov_index):
            if (addr >= int(unsigned(ptov_table[i].va))) and (addr < (int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].len)))):
                return (addr - int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].pa)))
    else:
        # NOTE(review): this branch raises unconditionally without consulting
        # any lookup table, even though the message says "not found" -- confirm
        # whether a region search was intended here for higher page protection
        # types.
        raise ValueError("VA {:#x} not found in physical region lookup table".format(addr))
    # Not covered by any ptov_table region: fall back to the static linear
    # mapping defined by gVirtBase/gPhysBase.
    return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase))
1035
1036
def GetPtDesc(paddr):
    """ Return the pt_desc_t pointer for the physical page containing paddr.

        raises: ValueError when the page's pv_head_table entry is not of the
                page-table-descriptor type (low type bits != 0x3).
    """
    page_num = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size
    entry = unsigned(kern.globals.pv_head_table[page_num])
    # Fold the arch-specific high flag bits back in before interpreting the
    # value as a pointer.
    if kern.arch.startswith('arm64'):
        entry |= PVH_HIGH_FLAGS_ARM64
    else:
        entry |= PVH_HIGH_FLAGS_ARM32
    if (entry & 0x3) != 0x3:
        raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(entry))
    return kern.GetValueFromAddress(entry & ~0x3, 'pt_desc_t *')
1049
1050
def ShowPTEARM(pte, page_size, level):
    """ Display vital information about an ARM page table entry
        pte: kernel virtual address of the PTE.  page_size and level may be None,
        in which case we'll try to infer them from the page table descriptor.
        Inference of level may only work for L2 and L3 TTEs depending upon system
        configuration.
    """
    pt_index = 0
    stage2 = False
    def GetPageTableInfo(ptd, paddr):
        # Fix: 'stage2' was missing from this nonlocal declaration, so the
        # assignment below created a function-local variable and the outer
        # stage2 always stayed False -- the caller then always labeled the
        # mapping "VA" and decoded it as stage 1 even for stage-2 pmaps.
        nonlocal pt_index, page_size, level, stage2
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            # First load ptd_info[0].refcnt so that we can check if this is an IOMMU page.
            # IOMMUs don't split PTDs across multiple 4K regions as CPU page tables sometimes
            # do, so the IOMMU refcnt token is always stored at index 0.  If this is not
            # an IOMMU page, we may end up using a different final value for pt_index below.
            refcnt = ptd.ptd_info[0].refcnt
            # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001.
            is_iommu_pte = (refcnt & 0x8000) == 0x8000
            if not is_iommu_pte and page_size is None and hasattr(ptd.pmap, 'pmap_pt_attr'):
                page_size = ptd.pmap.pmap_pt_attr.pta_page_size
            elif page_size is None:
                page_size = kern.globals.native_pt_attr.pta_page_size
            pt_index = (pte % kern.globals.page_size) // page_size
            refcnt =  ptd.ptd_info[pt_index].refcnt
            if not is_iommu_pte and hasattr(ptd.pmap, 'pmap_pt_attr') and hasattr(ptd.pmap.pmap_pt_attr, 'stage2'):
                stage2 = ptd.pmap.pmap_pt_attr.stage2
            if level is None:
                # An L2 table holding block/table entries carries a 0x4000
                # refcnt token; otherwise assume a leaf (L3) table.
                if refcnt == 0x4000:
                    level = 2
                else:
                    level = 3
            if is_iommu_pte:
                iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name)
                if unsigned(dereference(ptd.iommu).name) != 0:
                    iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name)
                info_str = "iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name)
            else:
                info_str = None
            return (int(unsigned(refcnt)), level, info_str)
        else:
            raise ValueError("Unable to retrieve PTD refcnt")
    pte_paddr = KVToPhysARM(pte)
    ptd = GetPtDesc(pte_paddr)
    refcnt, level, info_str = GetPageTableInfo(ptd, pte_paddr)
    wiredcnt = ptd.ptd_info[pt_index].wiredcnt
    va = ptd.va[pt_index]
    print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x}, va: {:#x})".format(ptd, refcnt, wiredcnt, va))

    # The pmap/iommu field is a union, so only print the correct one.
    if info_str is not None:
        print(info_str)
    else:
        if ptd.pmap == kern.globals.kernel_pmap:
            pmap_str = "(kernel_pmap)"
        else:
            task = TaskForPmapHelper(ptd.pmap)
            pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>")
        print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str))
        # Compute the VA span covered by one entry at this level, then the
        # specific VA this PTE maps based on its offset within the table page.
        nttes = page_size // 8
        granule = page_size * (nttes ** (3 - level))
        pte_pgoff = pte % page_size
        pte_pgoff = pte_pgoff // 8
        print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(ptd.va[pt_index])) + (pte_pgoff * granule)))
        pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *'))))
        print("value: {:#x}".format(pteval))
        print("level: {:d}".format(level))
        PmapDecodeTTEARM64(pteval, level, stage2)
1119
@lldb_command('showpte')
def ShowPTE(cmd_args=None):
    """ Display vital information about the page table entry at VA <pte>
        Syntax: (lldb) showpte <pte_va> [level] [4k|16k|16k_s2]
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to showpte.")
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("showpte does not support {0}".format(kern.arch))

    # Optional third argument selects a page-table geometry; otherwise let
    # ShowPTEARM infer the page size from the page-table descriptor.
    page_size = None
    if len(cmd_args) >= 3:
        attrs = GetMemoryAttributesFromUser(cmd_args[2])
        if attrs is None:
            raise ArgumentError("Invalid translation attribute type.")
        page_size = attrs.pta_page_size

    # Optional second argument forces the translation level.
    level = None
    if len(cmd_args) >= 2:
        level = ArgumentStringToInt(cmd_args[1])

    ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), page_size, level)
1141
def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action):
    """ Perform the specified action for all valid mappings in an ARM64 translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 2 or 3
        va: virtual address mapped by the first entry of tt
        action: callback for each valid TTE; recursion into a child table only
                happens when it returns True
    """
    # Obtain pmap attributes
    pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    for i in range(nttes):
        try:
            tte = tt[i]
            # Bit 0 clear: invalid entry, skip it.
            if tte & 0x1 == 0x0:
                continue

            tt_next = None
            paddr = unsigned(tte) & unsigned(page_base_mask)

            # Handle leaf entry
            # (bit 1 clear means a block mapping; at max_level every valid
            # entry is a leaf regardless of bit 1)
            if tte & 0x2 == 0x0 or level == max_level:
                type = 'block' if level < max_level else 'entry'
                granule = PmapBlockOffsetMaskARM64(page_size, level) + 1
            else:
            # Handle page table entry
                type = 'table'
                granule = page_size
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')

            # VA covered by entry i = base VA + i * (span of one entry).
            mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i)
            if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule):
                if tt_next is not None:
                    FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action)

        except Exception as exc:
            # Keep scanning the remaining entries even if one is unreadable.
            print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))))
1183
def ScanPageTables(action, targetPmap=None):
    """ Perform the specified action for all valid mappings in all page tables,
        optionally restricted to a single pmap.
        action: callback invoked for the root table and, via
                FindMappingAtLevelARM64, for every valid TTE.
        targetPmap: pmap whose page table should be scanned.  If None, all pmaps on system will be scanned.
    """
    print("Scanning all available translation tables.  This may take a long time...")
    def ScanPmap(pmap, action):
        if kern.arch.startswith('arm64'):
            # Obtain pmap attributes
            pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
            granule = pmap_pt_attr.pta_page_size
            level = unsigned(pmap_pt_attr.pta_root_level)
            # Number of entries in the root table, derived from the root
            # level's index mask and shift.
            root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
                unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1

        # NOTE(review): pmap_pt_attr/granule are only assigned in the arm64
        # branch above; on any other architecture the call below would raise
        # NameError.  All current callers are arm-gated -- confirm before
        # reusing this elsewhere.
        if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule):
            if kern.arch.startswith('arm64'):
                FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action)

    if targetPmap is not None:
        ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action)
    else:
        # No target specified: walk every pmap on the system.
        for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
            ScanPmap(pmap, action)
1208
@lldb_command('showallmappings')
def ShowAllMappings(cmd_args=None):
    """ Find and display all available mappings on the system for
        <physical_address>.  Optionally only searches the pmap
        specified by [<pmap>]
        Syntax: (lldb) showallmappings <physical_address> [<pmap>]
        WARNING: this macro can take a long time (up to 30min.) to complete!
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to showallmappings.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showallmappings does not support {0}".format(kern.arch))
    target_pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    chosen_pmap = cmd_args[1] if len(cmd_args) > 1 else None

    def report_if_match(pmap, level, type, tte, paddr, va, granule):
        # Print any mapping whose physical range covers the target address;
        # always return True so the scan keeps descending into child tables.
        if paddr <= target_pa < (paddr + granule):
            print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va))
        return True

    ScanPageTables(report_if_match, chosen_pmap)
1230
@lldb_command('showptusage')
def ShowPTUsage(cmd_args=None):
    """ Display a summary of pagetable allocations for a given pmap.
        Syntax: (lldb) showptusage [<pmap>]
        WARNING: this macro can take a long time (> 1hr) to complete!
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showptusage does not support {0}".format(kern.arch))
    targetPmap = None
    # Fix: guard against cmd_args being None -- sibling commands in this file
    # treat a missing argument list as None, and the previous unconditional
    # len(cmd_args) would raise TypeError in that case.
    if cmd_args is not None and len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    # Single-element lists act as mutable cells so the nested callback can
    # update these counters.
    lastPmap = [None]
    numTables = [0]
    numUnnested = [0]
    numPmaps = [0]
    def printValidTTE(pmap, level, type, tte, paddr, va, granule):
        unnested = ""
        nested_region_addr = int(unsigned(pmap.nested_region_addr))
        nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size))
        # Print a header whenever the scan moves on to a new pmap.
        if lastPmap[0] is None or (pmap != lastPmap[0]):
            lastPmap[0] = pmap
            numPmaps[0] = numPmaps[0] + 1
            print ("pmap {:#x}:".format(pmap))
        if type == 'root':
            return True
        # An L2 table covering the nested region but owned by this pmap
        # (rather than a shared pmap) has likely been unnested.
        if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end):
            ptd = GetPtDesc(paddr)
            if ptd.pmap != pmap:
                return False
            else:
                numUnnested[0] = numUnnested[0] + 1
                unnested = " (likely unnested)"
        numTables[0] = numTables[0] + 1
        print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested)
        # Do not descend past L2; leaf (L3) tables are not themselves counted.
        if level == 2:
            return False
        else:
            return True
    ScanPageTables(printValidTTE, targetPmap)
    print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables[0], numUnnested[0], numPmaps[0]))
1271
def checkPVList(pmap, level, type, tte, paddr, va, granule):
    """ Checks an ARM physical-to-virtual mapping list for consistency errors.
        pmap: owner of the translation table
        level: translation table level.  PV lists will only be checked for L2 (arm32) or L3 (arm64) tables.
        type: unused
        tte: KVA of PTE to check for presence in PV list.  If None, presence check will be skipped.
        paddr: physical address whose PV list should be checked.  Need not be page-aligned.
        va: unused
        granule: unused
    """
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    page_size = kern.globals.page_size
    if kern.arch.startswith('arm64'):
        page_offset_mask = (page_size - 1)
        page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
        paddr = paddr & page_base_mask
        max_level = 3
        pvh_set_bits = PVH_HIGH_FLAGS_ARM64
    # NOTE(review): max_level/pvh_set_bits are only assigned on arm64; other
    # architectures would raise NameError below.  Current callers are
    # arm64-gated.
    if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys:
        return True
    pn = (paddr - vm_first_phys) // page_size
    pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits
    pvh_type = pvh & 0x3
    if pmap is not None:
        pmap_str = "pmap: {:#x}: ".format(pmap)
    else:
        pmap_str = ''
    if tte is not None:
        tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr)
    else:
        tte_str = "paddr {:#x}: ".format(paddr)
    if pvh_type == 0 or pvh_type == 3:
        # A mapped leaf page must have a PTE-typed PVH entry (single PTE or
        # PTE list), never NULL or a page-table descriptor.
        print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type))
    elif pvh_type == 2:
        ptep = pvh & ~0x3
        if tte is not None and ptep != unsigned(tte):
            print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep))
        try:
            # The PTE's output address must point back at the page being checked.
            pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
            if (pte != paddr):
                print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
        except Exception as exc:
            print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep))
    elif pvh_type == 1:
        pvep = pvh & ~0x3
        tte_match = False
        # Each pv_entry_t holds two PTE slots; advance to the next PVE only
        # after both slots have been examined.
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")
            ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3
            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)
            if ptep == 0:
                continue
            if tte is not None and ptep == unsigned(tte):
                tte_match = True
            try:
                pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
                if (pte != paddr):
                    print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
            except Exception as exc:
                print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep))
        if tte is not None and not tte_match:
            # Fix: the previous format string had a third '{:s}' placeholder
            # fed with the integer paddr, so this diagnostic itself raised
            # ValueError ("Unknown format code 's'...") instead of printing.
            # tte_str already contains paddr, so the extra argument is dropped.
            print("{:s}{:s}not found in PV list".format(pmap_str, tte_str))
    return True
1339
@lldb_command('pv_check', 'P')
def PVCheck(cmd_args=None, cmd_options={}):
    """ Check the physical-to-virtual mapping for a given PTE or physical address
        Syntax: (lldb) pv_check <addr> [-P]
            -P        : Interpret <addr> as a physical address rather than a PTE
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pv_check.")
    if kern.arch.startswith('arm64'):
        # PV lists are only consulted for leaf (L3) mappings on arm64.
        level = 3
    else:
        raise NotImplementedError("pv_check does not support {0}".format(kern.arch))
    if "-P" in cmd_options:
        # Physical-address mode: no PTE available to cross-check against the
        # PV list, only the list contents themselves are validated.
        pte = None
        pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long")))
    else:
        # PTE mode: read the PTE's output address to locate the PV list.
        pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *')
        pa = int(unsigned(dereference(pte)))
    checkPVList(None, level, None, pte, pa, 0, None)
1359
@lldb_command('check_pmaps')
def CheckPmapIntegrity(cmd_args=None):
    """ Performs a system-wide integrity check of all PTEs and associated PV lists.
        Optionally only checks the pmap specified by [<pmap>]
        Syntax: (lldb) check_pmaps [<pmap>]
        WARNING: this macro can take a HUGE amount of time (several hours) if you do not
        specify [pmap] to limit it to a single pmap.  It will also give false positives
        for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM.
        Use of this macro without the [<pmap>] argument is heavily discouraged.
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch))
    targetPmap = None
    # Fix: guard against cmd_args being None -- sibling commands in this file
    # treat a missing argument list as None, and the previous unconditional
    # len(cmd_args) would raise TypeError in that case.
    if cmd_args is not None and len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    ScanPageTables(checkPVList, targetPmap)
1376
@lldb_command('pmapsforledger')
def PmapsForLedger(cmd_args=None):
    """ Find and display all pmaps currently using <ledger>.
        Syntax: (lldb) pmapsforledger <ledger>
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pmapsforledger.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch))
    target_ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t')
    # Walk every pmap on the system and report those sharing the ledger.
    for candidate in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if candidate.ledger == target_ledger:
            print("pmap: {:#x}".format(candidate))
1390
1391
def IsValidPai(pai):
    """ Given an unsigned value, detect whether that value is a valid physical
        address index (PAI). It does this by first computing the last possible
        PAI and comparing the input to that.

        All contemporary SoCs reserve the bottom part of the address space, so
        there shouldn't be any valid physical addresses between zero and the
        last PAI either.
    """
    page_size = unsigned(kern.globals.page_size)
    first_phys = unsigned(kern.globals.vm_first_phys)
    last_phys = unsigned(kern.globals.vm_last_phys)

    # The highest PAI is fixed by the span of kernel-managed physical memory.
    last_pai = (last_phys - first_phys) // page_size
    return 0 <= pai < last_pai
1410
def ConvertPaiToPhysAddr(pai):
    """ Convert the given Physical Address Index (PAI) into a physical address.

        If the input isn't a valid PAI (it's most likely already a physical
        address), then just return back the input unchanged.
    """
    if not IsValidPai(pai):
        # Not a PAI; assume the caller already passed a physical address.
        return pai

    base = unsigned(kern.globals.vm_first_phys)
    page_size = unsigned(kern.globals.page_size)
    return base + (pai * page_size)
1424
def ConvertPhysAddrToPai(pa):
    """ Convert the given physical address into a Physical Address Index (PAI).

        If the input is already a valid PAI, then just return back the input
        unchanged.

        Raises ArgumentError if the input is neither a valid PAI nor a
        kernel-managed physical address.
    """
    if IsValidPai(pa):
        # Already a PAI; nothing to convert.
        return pa

    first_phys = unsigned(kern.globals.vm_first_phys)
    last_phys = unsigned(kern.globals.vm_last_phys)

    # Reject values outside the kernel-managed physical range.
    if pa < first_phys or pa >= last_phys:
        raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, first_phys, last_phys))

    return (pa - first_phys) // unsigned(kern.globals.page_size)
1442
@lldb_command('pmappaindex')
def PmapPaIndex(cmd_args=None):
    """ Display both a physical address and physical address index (PAI) when
        provided with only one of those values.

        Syntax: (lldb) pmappaindex <physical address | PAI>

        NOTE: This macro will throw an exception if the input isn't a valid PAI
              and is also not a kernel-managed physical address.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmappaindex.")

    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmappaindex is only supported on ARM devices.")

    raw = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    if IsValidPai(raw):
        # Input is a PAI; derive the corresponding physical address.
        pai, phys_addr = raw, ConvertPaiToPhysAddr(raw)
    else:
        # Input is a physical address; derive the PAI (may raise).
        pai, phys_addr = ConvertPhysAddrToPai(raw), raw

    print("Physical Address: {:#x}".format(phys_addr))
    print("PAI: {:d}".format(pai))
1472