xref: /xnu-8792.61.2/tools/lldbmacros/pmap.py (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1from __future__ import absolute_import, division, print_function
2
3from builtins import hex
4from builtins import range
5
6from xnu import *
7import xnudefines
8from kdp import *
9from utils import *
10import struct
11
def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None):
    """ Read a physical memory data based on address.
        params:
            phys_addr : int - Physical address to read
            bitsize   : int - defines how many bits to read. defaults to 64 bit
            cpuval    : None (optional, unused)
        returns:
            int - int value read from memory. in case of failure 0xBAD10AD is returned.
    """
    if GetConnectionProtocol() == "kdp":
        return KDPReadPhysMEM(phys_addr, bitsize)

    # No KDP connection; fall back to the kernel's physical-memory window.
    paddr_in_kva = kern.PhysToKernelVirt(int(phys_addr))
    if paddr_in_kva:
        # Map the requested width to the pointer type used for the read.
        width_to_type = {64: 'uint64_t *', 32: 'uint32_t *', 16: 'uint16_t *', 8: 'uint8_t *'}
        ptr_type = width_to_type.get(bitsize)
        if ptr_type is not None:
            return kern.GetValueFromAddress(paddr_in_kva, ptr_type).GetSBValue().Dereference().GetValueAsUnsigned()
    return 0xBAD10AD
36
@lldb_command('readphys')
def ReadPhys(cmd_args = None):
    """ Reads the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: readphys <nbits> <address>
        nbits: 8,16,32,64
        address: 1234 or 0x1234
    """
    # Guard clause: bail out early on missing arguments.
    if cmd_args is None or len(cmd_args) < 2:
        print("Insufficient arguments.", ReadPhys.__doc__)
        return False
    nbits = ArgumentStringToInt(cmd_args[0])
    phys_addr = ArgumentStringToInt(cmd_args[1])
    print("{0: <#x}".format(ReadPhysInt(phys_addr, nbits)))
    return True
54
# Width-specific shorthands for the readphys command.
for _width in ('8', '16', '32', '64'):
    lldb_alias('readphys' + _width, 'readphys ' + _width + ' ')
59
def KDPReadPhysMEM(address, bits):
    """ Setup the state for READPHYSMEM64 commands for reading data via kdp
        params:
            address : int - address where to read the data from
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            int: read value from memory.
            0xBAD10AD: if failed to read data.
    """
    retval = 0xBAD10AD
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return retval

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # Byte-swap address and count: pack big-endian, re-read little-endian.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]
        # Payload layout: 64-bit address, 32-bit byte count, 16-bit pad of zero.
        packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # -c 25 is the raw KDP request code sent to the KDP server
        # (presumably KDP_READPHYSMEM64 — see the manual_pkt branch below).
        ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            value = ret_obj.GetOutput()

            # Choose the struct formats for the requested width; the
            # pack/unpack pair below byte-swaps the reply value.
            if bits == 64 :
                pack_fmt = "<Q"
                unpack_fmt = ">Q"
            if bits == 32 :
                pack_fmt = "<I"
                unpack_fmt = ">I"
            if bits == 16 :
                pack_fmt = "<H"
                unpack_fmt = ">H"
            if bits == 8 :
                pack_fmt = "<B"
                unpack_fmt = ">B"

            # Take the last (bits/4)+1 characters of the output: bits/4 hex
            # digits plus one extra character (presumably a trailing newline,
            # which int(..., 16) tolerates — TODO confirm), then byte-swap.
            # NOTE(review): if bits is not one of 8/16/32/64, pack_fmt is
            # undefined here and this raises NameError.
            retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0]

    else:
        # Classic manual_pkt path: build a kdp_readphysmem64_req_t in the
        # kernel's shared manual_pkt buffer and then flag it as pending input.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))

        # Clear the input flag before staging the request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return retval

        kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize()
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return retval

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size)

        # Stage the request fields; abort on any failed write.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Setting input to 1 hands the staged packet to the kernel.
            if WriteInt32ToMemoryAddress(1, input_address):
                # now read data from the kdp packet
                data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data))
                if bits == 64 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 32 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 16 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 8 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()

    return retval
138
139
def KDPWritePhysMEM(address, intval, bits):
    """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp
        params:
            address : int - address where to save the data
            intval : int - integer value to be stored in memory
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            boolean: True if the write succeeded.
    """
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return False

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # Byte-swap address and count: pack big-endian, re-read little-endian.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]

        # Choose the struct formats for the requested width (note the
        # direction is the reverse of KDPReadPhysMEM's reply parsing).
        # NOTE(review): if bits is not one of 8/16/32/64, pack_fmt is
        # undefined and the struct call below raises NameError.
        if bits == 64 :
            pack_fmt = ">Q"
            unpack_fmt = "<Q"
        if bits == 32 :
            pack_fmt = ">I"
            unpack_fmt = "<I"
        if bits == 16 :
            pack_fmt = ">H"
            unpack_fmt = "<H"
        if bits == 8 :
            pack_fmt = ">B"
            unpack_fmt = "<B"

        # Byte-swap the value to write.
        data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0]

        # Payload layout: 64-bit address, 32-bit byte count, 16-bit pad,
        # then the value padded to a 64-bit hex field regardless of width.
        packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # -c 26 is the raw KDP request code sent to the KDP server
        # (presumably KDP_WRITEPHYSMEM64 — see the manual_pkt branch below).
        ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            return True
        else:
            return False

    else:
        # Classic manual_pkt path: build a kdp_writephysmem64_req_t in the
        # kernel's shared manual_pkt buffer and then flag it as pending input.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))
        # Clear the input flag before staging the request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return False

        # The request struct is followed by the payload bytes themselves.
        kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8)
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return False

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size)

        # Stage the request fields; abort on any failed write.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Stage the payload with a write of the matching width.
            if bits == 8:
                if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 16:
                if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 32:
                if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 64:
                if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            # Setting input to 1 hands the staged packet to the kernel.
            if WriteInt32ToMemoryAddress(1, input_address):
                return True
        return False
221
222
def WritePhysInt(phys_addr, int_val, bitsize = 64):
    """ Write an integer value into physical memory at the given address.
        params:
            phys_addr : int - Physical address to write to
            int_val   : int - int value to write in memory
            bitsize   : int - defines how many bits to write. defaults to 64 bit
        returns:
            bool - True if write was successful.
    """
    if GetConnectionProtocol() != "kdp":
        # Physical writes are only supported over a KDP connection.
        print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol())
        return False
    if KDPWritePhysMEM(phys_addr, int_val, bitsize):
        return True
    print("Failed to write via KDP.")
    return False
240
@lldb_command('writephys')
def WritePhys(cmd_args=None):
    """ Writes the given value to the specified untranslated address
        The address argument is interpreted as a physical address, and the
        value is written to the word of the given width at that address.
        usage: writephys <nbits> <address> <value>
        nbits: 8,16,32,64
        address: 1234 or 0x1234
        value: int value to be written
        ex. (lldb)writephys 16 0x12345abcd 0x25
    """
    # Note: this docstring doubles as the command's help text below, so the
    # old copy-paste from readphys ("addressed is displayed") was misleading.
    if cmd_args is None or len(cmd_args) < 3:
        print("Invalid arguments.", WritePhys.__doc__)
    else:
        nbits = ArgumentStringToInt(cmd_args[0])
        phys_addr = ArgumentStringToInt(cmd_args[1])
        int_value = ArgumentStringToInt(cmd_args[2])
        # WritePhysInt returns True/False; echo the outcome to the user.
        print(WritePhysInt(phys_addr, int_value, nbits))
259
260
# Width-specific shorthands for the writephys command.
for _width in ('8', '16', '32', '64'):
    lldb_alias('writephys' + _width, 'writephys ' + _width + ' ')
265
266
def _PT_Step(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each entry is 8 bytes; fetch the one selected by index.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Dump all 512 entries of the containing table.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Masks drop the low 12 (4K page) / 21 (2M large page) offset bits and
    # the top 12 bits (52..63) of the entry to isolate the physical address.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet mode: just decode the valid/large/address fields.
        if entry & 0x1 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            if entry & (0x1 <<7):
                # Bit 7 (PS) set: entry maps a large page directly.
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose mode: decode and pretty-print each attribute bit.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x1:
            # Bit 0: present.
            out_string += " valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += " invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed (swapped-out) mapping in xnu.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " read-only"

        if entry & (0x1 << 2):
            out_string += " user"
        else:
            out_string += " supervisor"

        if entry & (0x1 << 3):
            out_string += " PWT"

        if entry & (0x1 << 4):
            out_string += " PCD"

        if entry & (0x1 << 5):
            out_string += " accessed"

        if entry & (0x1 << 6):
            out_string += " dirty"

        if entry & (0x1 << 7):
            # Bit 7 (PS): large page mapping.
            out_string += " large"
            pt_large = True
        else:
            pt_large = False

        if entry & (0x1 << 8):
            out_string += " global"

        # Bits 9..10: available-to-software field.
        if entry & (0x3 << 9):
            out_string += " avail:{0:x}".format((entry >> 9) & 0x3)

        # Bit 63: execute-disable.
        if entry & (0x1 << 63):
            out_string += " noexec"
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
355
def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes for EPT pmap
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each entry is 8 bytes; fetch the one selected by index.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Dump all 512 entries of the containing table.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Masks drop the low 12 (4K page) / 21 (2M large page) offset bits and
    # the top 12 bits (52..63) of the entry to isolate the physical address.
    paddr_mask = ~((0xfff<<52) | 0xfff)
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet mode: an EPT entry is usable if any of the R/W/X
        # permission bits (0..2) is set.
        if entry & 0x7 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            if entry & (0x1 <<7):
                # Bit 7 set: entry maps a large page directly.
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        # Verbose mode: decode and pretty-print each attribute field.
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x7:
            out_string += "valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += "invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed (swapped-out) mapping in xnu.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        # Bits 0..2: read/write/execute permissions.
        if entry & 0x1:
            out_string += " readable"
        else:
            out_string += " no read"
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " no write"

        if entry & (0x1 << 2):
            out_string += " executable"
        else:
            out_string += " no exec"

        # Bits 3..5: EPT memory type.
        ctype = entry & 0x38
        if ctype == 0x30:
            out_string += " cache-WB"
        elif ctype == 0x28:
            out_string += " cache-WP"
        elif ctype == 0x20:
            out_string += " cache-WT"
        elif ctype == 0x8:
            out_string += " cache-WC"
        else:
            out_string += " cache-NC"

        # Bit 6: ignore guest PAT.
        if (entry & 0x40) == 0x40:
            out_string += " Ignore-PTA"

        # Bit 8: accessed.
        if (entry & 0x100) == 0x100:
            out_string += " accessed"

        # Bit 9: dirty.
        if (entry & 0x200) == 0x200:
            out_string += " dirty"

        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
448
def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT):
    """ Walk the l4 pmap entry.
        params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t
        vaddr : int - virtual address to walk
        ept_pmap : nonzero to decode entries in EPT format (via _PT_StepEPT)
        verbose_level : vHUMAN / vSCRIPT / vDETAIL
        returns: int - translated physical address, or 0 if there is no
                 valid translation
    """
    pt_paddr = unsigned(pmap_addr_val)
    pt_valid = (unsigned(pmap_addr_val) != 0)
    pt_large = 0
    pframe_offset = 0
    if pt_valid:
        # Lookup bits 47:39 of linear address in PML4T
        pt_index = (vaddr >> 39) & 0x1ff
        pframe_offset = vaddr & 0x7fffffffff
        if verbose_level > vHUMAN :
            print("pml4 (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid:
        # Lookup bits 38:30 of the linear address in PDPT
        pt_index = (vaddr >> 30) & 0x1ff
        pframe_offset = vaddr & 0x3fffffff
        if verbose_level > vHUMAN:
            print("pdpt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        # Lookup bits 29:21 of the linear address in PDT
        pt_index = (vaddr >> 21) & 0x1ff
        pframe_offset = vaddr & 0x1fffff
        if verbose_level > vHUMAN:
            print("pdt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        # Lookup bits 20:12 of linear address in PT
        pt_index = (vaddr >> 12) & 0x1ff
        pframe_offset = vaddr & 0xfff
        if verbose_level > vHUMAN:
            print("pt (index {0:d}):".format(pt_index))
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    paddr = 0
    paddr_isvalid = False
    if pt_valid:
        # Combine the final frame address with the in-frame offset computed
        # at whichever level terminated the walk (page or large page).
        paddr = pt_paddr + pframe_offset
        paddr_isvalid = True

    if verbose_level > vHUMAN:
        if paddr_isvalid:
            pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self)
            print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue))
        else:
            print("no translation")

    return paddr
512
def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT):
    """ Walk the x86_64 page tables of a pmap and translate a virtual address.
        params: pmapval - core.value representing pmap_t in kernel
        vaddr:  int     - int representing virtual address to walk
        verbose_level   - vHUMAN / vSCRIPT / vDETAIL output level
        returns: int - translated physical address (see _PmapL4Walk)
    """
    if pmapval.pm_cr3 != 0:
        if verbose_level > vHUMAN:
            print("Using normal Intel PMAP from pm_cr3\n")
        # Fix: honor the caller-supplied verbose_level; previously this
        # always passed the global config['verbosity'], so the parameter
        # was silently ignored.
        return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, verbose_level)
    else:
        if verbose_level > vHUMAN:
            print("Using EPT pmap from pm_eptp\n")
        return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, verbose_level)
526
def assert_64bit(val):
    """ Sanity-check that val fits in an unsigned 64-bit quantity. """
    assert val < (1 << 64)
529
# ARM64 translation table geometry.
ARM64_TTE_SIZE = 8      # bytes per translation table entry
ARM64_TTE_SHIFT = 3     # log2(ARM64_TTE_SIZE)
ARM64_VMADDR_BITS = 48  # width of a translatable virtual address, in bits
533
def PmapBlockOffsetMaskARM64(page_size, level):
    """ Return the mask covering the byte offset within an ARM64 block
        mapping at the given translation table level (0..3).
    """
    assert 0 <= level <= 3
    entries_per_table = page_size // ARM64_TTE_SIZE
    block_size = page_size * (entries_per_table ** (3 - level))
    return block_size - 1
538
def PmapBlockBaseMaskARM64(page_size, level):
    """ Return the mask selecting the base-address bits of an ARM64 block
        mapping at the given translation table level (0..3).
    """
    assert 0 <= level <= 3
    vaddr_mask = (1 << ARM64_VMADDR_BITS) - 1
    return vaddr_mask & ~PmapBlockOffsetMaskARM64(page_size, level)
542
def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False):
    """ Display the bits of an ARM64 translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level.  Valid values are 1, 2, or 3.
        stage2: True to decode as a stage-2 (guest) entry.
        is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise.
    """
    assert(isinstance(level, numbers.Integral))
    assert_64bit(tte)

    # Bit 0: valid bit.
    if tte & 0x1 == 0x0:
        print("Invalid.")
        return

    # Bit 1 distinguishes a table pointer from a block mapping, except at
    # the last level (3) where entries are always pages.
    if (tte & 0x2 == 0x2) and (level != 0x3):
        print("Type       = Table pointer.")
        print("Table addr = {:#x}.".format(tte & 0xfffffffff000))

        # Table-level hierarchical attributes exist only for stage 1.
        if not stage2:
            print("PXN        = {:#x}.".format((tte >> 59) & 0x1))
            print("XN         = {:#x}.".format((tte >> 60) & 0x1))
            print("AP         = {:#x}.".format((tte >> 61) & 0x3))
            print("NS         = {:#x}.".format(tte >> 63))
    else:
        print("Type       = Block.")

        if stage2:
            # Stage 2 encodes the memory attributes directly in bits 2..5.
            print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf))
        else:
            # Stage 1 stores an index (bits 2..4) into the MAIR attributes.
            attr_index = (tte >> 2) & 0x7
            attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU',
                3: 'CACHE DISABLE', 4: 'INNERWRITEBACK', 5: 'POSTED',
                6: 'POSTED_REORDERED', 7: 'POSTED_COMBINED_REORDERED' }

            # Only show the string version of the AttrIdx for CPU mappings since
            # these values don't apply to IOMMU mappings.
            if is_iommu_tte:
                print("AttrIdx    = {:#x}.".format(attr_index))
            else:
                print("AttrIdx    = {:#x} ({:s}).".format(attr_index, attr_string[attr_index]))
            print("NS         = {:#x}.".format((tte >> 5) & 0x1))

        # Access permissions: S2AP (stage 2) vs AP (stage 1), bits 6..7.
        if stage2:
            print("S2AP       = {:#x}.".format((tte >> 6) & 0x3))
        else:
            print("AP         = {:#x}.".format((tte >> 6) & 0x3))

        print("SH         = {:#x}.".format((tte >> 8) & 0x3))
        print("AF         = {:#x}.".format((tte >> 10) & 0x1))

        if not stage2:
            print("nG         = {:#x}.".format((tte >> 11) & 0x1))

        # Bit 52: contiguous hint.
        print("HINT       = {:#x}.".format((tte >> 52) & 0x1))

        # Execute-never bits: S2XN is a 2-bit field; stage 1 has PXN/XN.
        if stage2:
            print("S2XN       = {:#x}.".format((tte >> 53) & 0x3))
        else:
            print("PXN        = {:#x}.".format((tte >> 53) & 0x1))
            print("XN         = {:#x}.".format((tte >> 54) & 0x1))

        # Bits 55..58: software-use field.
        print("SW Use     = {:#x}.".format((tte >> 55) & 0xf))

    return
607
def PmapTTnIndexARM64(vaddr, pmap_pt_attr):
    """ Compute the translation table index of vaddr at every level described
        by pmap_pt_attr (level 0 through pta_max_level inclusive).
        returns: list of per-level table indices.
    """
    level_info = pmap_pt_attr.pta_level_info
    max_level = unsigned(pmap_pt_attr.pta_max_level)
    return [(vaddr & unsigned(level_info[lvl].index_mask)) >> unsigned(level_info[lvl].shift)
            for lvl in range(max_level + 1)]
617
def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN):
    """ Walk the ARM64 translation tables rooted at root_tte for vaddr.
        params:
            pmap_pt_attr  - page table attribute structure for this pmap
            root_tte      - root translation table (array of tt_entry_t)
            vaddr         - virtual address to translate
            verbose_level - vHUMAN / vSCRIPT / vDETAIL output level
        returns: int - translated physical address, or -1 if the walk hit
                 an invalid entry.
    """
    # Fix: the old check was `type(vaddr) in (int, int)` — a Python 2
    # (int, long) leftover with a redundant tuple; accept any integral type.
    assert(isinstance(vaddr, numbers.Integral))
    assert_64bit(vaddr)
    assert_64bit(root_tte)

    # Obtain pmap attributes
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr)
    stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False)

    # The pmap starts at a page table level that is defined by register
    # values; the root level can be obtained from the attributes structure
    level = unsigned(pmap_pt_attr.pta_root_level)

    root_tt_index = tt_index[level]
    tte = int(unsigned(root_tte[root_tt_index]))

    # Walk the page tables.  paddr uses -1 (not 0) as the failure sentinel,
    # since physical page 0 is a legitimate translation target.
    paddr = -1
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    while (level <= max_level):
        if verbose_level >= vSCRIPT:
            print("L{} entry: {:#x}".format(level, tte))
        if verbose_level >= vDETAIL:
            PmapDecodeTTEARM64(tte, level, stage2)

        # Bit 0 clear: invalid entry, abort the walk.
        if tte & 0x1 == 0x0:
            if verbose_level >= vHUMAN:
                print("L{} entry invalid: {:#x}\n".format(level, tte))
            break

        # Handle leaf entry (block mapping, or page at the last level).
        if tte & 0x2 == 0x0 or level == max_level:
            base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level)
            offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level)
            paddr = tte & base_mask
            paddr = paddr | (vaddr & offset_mask)

            if level != max_level:
                print("phys: {:#x}".format(paddr))

            break
        else:
        # Handle page table entry: descend to the next table.
            next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1])
            assert(isinstance(next_phys, numbers.Integral))

            next_virt = kern.PhysToKernelVirt(next_phys)
            assert(isinstance(next_virt, numbers.Integral))

            if verbose_level >= vDETAIL:
                print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt))

            ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*")
            tte = int(unsigned(dereference(ttep)))
            assert(isinstance(tte, numbers.Integral))

        # We've parsed one level, so go to the next level
        assert(level <= 3)
        level = level + 1


    if verbose_level >= vHUMAN:
        # Fix: the old `if paddr:` treated the -1 failure sentinel as truthy,
        # so failed walks printed "Translation of X is -0x1" and
        # "(no translation)" only ever fired for valid physical page 0.
        if paddr != -1:
            print("Translation of {:#x} is {:#x}.".format(vaddr, paddr))
        else:
            print("(no translation)")

    return paddr
697
def PmapWalk(pmap, vaddr, verbose_level = vHUMAN):
    """ Dispatch a page-table walk for vaddr to the architecture-specific
        walker for the current target. Raises NotImplementedError for
        unsupported architectures.
    """
    arch = kern.arch
    if arch == 'x86_64':
        return PmapWalkX86_64(pmap, vaddr, verbose_level)
    if arch.startswith('arm64'):
        # Fall back to the native attributes when this pmap structure has
        # no pmap_pt_attr member.
        if hasattr(pmap, 'pmap_pt_attr'):
            pmap_pt_attr = pmap.pmap_pt_attr
        else:
            pmap_pt_attr = kern.globals.native_pt_attr
        return PmapWalkARM64(pmap_pt_attr, pmap.tte, vaddr, verbose_level)
    raise NotImplementedError("PmapWalk does not support {0}".format(arch))
707
@lldb_command('pmap_walk')
def PmapWalkHelper(cmd_args=None):
    """ Perform a page-table walk in <pmap> for <virtual_address>.
        Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e]
            Multiple -v's can be specified for increased verbosity
    """
    if not cmd_args or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to pmap_walk.")

    target_pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t')
    target_va = ArgumentStringToInt(cmd_args[1])
    # Verbosity is taken from the global config (set by the -v flags).
    PmapWalk(target_pmap, target_va, config['verbosity'])
721
def GetMemoryAttributesFromUser(requested_type):
    """ Map a user-supplied page-size string ('4k', '16k', '16k_s2',
        case-insensitive) to the matching pmap_pt_attr global.
        Returns None for unrecognized types (or when the stage-2
        attributes are not present in this kernel).
    """
    attr_by_name = {
        '4k' : kern.globals.pmap_pt_attr_4k,
        '16k' : kern.globals.pmap_pt_attr_16k,
        '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2') else None,
    }
    return attr_by_name.get(requested_type.lower())
734
@lldb_command('ttep_walk')
def TTEPWalkPHelper(cmd_args=None):
    """ Perform a page-table walk in <root_ttep> for <virtual_address>.
        Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e]
        Multiple -v's can be specified for increased verbosity
        """
    if not cmd_args or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to ttep_walk.")

    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch))

    # The first argument is the root table's physical address; view it
    # through the physical-memory window as an array of entries.
    root_phys = ArgumentStringToInt(cmd_args[0])
    root_tte = kern.GetValueFromAddress(kern.PhysToKernelVirt(root_phys), 'unsigned long *')
    target_va = ArgumentStringToInt(cmd_args[1])

    if len(cmd_args) < 3:
        pmap_pt_attr = kern.globals.native_pt_attr
    else:
        pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
    if pmap_pt_attr is None:
        raise ArgumentError("Invalid translation attribute type.")

    return PmapWalkARM64(pmap_pt_attr, root_tte, target_va, config['verbosity'])
755
@lldb_command('decode_tte')
def DecodeTTE(cmd_args=None):
    """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2]
        Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2]
    """
    if not cmd_args or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to decode_tte.")
    if len(cmd_args) > 2 and cmd_args[2] not in ("s1", "s2"):
        raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2]))
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("decode_tte does not support {0}".format(kern.arch))
    # Stage 1 is the default unless "s2" was explicitly requested.
    stage2 = len(cmd_args) > 2 and cmd_args[2] == "s2"
    PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2)
770
771
# Flag bits kept in the upper part of a pv_head_table entry. LLDB mangles the
# high bits of pointer-typed values, so these masks are OR'd back in before an
# entry is dereferenced (bits 55-62 on arm64; bit 31 on arm32).
PVH_HIGH_FLAGS_ARM64 = 0xFF << 55
PVH_HIGH_FLAGS_ARM32 = 0x80000000
774
def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN):
    """ Dump information about a single mapping retrieved by the pv_head_table.

        pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP,
              or None if type PVH_TYPE_PTEP.
        ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with
              the high flags already set (the type bits don't need to be cleared).
              For type PVH_TYPE_PVEP this will be the value retrieved from the
              pve_ptep[] array.
        verbose_level: At vDETAIL or above, also print an extra line describing
              who owns the mapping (the IOMMU, or the owning pmap/task).
    """
    # Flag bits embedded in the PTE pointer that mark IOMMU-owned mappings
    # (only meaningful on arm64).
    if kern.arch.startswith('arm64'):
        iommu_flag = 0x4
        iommu_table_flag = 1 << 63
    else:
        iommu_flag = 0
        iommu_table_flag = 0

    # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries.
    if pvep is not None and (ptep & 0x1):
        # Note: It's not possible for IOMMU mappings to be marked as alt acct so
        # setting this string is mutually exclusive with setting the IOMMU strings.
        pte_str = ' (alt acct)'
    else:
        pte_str = ''

    if pvep is not None:
        pve_str = 'PVEP {:#x}, '.format(pvep)
    else:
        pve_str = ''

    # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this
    # either does nothing or clears out the AltAcct bit.
    ptep = ptep & ~0x3

    # When printing with extra verbosity, print an extra newline that describes
    # who owns the mapping.
    extra_str = ''

    if ptep & iommu_flag:
        # The mapping is an IOMMU Mapping
        ptep = ptep & ~iommu_flag

        # Due to LLDB automatically setting all the high bits of pointers, when
        # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set
        # the iommu_table_flag, which means this check only works for PVH entries
        # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH
        # entry which has the right casting applied to avoid this issue).
        #
        # Why don't we just do the same casting for pve_ptep[] you ask? Well not
        # for a lack of trying, that's for sure. If you can figure out how to
        # cast that array correctly, then be my guest.
        if ptep & iommu_table_flag:
            pte_str = ' (IOMMU table), entry'

            ptd = GetPtDesc(KVToPhysARM(ptep))
            iommu = dereference(ptd.iommu)
        else:
            # Instead of dumping the PTE (since we don't have that), dump the
            # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc).
            #
            # This works because later on when the "ptep" is dereferenced as a
            # PTE pointer (uint64_t pointer), the descriptor pointer will be
            # dumped as that's the first 64-bit value in the IOMMU state object.
            pte_str = ' (IOMMU state), descriptor'
            ptep = ptep | iommu_table_flag
            iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *'))

        # For IOMMU mappings, dump who owns the mapping as the extra string.
        extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name)
        if unsigned(iommu.name) != 0:
            extra_str += '/{:s}'.format(iommu.name)
        extra_str += ' (iommu state: {:x})'.format(addressof(iommu))
    else:
        # The mapping is a CPU Mapping
        pte_str += ', entry'
        ptd = GetPtDesc(KVToPhysARM(ptep))
        if ptd.pmap == kern.globals.kernel_pmap:
            extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap)
        elif verbose_level >= vDETAIL:
            task = TaskForPmapHelper(ptd.pmap)
            extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>")
    try:
        print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))))
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit still
    # propagate; the PTE may simply be unreadable from this core/target.
    except Exception:
        print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str))

    if verbose_level >= vDETAIL:
        print("    |-- {:s}".format(extra_str))
863
def PVWalkARM(pai, verbose_level = vHUMAN):
    """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap.

        pai: physical address index (PAI) corresponding to the pv_head_table
             entry to walk.
        verbose_level: Set to vDETAIL or higher to print extra info around the
                       pv_head_table/pp_attr_table flags and to dump the
                       pt_desc_t object if the type is a PTD.
    """
    # LLDB will automatically try to make pointer values dereferencable by
    # setting the upper bits if they aren't set. We need to parse the flags
    # stored in the upper bits later, so cast the pv_head_table to an array of
    # integers to get around this "feature". We'll add the upper bits back
    # manually before deref'ing anything.
    pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*")
    pvh_raw = unsigned(pv_head_table[pai])
    pvh = pvh_raw
    # The low two bits of the entry encode its type (handled below:
    # 0 = NULL, 1 = PVE list, 2 = single PTE, 3 = page-table descriptor).
    pvh_type = pvh & 0x3

    print("PVH raw value: {:#x}".format(pvh_raw))
    # Restore the high flag bits so the value can be used as a pointer.
    if kern.arch.startswith('arm64'):
        pvh = pvh | PVH_HIGH_FLAGS_ARM64
    else:
        pvh = pvh | PVH_HIGH_FLAGS_ARM32

    if pvh_type == 0:
        print("PVH type: NULL")
    elif pvh_type == 3:
        print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3))
    elif pvh_type == 2:
        print("PVH type: single PTE")
        PVDumpPTE(None, pvh, verbose_level)
    elif pvh_type == 1:
        pvep = pvh & ~0x3
        print("PVH type: PTE list")
        # Each pv_entry_t carries two PTE slots (pve_ptep[0..1]); examine both
        # before chaining to the next entry via pve_next.
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")

            if pve.pve_ptep[pve_ptep_idx] != 0:
                PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level)

            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)

    if verbose_level >= vDETAIL:
        if (pvh_type == 1) or (pvh_type == 2):
            # Dump pv_head_table flags when there's a valid mapping.
            pvh_flags = []

            # Bit positions correspond to the PVH_HIGH_FLAGS_ARM64 mask above.
            if pvh_raw & (1 << 62):
                pvh_flags.append("CPU")
            if pvh_raw & (1 << 60):
                pvh_flags.append("EXEC")
            if pvh_raw & (1 << 59):
                pvh_flags.append("LOCKDOWN_KC")
            if pvh_raw & (1 << 58):
                pvh_flags.append("HASHED")
            if pvh_raw & (1 << 57):
                pvh_flags.append("LOCKDOWN_CS")
            if pvh_raw & (1 << 56):
                pvh_flags.append("LOCKDOWN_RO")
            if kern.arch.startswith('arm64') and pvh_raw & (1 << 61):
                pvh_flags.append("LOCK")

            print("PVH Flags: {}".format(pvh_flags))

        # Always dump pp_attr_table flags (these can be updated even if there aren't mappings).
        ppattr = unsigned(kern.globals.pp_attr_table[pai])
        print("PPATTR raw value: {:#x}".format(ppattr))

        # The low six bits hold the WIMG (cacheability) attribute value.
        ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)]
        if ppattr & 0x40:
            ppattr_flags.append("REFERENCED")
        if ppattr & 0x80:
            ppattr_flags.append("MODIFIED")
        if ppattr & 0x100:
            ppattr_flags.append("INTERNAL")
        if ppattr & 0x200:
            ppattr_flags.append("REUSABLE")
        if ppattr & 0x400:
            ppattr_flags.append("ALTACCT")
        if ppattr & 0x800:
            ppattr_flags.append("NOENCRYPT")
        if ppattr & 0x1000:
            ppattr_flags.append("REFFAULT")
        if ppattr & 0x2000:
            ppattr_flags.append("MODFAULT")
        if ppattr & 0x4000:
            ppattr_flags.append("MONITOR")
        if ppattr & 0x8000:
            ppattr_flags.append("NO_MONITOR")

        print("PPATTR Flags: {}".format(ppattr_flags))

        if pvh_type == 3:
            def RunLldbCmdHelper(command):
                """Helper for dumping an LLDB command right before executing it
                and printing the results.
                command: The LLDB command (as a string) to run.

                Example input: "p/x kernel_pmap".
                """
                print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command)))
            # Dump the page table descriptor object
            ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
            RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd))

            # Depending on the system, more than one ptd_info can be associated
            # with a single PTD. Only dump the first PTD info and assume the
            # user knows to dump the rest if they're on one of those systems.
            RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]")
978
@lldb_command('pv_walk')
def PVWalk(cmd_args=None):
    """ Show mappings for <physical_address | PAI> tracked in the PV list.
        Syntax: (lldb) pv_walk <physical_address | PAI> [-vv]

        Extra verbosity will pretty print the pv_head_table/pp_attr_table flags
        as well as dump the page table descriptor (PTD) struct if the entry is a
        PTD.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pv_walk.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pv_walk does not support {0}".format(kern.arch))

    phys = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    # ConvertPhysAddrToPai() accepts either a PAI (returned unchanged) or a
    # kernel-managed physical address (converted to its PAI), and rejects
    # anything else.
    PVWalkARM(ConvertPhysAddrToPai(phys), config['verbosity'])
1000
@lldb_command('kvtophys')
def KVToPhys(cmd_args=None):
    """ Translate a kernel virtual address to the corresponding physical address.
        Assumes the virtual address falls within the kernel static region.
        Syntax: (lldb) kvtophys <kernel virtual address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to kvtophys.")
    if kern.arch.startswith('arm'):
        va = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
        print("{:#x}".format(KVToPhysARM(va)))
    elif kern.arch == 'x86_64':
        # The x86_64 physmap is a simple linear offset from physmap_base.
        va = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
        print("{:#x}".format(va - unsigned(kern.globals.physmap_base)))
1013
@lldb_command('phystokv')
def PhysToKV(cmd_args=None):
    """ Translate a physical address to the corresponding static kernel virtual address.
        Assumes the physical address corresponds to managed DRAM.
        Syntax: (lldb) phystokv <physical address>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to phystokv.")
    pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print("{:#x}".format(kern.PhysToKernelVirt(pa)))
1023
def KVToPhysARM(addr):
    """ Translate a static-region kernel virtual address to its physical
        address on ARM devices.
    """
    if kern.arch.startswith('arm64'):
        # Check the ptov_table first: regions listed there do not follow the
        # simple gVirtBase/gPhysBase linear relationship.
        table = kern.globals.ptov_table
        for idx in range(0, kern.globals.ptov_index):
            region_va = int(unsigned(table[idx].va))
            region_len = int(unsigned(table[idx].len))
            if region_va <= addr < region_va + region_len:
                return addr - region_va + int(unsigned(table[idx].pa))
    # Fall back to the static linear mapping.
    return addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase)
1031
1032
def GetPtDesc(paddr):
    """ Look up the page-table descriptor (pt_desc_t) for the physical page
        containing paddr.

        Raises ValueError if the pv_head_table entry for that page is not of
        the page-table-descriptor type.
    """
    pai = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size
    pvh = unsigned(kern.globals.pv_head_table[pai])
    # OR the high flag bits back in before treating the entry as a pointer.
    if kern.arch.startswith('arm64'):
        pvh |= PVH_HIGH_FLAGS_ARM64
    else:
        pvh |= PVH_HIGH_FLAGS_ARM32
    # Type 0x3 in the low bits marks a page-table descriptor.
    if (pvh & 0x3) != 0x3:
        raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh))
    return kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
1045
def ShowPTEARM(pte, page_size, stage2 = False):
    """ Display vital information about an ARM page table entry
        pte: kernel virtual address of the PTE.  Should be L3 PTE.  May also work with L2 TTEs for certain devices.
        page_size: size in bytes of the page-table pages being walked (the
                   pmap's pta_page_size, which may differ from the kernel's).
        stage2: True if the PTE belongs to a stage-2 translation; affects the
                output labeling ("IPA" vs "VA") and TTE decoding.
    """
    # Locate the page-table descriptor for the page containing this PTE.
    ptd = GetPtDesc(KVToPhysARM(pte))
    # Index of this page table within its kernel-page-sized physical page
    # (more than one table can share a page when page_size < kernel page size).
    pt_index = (pte % kern.globals.page_size) // page_size
    refcnt = ptd.ptd_info[pt_index].refcnt
    wiredcnt = ptd.ptd_info[pt_index].wiredcnt
    print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x})".format(ptd, refcnt, wiredcnt))

    # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001.
    is_iommu_pte = (refcnt & 0x8000) == 0x8000

    # The pmap/iommu field is a union, so only print the correct one.
    if is_iommu_pte:
        iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name)
        if unsigned(dereference(ptd.iommu).name) != 0:
            iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name)

        print("iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name))
    else:
        if ptd.pmap == kern.globals.kernel_pmap:
            pmap_str = "(kernel_pmap)"
        else:
            task = TaskForPmapHelper(ptd.pmap)
            pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>")
        print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str))

    # Convert the PTE's byte offset within its table into an entry index
    # (8-byte entries on arm64, 4-byte entries on arm32).
    pte_pgoff = pte % page_size
    if kern.arch.startswith('arm64'):
        pte_pgoff = pte_pgoff // 8
        nttes = page_size // 8
    else:
        pte_pgoff = pte_pgoff // 4
        nttes = page_size // 4
    # A refcnt of exactly 0x4000 is treated as an L2 table (each entry spans a
    # full L3 table's worth of VA); anything else is treated as an L3 table.
    if ptd.ptd_info[pt_index].refcnt == 0x4000:
        level = 2
        granule = nttes * page_size
    else:
        level = 3
        granule = page_size
    print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(ptd.va[pt_index])) + (pte_pgoff * granule)))
    pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *'))))
    print("value: {:#x}".format(pteval))
    if kern.arch.startswith('arm64'):
        print("level: {:d}".format(level))
        PmapDecodeTTEARM64(pteval, level, stage2, is_iommu_pte)

    else:
        raise UnsupportedArchitectureError(kern.arch)
1096
@lldb_command('showpte')
def ShowPTE(cmd_args=None):
    """ Display vital information about the page table entry at VA <pte>
        Syntax: (lldb) showpte <pte_va> [4k|16k|16k_s2]
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to showpte.")
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("showpte does not support {0}".format(kern.arch))

    # Use the native page-table geometry unless an alternate one was named.
    if len(cmd_args) < 2:
        pt_attr = kern.globals.native_pt_attr
    else:
        pt_attr = GetMemoryAttributesFromUser(cmd_args[1])
    if pt_attr is None:
        raise ArgumentError("Invalid translation attribute type.")

    # Older attribute structs may lack the stage2 field; default to stage 1.
    is_stage2 = bool(getattr(pt_attr, 'stage2', False))
    ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), pt_attr.pta_page_size, is_stage2)
1114
def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action):
    """ Perform the specified action for all valid mappings in an ARM64 translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 2 or 3
        va: virtual address mapped by the first entry of tt
        action: callback for each valid TTE; a truthy return requests descent
                into the child table when the entry is a table entry
    """
    # Obtain pmap attributes
    pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    for i in range(nttes):
        try:
            tte = tt[i]
            # Bit 0 clear means the entry is invalid; skip it.
            if tte & 0x1 == 0x0:
                continue

            tt_next = None
            paddr = unsigned(tte) & unsigned(page_base_mask)

            # Handle leaf entry
            # Bit 1 distinguishes block (0) from table (1) entries; an entry at
            # the maximum level is always a leaf.
            if tte & 0x2 == 0x0 or level == max_level:
                type = 'block' if level < max_level else 'entry'
                granule = PmapBlockOffsetMaskARM64(page_size, level) + 1
            else:
            # Handle page table entry
                type = 'table'
                granule = page_size
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')

            # Base VA covered by entry i at this level.
            mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i)
            if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule):
                if tt_next is not None:
                    # Recurse into the next-level table this entry points to.
                    FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action)

        except Exception as exc:
            # Keep scanning the rest of the table even if one entry can't be read.
            print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))))
1156
def ScanPageTables(action, targetPmap=None):
    """ Perform the specified action for all valid mappings in all page tables,
        optionally restricted to a single pmap.
        action: callback invoked for each mapping; a truthy return on the root
                entry allows descent into that pmap's tables.
        targetPmap: pmap whose page table should be scanned.  If None, all pmaps on system will be scanned.

        NOTE(review): granule/level/root_pgtable_num_ttes are only assigned in
        the arm64 branch but are read unconditionally below, so this helper
        effectively supports only arm64 targets — confirm before using on
        other architectures.
    """
    print("Scanning all available translation tables.  This may take a long time...")
    def ScanPmap(pmap, action):
        # Scan a single pmap, starting from its root translation table.
        if kern.arch.startswith('arm64'):
            # Obtain pmap attributes
            pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
            granule = pmap_pt_attr.pta_page_size
            level = unsigned(pmap_pt_attr.pta_root_level)
            # Number of entries in the root table, derived from the root
            # level's index mask and shift.
            root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
                unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1

        if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule):
            if kern.arch.startswith('arm64'):
                FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action)

    if targetPmap is not None:
        ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action)
    else:
        # No target specified: walk every pmap on the system-wide pmap list.
        for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
            ScanPmap(pmap, action)
1181
@lldb_command('showallmappings')
def ShowAllMappings(cmd_args=None):
    """ Find and display all available mappings on the system for
        <physical_address>.  Optionally only searches the pmap
        specified by [<pmap>]
        Syntax: (lldb) showallmappings <physical_address> [<pmap>]
        WARNING: this macro can take a long time (up to 30min.) to complete!
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to showallmappings.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showallmappings does not support {0}".format(kern.arch))

    target_pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    target_pmap = cmd_args[1] if len(cmd_args) > 1 else None

    def report_if_match(pmap, level, type, tte, paddr, va, granule):
        # Print any entry whose physical range covers target_pa, and always
        # return True so the scan keeps descending into child tables.
        if paddr <= target_pa < (paddr + granule):
            print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va))
        return True

    ScanPageTables(report_if_match, target_pmap)
1203
@lldb_command('showptusage')
def ShowPTUsage(cmd_args=None):
    """ Display a summary of pagetable allocations for a given pmap.
        Syntax: (lldb) showptusage [<pmap>]
        WARNING: this macro can take a long time (> 1hr) to complete!
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showptusage does not support {0}".format(kern.arch))
    targetPmap = None
    # Guard against cmd_args being None (its declared default) as well as
    # empty; the previous bare len() check raised TypeError on None.
    if cmd_args:
        targetPmap = cmd_args[0]
    # Single-element lists serve as mutable cells so the nested callback can
    # update these counters (pre-"nonlocal" closure idiom).
    lastPmap = [None]
    numTables = [0]
    numUnnested = [0]
    numPmaps = [0]
    def printValidTTE(pmap, level, type, tte, paddr, va, granule):
        """ ScanPageTables() callback: print one line per live page table and
            update the summary counters.
        """
        unnested = ""
        nested_region_addr = int(unsigned(pmap.nested_region_addr))
        nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size))
        if lastPmap[0] is None or (pmap != lastPmap[0]):
            lastPmap[0] = pmap
            numPmaps[0] = numPmaps[0] + 1
            print ("pmap {:#x}:".format(pmap))
        if type == 'root':
            return True
        if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end):
            # An L2 table inside the nested region that is owned by this pmap
            # (rather than the shared pmap) was likely unnested.
            ptd = GetPtDesc(paddr)
            if ptd.pmap != pmap:
                return False
            else:
                numUnnested[0] = numUnnested[0] + 1
                unnested = " (likely unnested)"
        numTables[0] = numTables[0] + 1
        print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested)
        # Don't descend past L2: only table allocations are being counted.
        if level == 2:
            return False
        else:
            return True
    ScanPageTables(printValidTTE, targetPmap)
    print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables[0], numUnnested[0], numPmaps[0]))
1244
def checkPVList(pmap, level, type, tte, paddr, va, granule):
    """ Checks an ARM physical-to-virtual mapping list for consistency errors.
        pmap: owner of the translation table
        level: translation table level.  PV lists will only be checked for L2 (arm32) or L3 (arm64) tables.
        type: unused
        tte: KVA of PTE to check for presence in PV list.  If None, presence check will be skipped.
        paddr: physical address whose PV list should be checked.  Need not be page-aligned.
        va: unused
        granule: unused
        Always returns True so ScanPageTables() keeps descending.

        NOTE(review): page_base_mask/max_level/pvh_set_bits are only assigned
        in the arm64 branch; all current callers gate on ARM, but a non-arm64
        target would hit a NameError below — confirm before widening use.
    """
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    page_size = kern.globals.page_size
    if kern.arch.startswith('arm64'):
        page_offset_mask = (page_size - 1)
        page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
        paddr = paddr & page_base_mask
        max_level = 3
        pvh_set_bits = PVH_HIGH_FLAGS_ARM64
    # Only leaf-level tables have PV entries, and only kernel-managed pages
    # are tracked in the pv_head_table.
    if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys:
        return True
    pn = (paddr - vm_first_phys) // page_size
    pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits
    pvh_type = pvh & 0x3
    if pmap is not None:
        pmap_str = "pmap: {:#x}: ".format(pmap)
    else:
        pmap_str = ''
    if tte is not None:
        tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr)
    else:
        tte_str = "paddr {:#x}: ".format(paddr)
    # PVH types: 0 = NULL, 1 = PVE list, 2 = single PTE, 3 = PTD.
    if pvh_type == 0 or pvh_type == 3:
        print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type))
    elif pvh_type == 2:
        ptep = pvh & ~0x3
        if tte is not None and ptep != unsigned(tte):
            print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep))
        try:
            pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
            if (pte != paddr):
                print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
        except Exception as exc:
            print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep))
    elif pvh_type == 1:
        pvep = pvh & ~0x3
        tte_match = False
        pve_ptep_idx = 0
        # Each pv_entry_t holds two PTE slots; advance to the next PVE only
        # after both slots of the current one have been examined.
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")
            ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3
            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)
            if ptep == 0:
                continue
            if tte is not None and ptep == unsigned(tte):
                tte_match = True
            try:
                pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
                if (pte != paddr):
                    print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
            except Exception as exc:
                print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep))
        if tte is not None and not tte_match:
            # BUGFIX: the old code formatted the integer paddr with "{:s}",
            # which raised a ValueError instead of printing this diagnostic.
            # tte_str already contains paddr, so just drop the extra argument.
            print("{:s}{:s}not found in PV list".format(pmap_str, tte_str))
    return True
1312
@lldb_command('pv_check', 'P')
def PVCheck(cmd_args=None, cmd_options={}):
    """ Check the physical-to-virtual mapping for a given PTE or physical address
        Syntax: (lldb) pv_check <addr> [-P]
            -P        : Interpret <addr> as a physical address rather than a PTE
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pv_check.")
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("pv_check does not support {0}".format(kern.arch))
    # PV lists are only checked for leaf (L3) mappings on arm64.
    leaf_level = 3
    if "-P" in cmd_options:
        # Address is already physical; no PTE to cross-check against.
        pte = None
        pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long")))
    else:
        # Address is a PTE pointer; the PTE's contents give the physical page.
        pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *')
        pa = int(unsigned(dereference(pte)))
    checkPVList(None, leaf_level, None, pte, pa, 0, None)
1332
@lldb_command('check_pmaps')
def CheckPmapIntegrity(cmd_args=None):
    """ Performs a system-wide integrity check of all PTEs and associated PV lists.
        Optionally only checks the pmap specified by [<pmap>]
        Syntax: (lldb) check_pmaps [<pmap>]
        WARNING: this macro can take a HUGE amount of time (several hours) if you do not
        specify [pmap] to limit it to a single pmap.  It will also give false positives
        for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM.
        Use of this macro without the [<pmap>] argument is heavily discouraged.
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch))
    targetPmap = None
    # Guard against cmd_args being None (its declared default) as well as
    # empty; the previous bare len() check raised TypeError on None.
    if cmd_args:
        targetPmap = cmd_args[0]
    ScanPageTables(checkPVList, targetPmap)
1349
@lldb_command('pmapsforledger')
def PmapsForLedger(cmd_args=None):
    """ Find and display all pmaps currently using <ledger>.
        Syntax: (lldb) pmapsforledger <ledger>
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmapsforledger.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch))
    target_ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t')
    # Walk the system pmap list and report every pmap whose ledger matches.
    for candidate in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if candidate.ledger == target_ledger:
            print("pmap: {:#x}".format(candidate))
1363
1364
def IsValidPai(pai):
    """ Given an unsigned value, detect whether that value is a valid physical
        address index (PAI) by comparing it against the last possible PAI.

        All contemporary SoCs reserve the bottom part of the address space, so
        there shouldn't be any valid physical addresses between zero and the
        last PAI either.
    """
    page_size = unsigned(kern.globals.page_size)
    first_phys = unsigned(kern.globals.vm_first_phys)
    last_phys = unsigned(kern.globals.vm_last_phys)

    # One PAI per kernel-managed page.
    last_pai = (last_phys - first_phys) // page_size
    return 0 <= pai < last_pai
1383
def ConvertPaiToPhysAddr(pai):
    """ Convert the given Physical Address Index (PAI) into a physical address.

        If the input isn't a valid PAI (it's most likely already a physical
        address), then just return back the input unchanged.
    """
    if not IsValidPai(pai):
        # Most likely already a physical address; hand it back untouched.
        return pai
    return (pai * unsigned(kern.globals.page_size)) + unsigned(kern.globals.vm_first_phys)
1397
def ConvertPhysAddrToPai(pa):
    """ Convert the given physical address into a Physical Address Index (PAI).

        If the input is already a valid PAI, then just return back the input
        unchanged.  Raises ArgumentError when the input is neither a PAI nor
        a kernel-managed physical address.
    """
    if IsValidPai(pa):
        return pa

    first_phys = unsigned(kern.globals.vm_first_phys)
    last_phys = unsigned(kern.globals.vm_last_phys)
    # Only kernel-managed physical addresses have pv_head_table entries.
    if pa < first_phys or pa >= last_phys:
        raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, first_phys, last_phys))

    return (pa - first_phys) // unsigned(kern.globals.page_size)
1415
@lldb_command('pmappaindex')
def PmapPaIndex(cmd_args=None):
    """ Display both a physical address and physical address index (PAI) when
        provided with only one of those values.

        Syntax: (lldb) pmappaindex <physical address | PAI>

        NOTE: This macro will throw an exception if the input isn't a valid PAI
              and is also not a kernel-managed physical address.
    """
    if not cmd_args:
        raise ArgumentError("Too few arguments to pmappaindex.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmappaindex is only supported on ARM devices.")

    value = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    if IsValidPai(value):
        # Input is a PAI; derive the physical address from it.
        pai, phys_addr = value, ConvertPaiToPhysAddr(value)
    else:
        # Input is a physical address; derive the PAI from it.
        pai, phys_addr = ConvertPhysAddrToPai(value), value

    print("Physical Address: {:#x}".format(phys_addr))
    print("PAI: {:d}".format(pai))
1445