xref: /xnu-11215.41.3/tools/lldbmacros/pmap.py (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1from xnu import *
2import xnudefines
3from kdp import *
4from utils import *
5import struct
6
def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None):
    """ Read an integer from physical memory.
        params:
            phys_addr : int - physical address to read from
            bitsize   : int - width of the read in bits (8/16/32/64). defaults to 64 bit
            cpuval    : None (optional)
        returns:
            int - value read from memory. in case of failure 0xBAD10AD is returned.
    """
    if GetConnectionProtocol() == "kdp":
        return KDPReadPhysMEM(phys_addr, bitsize)

    # No KDP connection. Translate the physical address into the kernel's
    # virtual mapping and dereference it directly.
    ctype_for_width = {
        64: 'uint64_t *',
        32: 'uint32_t *',
        16: 'uint16_t *',
        8:  'uint8_t *',
    }
    paddr_in_kva = kern.PhysToKernelVirt(int(phys_addr))
    if paddr_in_kva and bitsize in ctype_for_width:
        ptr = kern.GetValueFromAddress(paddr_in_kva, ctype_for_width[bitsize])
        return ptr.GetSBValue().Dereference().GetValueAsUnsigned()
    return 0xBAD10AD
31
@lldb_command('readphys')
def ReadPhys(cmd_args = None):
    """ Reads the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: readphys <nbits> <address>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
    """
    # Guard clause: the docstring doubles as the usage message.
    if cmd_args is None or len(cmd_args) < 2:
        print("Insufficient arguments.", ReadPhys.__doc__)
        return False
    nbits = ArgumentStringToInt(cmd_args[0])
    phys_addr = ArgumentStringToInt(cmd_args[1])
    print("{0: <#x}".format(ReadPhysInt(phys_addr, nbits)))
    return True
49
# Fixed-width convenience forms of the readphys command,
# e.g. `readphys64 <address>`.
lldb_alias('readphys8', 'readphys 8 ')
lldb_alias('readphys16', 'readphys 16 ')
lldb_alias('readphys32', 'readphys 32 ')
lldb_alias('readphys64', 'readphys 64 ')
54
def KDPReadPhysMEM(address, bits):
    """ Setup the state for READPHYSMEM64 commands for reading data via kdp
        params:
            address : int - address where to read the data from
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            int: read value from memory.
            0xBAD10AD: if failed to read data.
    """
    retval = 0xBAD10AD
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return retval

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # The pack(">")/unpack("<") pairs byte-swap each field into the wire
        # order expected by the tool.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]
        # Hex payload: 64-bit address, 32-bit byte count, 16-bit pad (zero).
        packet = "{0:016x}{1:08x}{2:04x}".format(addr_for_kdp, byte_count, 0x0)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # NOTE(review): -c 25 is presumably the KDP_READPHYSMEM64 request
        # code -- confirm against the kdp protocol definitions.
        ci.HandleCommand('process plugin packet send -c 25 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            value = ret_obj.GetOutput()

            # Pick the struct format matching the requested width; the
            # pack/unpack pair below byte-swaps the reply into host order.
            # NOTE(review): if bits is not one of 8/16/32/64, unpack_fmt is
            # never bound and the unpack below raises NameError.
            if bits == 64 :
                pack_fmt = "<Q"
                unpack_fmt = ">Q"
            if bits == 32 :
                pack_fmt = "<I"
                unpack_fmt = ">I"
            if bits == 16 :
                pack_fmt = "<H"
                unpack_fmt = ">H"
            if bits == 8 :
                pack_fmt = "<B"
                unpack_fmt = ">B"

            # Parse the last (bits // 4) + 1 characters of the textual reply
            # as hex -- presumably (bits // 4) nibbles plus one trailing
            # character; TODO confirm the exact reply format.
            retval = struct.unpack(unpack_fmt, struct.pack(pack_fmt, int(value[-((bits // 4)+1):], 16)))[0]

    else:
        # Classic kdp: stage the request in the kernel's manual_pkt buffer.
        # The debugger writes the request into manual_pkt.data, then sets
        # manual_pkt.input to 1 to hand it to the kernel.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))

        # Clear the input flag before staging the request.
        if not WriteInt32ToMemoryAddress(0, input_address):
            return retval

        kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize()
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return retval

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size)

        # Fill in the request: header, physical address, byte count and the
        # logical cpu that should perform the read.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress((bits // 8), int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Setting input to 1 submits the staged request to the kernel.
            if WriteInt32ToMemoryAddress(1, input_address):
                # now read data from the kdp packet
                data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data))
                if bits == 64 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 32 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 16 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
                if bits == 8 :
                    retval =  kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()

    return retval
133
134
def KDPWritePhysMEM(address, intval, bits):
    """ Setup the state for WRITEPHYSMEM64 commands for saving data in kdp
        params:
            address : int - address where to save the data
            intval : int - integer value to be stored in memory
            bits : int - number of bits in the intval (8/16/32/64)
        returns:
            boolean: True if the write succeeded.
    """
    if "kdp" != GetConnectionProtocol():
        print("Target is not connected over kdp. Nothing to do here.")
        return False

    if "hwprobe" == KDPMode():
        # Send the proper KDP command and payload to the bare metal debug tool via a KDP server
        # Byte-swap the address and byte count into the wire order expected
        # by the tool.
        addr_for_kdp = struct.unpack("<Q", struct.pack(">Q", address))[0]
        byte_count = struct.unpack("<I", struct.pack(">I", bits // 8))[0]

        # NOTE(review): if bits is not one of 8/16/32/64, pack_fmt/unpack_fmt
        # are never bound and the pack below raises NameError.
        if bits == 64 :
            pack_fmt = ">Q"
            unpack_fmt = "<Q"
        if bits == 32 :
            pack_fmt = ">I"
            unpack_fmt = "<I"
        if bits == 16 :
            pack_fmt = ">H"
            unpack_fmt = "<H"
        if bits == 8 :
            pack_fmt = ">B"
            unpack_fmt = "<B"

        # Byte-swap the value to be written.
        data_val = struct.unpack(unpack_fmt, struct.pack(pack_fmt, intval))[0]

        # Hex payload: 64-bit address, 32-bit byte count, 16-bit pad, data.
        packet = "{0:016x}{1:08x}{2:04x}{3:016x}".format(addr_for_kdp, byte_count, 0x0, data_val)

        ret_obj = lldb.SBCommandReturnObject()
        ci = lldb.debugger.GetCommandInterpreter()
        # NOTE(review): -c 26 is presumably the KDP_WRITEPHYSMEM64 request
        # code -- confirm against the kdp protocol definitions.
        ci.HandleCommand('process plugin packet send -c 26 -p {0}'.format(packet), ret_obj)

        if ret_obj.Succeeded():
            return True
        else:
            return False

    else:
        # Classic kdp: stage a kdp_writephysmem64_req_t in the kernel's
        # manual_pkt buffer, then set manual_pkt.input to 1 to submit it.
        input_address = unsigned(addressof(kern.globals.manual_pkt.input))
        len_address = unsigned(addressof(kern.globals.manual_pkt.len))
        data_address = unsigned(addressof(kern.globals.manual_pkt.data))
        if not WriteInt32ToMemoryAddress(0, input_address):
            return False

        # The request length includes the payload bytes that follow the
        # fixed-size request structure.
        kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits // 8)
        if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
            return False

        data_addr = int(addressof(kern.globals.manual_pkt))
        pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *')

        header_value =GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size)

        # Fill in the request: header, physical address, byte count and the
        # logical cpu that should perform the write.
        if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
             WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
             WriteInt32ToMemoryAddress(bits // 8, int(addressof(pkt.nbytes))) and
             WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
             ):

            # Store the payload with the width matching the request.
            if bits == 8:
                if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 16:
                if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 32:
                if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            if bits == 64:
                if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))):
                    return False
            # Submitting the staged request (input = 1) completes the write.
            if WriteInt32ToMemoryAddress(1, input_address):
                return True
        return False
216
217
def WritePhysInt(phys_addr, int_val, bitsize = 64):
    """ Write an integer value into physical memory.
        params:
            phys_addr : int - physical address to write to
            int_val   : int - value to store at that address
            bitsize   : int - width of the write in bits. defaults to 64 bit
        returns:
            bool - True if write was successful.
    """
    # Physical writes are only possible over a KDP connection.
    if GetConnectionProtocol() != "kdp":
        print("Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol())
        return False
    if KDPWritePhysMEM(phys_addr, int_val, bitsize):
        return True
    print("Failed to write via KDP.")
    return False
235
@lldb_command('writephys')
def WritePhys(cmd_args=None):
    """ writes to the specified untranslated address
        The argument is interpreted as a physical address, and the 64-bit word
        addressed is displayed.
        usage: writephys <nbits> <address> <value>
        nbits: 8,16,32,64
        address: 1234 or 0x1234 or `foo_ptr`
        value: int value to be written
        ex. (lldb)writephys 16 0x12345abcd 0x25
    """
    # Guard clause: the docstring doubles as the usage message.
    if cmd_args is None or len(cmd_args) < 3:
        print("Invalid arguments.", WritePhys.__doc__)
        return
    nbits, phys_addr, int_value = (ArgumentStringToInt(arg) for arg in cmd_args[:3])
    print(WritePhysInt(phys_addr, int_value, nbits))
254
255
# Fixed-width convenience forms of the writephys command,
# e.g. `writephys64 <address> <value>`.
lldb_alias('writephys8', 'writephys 8 ')
lldb_alias('writephys16', 'writephys 16 ')
lldb_alias('writephys32', 'writephys 32 ')
lldb_alias('writephys64', 'writephys 64 ')
260
261
def _PT_Step(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each table entry is 8 bytes wide.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex dump of the whole 512-entry table that contains this entry.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Physical-address field of the entry: drop the low 12 offset bits and
    # the top 12 flag bits ...
    paddr_mask = ~((0xfff<<52) | 0xfff)
    # ... or drop the low 21 bits for a 2MB large-page mapping.
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet path: compute the result tuple without building any output.
        if entry & 0x1 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            # Bit 7 set: the entry maps a large page directly.
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        # Bit 0: present/valid.
        if entry & 0x1:
            out_string += " valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += " invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed mapping.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        # Bit 1: writable.
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " read-only"

        # Bit 2: user vs supervisor access.
        if entry & (0x1 << 2):
            out_string += " user"
        else:
            out_string += " supervisor"

        # Bit 3: page-level write-through.
        if entry & (0x1 << 3):
            out_string += " PWT"

        # Bit 4: page-level cache disable.
        if entry & (0x1 << 4):
            out_string += " PCD"

        # Bit 5: accessed.
        if entry & (0x1 << 5):
            out_string += " accessed"

        # Bit 6: dirty.
        if entry & (0x1 << 6):
            out_string += " dirty"

        # Bit 7: page size -- the entry maps a large page, not another table.
        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False

        # Bit 8: global.
        if entry & (0x1 << 8):
            out_string += " global"

        # Bits 9-10: available for software use.
        if entry & (0x3 << 9):
            out_string += " avail:{0:x}".format((entry >> 9) & 0x3)

        # Bit 63: execute disable.
        if entry & (0x1 << 63):
            out_string += " noexec"
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
350
def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT):
    """
     Step to lower-level page table and print attributes for EPT pmap
       paddr: current page table entry physical address
       index: current page table entry index (0..511)
       verbose_level:    vHUMAN: print nothing
                         vSCRIPT: print basic information
                         vDETAIL: print basic information and hex table dump
     returns: (pt_paddr, pt_valid, pt_large)
       pt_paddr: next level page table entry physical address
                      or null if invalid
       pt_valid: 1 if $kgm_pt_paddr is valid, 0 if the walk
                      should be aborted
       pt_large: 1 if kgm_pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    # Each table entry is 8 bytes wide.
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self )
    out_string = ''
    if verbose_level >= vDETAIL:
        # Hex dump of the whole 512-entry table that contains this entry.
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    # Physical-address field of the entry: drop the low 12 offset bits and
    # the top 12 flag bits ...
    paddr_mask = ~((0xfff<<52) | 0xfff)
    # ... or drop the low 21 bits for a 2MB large-page mapping.
    paddr_large_mask =  ~((0xfff<<52) | 0x1fffff)
    pt_valid = False
    pt_large = False
    pt_paddr = 0
    if verbose_level < vSCRIPT:
        # Quiet path.  An EPT entry is usable when any of the read/write/
        # execute permission bits (bits 0-2) is set.
        if entry & 0x7 :
            pt_valid = True
            pt_large = False
            pt_paddr = entry & paddr_mask
            # Bit 7 set: the entry maps a large page directly.
            if entry & (0x1 <<7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        out_string+= "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        # Bits 0-2: read/write/execute; any of them set means present.
        if entry & 0x7:
            out_string += "valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += "invalid"
            pt_paddr = 0
            pt_valid = False
            # Bit 62 marks a compressed mapping.
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        # Bit 0: read permission.
        if entry & 0x1:
            out_string += " readable"
        else:
            out_string += " no read"
        # Bit 1: write permission.
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " no write"

        # Bit 2: execute permission.
        if entry & (0x1 << 2):
            out_string += " executable"
        else:
            out_string += " no exec"

        # Bits 3-5: memory (cache) type.
        ctype = entry & 0x38
        if ctype == 0x30:
            out_string += " cache-WB"
        elif ctype == 0x28:
            out_string += " cache-WP"
        elif ctype == 0x20:
            out_string += " cache-WT"
        elif ctype == 0x8:
            out_string += " cache-WC"
        else:
            out_string += " cache-NC"

        # Bit 6: ignore guest PAT memory type.
        if (entry & 0x40) == 0x40:
            out_string += " Ignore-PTA"

        # Bit 8: accessed.
        if (entry & 0x100) == 0x100:
            out_string += " accessed"

        # Bit 9: dirty.
        if (entry & 0x200) == 0x200:
            out_string += " dirty"

        # Bit 7: the entry maps a large page, not another table.
        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
    print(out_string)
    return (pt_paddr, pt_valid, pt_large)
443
def _PmapL4Walk(pmap_addr_val, vaddr, ept_pmap, verbose_level = vSCRIPT):
    """ Walk the 4-level (IA-32e / EPT) page tables and translate vaddr.
        params:
            pmap_addr_val - core.value representing kernel data of type pmap_addr_t
            vaddr : int - virtual address to walk
            ept_pmap - nonzero to decode entries in EPT format
            verbose_level - vHUMAN / vSCRIPT / vDETAIL
        returns: int - translated physical address, or 0 when there is no
                 valid translation
    """
    step = _PT_StepEPT if ept_pmap else _PT_Step
    # One tuple per level: (name, shift of the 9-bit table index within
    # vaddr, page-frame offset mask, whether a large page found at the
    # previous level terminates the walk before this level).
    levels = (
        ('pml4', 39, 0x7fffffffff, False),   # index bits 47:39
        ('pdpt', 30, 0x3fffffff, False),     # index bits 38:30
        ('pdt',  21, 0x1fffff, True),        # index bits 29:21
        ('pt',   12, 0xfff, True),           # index bits 20:12
    )
    pt_paddr = unsigned(pmap_addr_val)
    pt_valid = (unsigned(pmap_addr_val) != 0)
    pt_large = 0
    pframe_offset = 0

    for name, shift, offset_mask, large_stops in levels:
        # Abort on an invalid entry; a large-page mapping short-circuits
        # the remaining levels.
        if not pt_valid or (large_stops and pt_large):
            break
        pt_index = (vaddr >> shift) & 0x1ff
        pframe_offset = vaddr & offset_mask
        if verbose_level > vHUMAN:
            print("{0} (index {1:d}):".format(name, pt_index))
        (pt_paddr, pt_valid, pt_large) = step(pt_paddr, pt_index, verbose_level)

    paddr = 0
    paddr_isvalid = False
    if pt_valid:
        # Combine the final page-frame address with the offset bits that
        # were left over at the level where the walk stopped.
        paddr = pt_paddr + pframe_offset
        paddr_isvalid = True

    if verbose_level > vHUMAN:
        if paddr_isvalid:
            pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self)
            print("phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue))
        else:
            print("no translation")

    return paddr
507
def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT):
    """ Walk the page tables of an x86_64 pmap and translate vaddr.
        params: pmapval - core.value representing pmap_t in kernel
                vaddr:  int     - int representing virtual address to walk
                verbose_level - vHUMAN / vSCRIPT / vDETAIL
        returns: int - physical address from the walk (0 if no translation)
    """
    # Fix: honor the caller-supplied verbose_level.  Previously the walk
    # always used config['verbosity'], silently ignoring this function's
    # verbose_level parameter (the ARM64 path respects it).
    if pmapval.pm_cr3 != 0:
        if verbose_level > vHUMAN:
            print("Using normal Intel PMAP from pm_cr3\n")
        return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, verbose_level)
    else:
        if verbose_level > vHUMAN:
            print("Using EPT pmap from pm_eptp\n")
        return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, verbose_level)
521
def assert_64bit(val):
    """ Fail with AssertionError if val does not fit in 64 bits. """
    assert val < (1 << 64)
524
# Geometry of an ARM64 translation table entry / virtual address.
ARM64_TTE_SIZE = 8        # bytes per translation table entry
ARM64_TTE_SHIFT = 3       # log2(ARM64_TTE_SIZE)
ARM64_VMADDR_BITS = 48    # significant bits in a virtual address

def PmapBlockOffsetMaskARM64(page_size, level):
    """ Return the mask covering the byte offset within a block mapping at
        the given translation level, for tables of the given page size.
    """
    assert 0 <= level <= 3
    entries_per_table = page_size >> ARM64_TTE_SHIFT
    # Each level below this one multiplies the span by the entry count.
    return (entries_per_table ** (3 - level)) * page_size - 1

def PmapBlockBaseMaskARM64(page_size, level):
    """ Return the mask selecting the block base-address bits at the given
        translation level, for tables of the given page size.
    """
    assert 0 <= level <= 3
    vm_mask = (1 << ARM64_VMADDR_BITS) - 1
    return vm_mask & ~PmapBlockOffsetMaskARM64(page_size, level)
537
def PmapDecodeTTEARM64(tte, level, stage2 = False, is_iommu_tte = False):
    """ Display the bits of an ARM64 translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level.  Valid values are 1, 2, or 3.
        stage2: True to decode the entry as stage-2 format, False for stage 1.
        is_iommu_tte: True if the TTE is from an IOMMU's page table, False otherwise.
    """
    # NOTE(review): `numbers` is presumably provided by one of the star
    # imports at the top of the file -- confirm.
    assert(isinstance(level, numbers.Integral))
    assert_64bit(tte)

    # Bit 0: valid.
    if tte & 0x1 == 0x0:
        print("Invalid.")
        return

    # Bit 1 distinguishes a table pointer from a block mapping, except at
    # level 3 where the encoding differs.
    if (tte & 0x2 == 0x2) and (level != 0x3):
        print("Type       = Table pointer.")
        # Bits 47:12 hold the next-level table's physical address.
        print("Table addr = {:#x}.".format(tte & 0xfffffffff000))

        if not stage2:
            # Hierarchical permission bits in the top of a table descriptor.
            print("PXN        = {:#x}.".format((tte >> 59) & 0x1))
            print("XN         = {:#x}.".format((tte >> 60) & 0x1))
            print("AP         = {:#x}.".format((tte >> 61) & 0x3))
            print("NS         = {:#x}.".format(tte >> 63))
    else:
        print("Type       = Block.")

        if stage2:
            # Bits 5:2: stage-2 memory attributes.
            print("S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf))
        else:
            # Bits 4:2: memory attribute index.
            attr_index = (tte >> 2) & 0x7
            attr_string = { 0: 'WRITEBACK', 1: 'WRITECOMB', 2: 'WRITETHRU',
                3: 'CACHE DISABLE',
                4: 'RESERVED'
                ,
                5: 'POSTED (DISABLE_XS if FEAT_XS supported)',
                6: 'POSTED_REORDERED (POSTED_COMBINED_REORDERED if FEAT_XS supported)',
                7: 'POSTED_COMBINED_REORDERED (POSTED_COMBINED_REORDERED_XS if FEAT_XS supported)' }

            # Only show the string version of the AttrIdx for CPU mappings since
            # these values don't apply to IOMMU mappings.
            if is_iommu_tte:
                print("AttrIdx    = {:#x}.".format(attr_index))
            else:
                print("AttrIdx    = {:#x} ({:s}).".format(attr_index, attr_string[attr_index]))
            print("NS         = {:#x}.".format((tte >> 5) & 0x1))

        # Bits 7:6: access permissions (stage-2 uses its own encoding).
        if stage2:
            print("S2AP       = {:#x}.".format((tte >> 6) & 0x3))
        else:
            print("AP         = {:#x}.".format((tte >> 6) & 0x3))

        # Bits 9:8: shareability; bit 10: access flag.
        print("SH         = {:#x}.".format((tte >> 8) & 0x3))
        print("AF         = {:#x}.".format((tte >> 10) & 0x1))

        if not stage2:
            # Bit 11: not-global.
            print("nG         = {:#x}.".format((tte >> 11) & 0x1))

        # Bit 52: contiguous hint.
        print("HINT       = {:#x}.".format((tte >> 52) & 0x1))

        # Execute-never bits (stage-2 uses a 2-bit field).
        if stage2:
            print("S2XN       = {:#x}.".format((tte >> 53) & 0x3))
        else:
            print("PXN        = {:#x}.".format((tte >> 53) & 0x1))
            print("XN         = {:#x}.".format((tte >> 54) & 0x1))

        # Bits 58:55: available for software use.
        print("SW Use     = {:#x}.".format((tte >> 55) & 0xf))

    return
606
def PmapTTnIndexARM64(vaddr, pmap_pt_attr):
    """ Return a list of the translation-table indices of vaddr for every
        level 0..pta_max_level, computed from each level's index_mask and
        shift in the pmap attribute structure.
    """
    level_info = pmap_pt_attr.pta_level_info
    max_level = unsigned(pmap_pt_attr.pta_max_level)
    return [(vaddr & unsigned(level_info[lvl].index_mask)) >> unsigned(level_info[lvl].shift)
            for lvl in range(max_level + 1)]
616
def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN):
    """ Walk the ARM64 translation tables described by pmap_pt_attr and
        translate a virtual address.
        params:
            pmap_pt_attr - core.value: page table attribute structure
                           (page size, root/max levels, per-level masks)
            root_tte - core.value: root translation table (array of TTEs)
            vaddr: int - virtual address to translate
            verbose_level - vHUMAN / vSCRIPT / vDETAIL
        returns: int - the physical address, or -1 if the walk failed
    """
    # Fix: was `type(vaddr) in (int, int)`, a python2 (int, long) leftover.
    assert isinstance(vaddr, numbers.Integral)
    assert_64bit(vaddr)
    assert_64bit(root_tte)

    # Obtain pmap attributes
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr)
    stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False)

    # The pmap starts at a page table level that is defined by register
    # values; the root level can be obtained from the attributes structure
    level = unsigned(pmap_pt_attr.pta_root_level)

    root_tt_index = tt_index[level]
    tte = int(unsigned(root_tte[root_tt_index]))

    # Walk the page tables
    paddr = -1
    max_level = unsigned(pmap_pt_attr.pta_max_level)
    is_valid = True
    is_leaf = False

    while (level <= max_level):
        if verbose_level >= vSCRIPT:
            print("L{} entry: {:#x}".format(level, tte))
        if verbose_level >= vDETAIL:
            PmapDecodeTTEARM64(tte, level, stage2)

        # Bit 0 clear: invalid entry; the walk cannot continue.
        if tte & 0x1 == 0x0:
            if verbose_level >= vHUMAN:
                print("L{} entry invalid: {:#x}\n".format(level, tte))

            is_valid = False
            break

        # Handle leaf entry (a block mapping, or any entry at the max level)
        if tte & 0x2 == 0x0 or level == max_level:
            base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level)
            offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level)
            paddr = tte & base_mask
            paddr = paddr | (vaddr & offset_mask)

            if level != max_level:
                # NOTE(review): printed unconditionally (ignores
                # verbose_level) for block mappings -- presumably
                # intentional; confirm.
                print("phys: {:#x}".format(paddr))

            is_leaf = True
            break
        else:
        # Handle page table entry
            next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1])
            assert(isinstance(next_phys, numbers.Integral))

            next_virt = kern.PhysToKernelVirt(next_phys)
            assert(isinstance(next_virt, numbers.Integral))

            if verbose_level >= vDETAIL:
                print("L{} physical address: {:#x}. L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt))

            ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*")
            tte = int(unsigned(dereference(ttep)))
            assert(isinstance(tte, numbers.Integral))

        # We've parsed one level, so go to the next level
        assert(level <= 3)
        level = level + 1


    if verbose_level >= vHUMAN:
        # Fix: compare against the -1 failure sentinel rather than
        # truthiness.  `if paddr:` treated a failed walk (-1) as success
        # and printed "Translation ... is -0x1", while a legitimate
        # translation to physical page 0 was reported as "(no translation)".
        if paddr != -1:
            print("Translation of {:#x} is {:#x}.".format(vaddr, paddr))
        else:
            print("(no translation)")

    return paddr
696
def PmapWalk(pmap, vaddr, verbose_level = vHUMAN):
    """ Translate vaddr through the given pmap, dispatching to the
        architecture-specific page-table walker.
    """
    if kern.arch == 'x86_64':
        return PmapWalkX86_64(pmap, vaddr, verbose_level)
    if kern.arch.startswith('arm64'):
        # Obtain pmap attributes from the pmap structure; kernels without a
        # per-pmap attribute pointer fall back to the native attributes.
        if hasattr(pmap, 'pmap_pt_attr'):
            pt_attr = pmap.pmap_pt_attr
        else:
            pt_attr = kern.globals.native_pt_attr
        return PmapWalkARM64(pt_attr, pmap.tte, vaddr, verbose_level)
    raise NotImplementedError("PmapWalk does not support {0}".format(kern.arch))
706
@lldb_command('pmap_walk')
def PmapWalkHelper(cmd_args=None):
    """ Perform a page-table walk in <pmap> for <virtual_address>.
        Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e]
            Multiple -v's can be specified for increased verbosity
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to pmap_walk.")

    target_pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t')
    target_vaddr = ArgumentStringToInt(cmd_args[1])
    # Verbosity comes from the global config (driven by the -v flags).
    PmapWalk(target_pmap, target_vaddr, config['verbosity'])
720
def GetMemoryAttributesFromUser(requested_type):
    """ Map a user-supplied page-table geometry name ('4k', '16k', '16k_s2',
        case-insensitive) to the corresponding kernel pmap attribute
        structure, or None if the name is not recognized.
    """
    attr_by_name = {
        '4k' : kern.globals.pmap_pt_attr_4k,
        '16k' : kern.globals.pmap_pt_attr_16k,
        # Stage-2 attributes only exist on kernels built with them.
        '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2') else None,
    }
    return attr_by_name.get(requested_type.lower())
733
@lldb_command('ttep_walk')
def TTEPWalkPHelper(cmd_args=None):
    """ Perform a page-table walk in <root_ttep> for <virtual_address>.
        Syntax: (lldb) ttep_walk <root_ttep> <virtual_address> [4k|16k|16k_s2] [-v] [-e]
        Multiple -v's can be specified for increased verbosity
        """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to ttep_walk.")

    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch))

    # The first argument is the root table's physical address; map it into
    # the kernel's virtual address space before use.
    root_kva = kern.PhysToKernelVirt(ArgumentStringToInt(cmd_args[0]))
    tte = kern.GetValueFromAddress(root_kva, 'unsigned long *')
    addr = ArgumentStringToInt(cmd_args[1])

    if len(cmd_args) < 3:
        pmap_pt_attr = kern.globals.native_pt_attr
    else:
        pmap_pt_attr = GetMemoryAttributesFromUser(cmd_args[2])
    if pmap_pt_attr is None:
        raise ArgumentError("Invalid translation attribute type.")

    return PmapWalkARM64(pmap_pt_attr, tte, addr, config['verbosity'])
754
@lldb_command('decode_tte')
def DecodeTTE(cmd_args=None):
    """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level> and stage [s1|s2]
        Syntax: (lldb) decode_tte <tte_val> <level> [s1|s2]
    """
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to decode_tte.")
    if len(cmd_args) > 2 and cmd_args[2] not in ("s1", "s2"):
        raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2]))
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("decode_tte does not support {0}".format(kern.arch))
    # Stage 1 is the default unless "s2" was given explicitly.
    stage2 = (len(cmd_args) > 2 and cmd_args[2] == "s2")
    PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2)
769
# pv_head_table entries carry flag bits in the high bits of the pointer value.
# These masks are OR'd back into an entry before dereferencing it (the raw
# table value may have them stripped/unset); the individual arm64 bits are
# decoded by PVWalkARM below (CPU/LOCK/EXEC/LOCKDOWN_*/HASHED/RETIRED/etc.).
PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) | (1 << 58) | (1 << 57) | (1 << 56) | (1 << 55) | (1 << 54)
# arm32 keeps a single high flag bit (bit 31).
PVH_HIGH_FLAGS_ARM32 = (1 << 31)
772
def PVDumpPTE(pvep, ptep, verbose_level = vHUMAN):
    """ Dump information about a single mapping retrieved by the pv_head_table.

        pvep: Either a pointer to the PVE object if the PVH entry is PVH_TYPE_PVEP,
              or None if type PVH_TYPE_PTEP.
        ptep: For type PVH_TYPE_PTEP this should just be the raw PVH entry with
              the high flags already set (the type bits don't need to be cleared).
              For type PVH_TYPE_PVEP this will be the value retrieved from the
              pve_ptep[] array.
        verbose_level: at vDETAIL and above, an extra line describing which
              pmap/IOMMU owns the mapping is printed.
    """
    # Flag bits that mark IOMMU (rather than CPU) mappings; only arm64 tracks
    # IOMMU mappings in the PV list this way.
    if kern.arch.startswith('arm64'):
        iommu_flag = 0x4
        iommu_table_flag = 1 << 63
    else:
        iommu_flag = 0
        iommu_table_flag = 0

    # AltAcct status is only stored in the ptep for PVH_TYPE_PVEP entries.
    if pvep is not None and (ptep & 0x1):
        # Note: It's not possible for IOMMU mappings to be marked as alt acct so
        # setting this string is mutually exclusive with setting the IOMMU strings.
        pte_str = ' (alt acct)'
    else:
        pte_str = ''

    if pvep is not None:
        pve_str = 'PVEP {:#x}, '.format(pvep)
    else:
        pve_str = ''

    # For PVH_TYPE_PTEP, this clears out the type bits. For PVH_TYPE_PVEP, this
    # either does nothing or clears out the AltAcct bit.
    ptep = ptep & ~0x3

    # When printing with extra verbosity, print an extra newline that describes
    # who owns the mapping.
    extra_str = ''

    if ptep & iommu_flag:
        # The mapping is an IOMMU Mapping
        ptep = ptep & ~iommu_flag

        # Due to LLDB automatically setting all the high bits of pointers, when
        # ptep is retrieved from the pve_ptep[] array, LLDB will automatically set
        # the iommu_table_flag, which means this check only works for PVH entries
        # of type PVH_TYPE_PTEP (since those PTEPs come directly from the PVH
        # entry which has the right casting applied to avoid this issue).
        #
        # Why don't we just do the same casting for pve_ptep[] you ask? Well not
        # for a lack of trying, that's for sure. If you can figure out how to
        # cast that array correctly, then be my guest.
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            if ptep & iommu_table_flag:
                pte_str = ' (IOMMU table), entry'
                ptd = GetPtDesc(KVToPhysARM(ptep))
                iommu = dereference(ptd.iommu)
            else:
                # Instead of dumping the PTE (since we don't have that), dump the
                # descriptor object used by the IOMMU state (t8020dart/nvme_ppl/etc).
                #
                # This works because later on when the "ptep" is dereferenced as a
                # PTE pointer (uint64_t pointer), the descriptor pointer will be
                # dumped as that's the first 64-bit value in the IOMMU state object.
                pte_str = ' (IOMMU state), descriptor'
                ptep = ptep | iommu_table_flag
                iommu = dereference(kern.GetValueFromAddress(ptep, 'ppl_iommu_state *'))

            # For IOMMU mappings, dump who owns the mapping as the extra string.
            extra_str = 'Mapped by {:s}'.format(dereference(iommu.desc).name)
            if unsigned(iommu.name) != 0:
                extra_str += '/{:s}'.format(iommu.name)
            extra_str += ' (iommu state: {:x})'.format(addressof(iommu))
        else:
            # Non-PPL (SPTM) path: the PTD records the owning IOMMU token directly.
            ptd = GetPtDesc(KVToPhysARM(ptep))
            extra_str = 'Mapped by IOMMU {:x}'.format(ptd.iommu)
    else:
        # The mapping is a CPU Mapping
        pte_str += ', entry'
        ptd = GetPtDesc(KVToPhysARM(ptep))
        if ptd.pmap == kern.globals.kernel_pmap:
            extra_str = "Mapped by kernel task (kernel_pmap: {:#x})".format(ptd.pmap)
        elif verbose_level >= vDETAIL:
            task = TaskForPmapHelper(ptd.pmap)
            extra_str = "Mapped by user task (pmap: {:#x}, task: {:s})".format(ptd.pmap, "{:#x}".format(task) if task is not None else "<unknown>")
    # The PTE itself may be unreadable (e.g. not present in a core file);
    # degrade gracefully rather than aborting the caller's PV-list walk.
    try:
        print("{:s}PTEP {:#x}{:s}: {:#x}".format(pve_str, ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))))
    except:
        print("{:s}PTEP {:#x}{:s}: <unavailable>".format(pve_str, ptep, pte_str))

    if verbose_level >= vDETAIL:
        print("    |-- {:s}".format(extra_str))
864
def PVWalkARM(pai, verbose_level = vHUMAN):
    """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap.

        pai: physical address index (PAI) corresponding to the pv_head_table
             entry to walk.
        verbose_level: Set to vSCRIPT or higher to print extra info around the
                       the pv_head_table/pp_attr_table flags and to dump the
                       pt_desc_t object if the type is a PTD.
    """
    # LLDB will automatically try to make pointer values dereferencable by
    # setting the upper bits if they aren't set. We need to parse the flags
    # stored in the upper bits later, so cast the pv_head_table to an array of
    # integers to get around this "feature". We'll add the upper bits back
    # manually before deref'ing anything.
    pv_head_table = cast(kern.GetGlobalVariable('pv_head_table'), "uintptr_t*")
    pvh_raw = unsigned(pv_head_table[pai])
    pvh = pvh_raw
    # The low two bits of a PVH entry encode its type (see branches below).
    pvh_type = pvh & 0x3

    print("PVH raw value: {:#x}".format(pvh_raw))
    if kern.arch.startswith('arm64'):
        pvh = pvh | PVH_HIGH_FLAGS_ARM64
    else:
        pvh = pvh | PVH_HIGH_FLAGS_ARM32

    if pvh_type == 0:
        print("PVH type: NULL")
    elif pvh_type == 3:
        print("PVH type: page-table descriptor ({:#x})".format(pvh & ~0x3))
    elif pvh_type == 2:
        print("PVH type: single PTE")
        PVDumpPTE(None, pvh, verbose_level)
    elif pvh_type == 1:
        pvep = pvh & ~0x3
        print("PVH type: PTE list")
        # Each pv_entry_t node holds two PTE slots (pve_ptep[0..1]); visit both
        # slots of a node before following pve_next to the next node.
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")

            if pve.pve_ptep[pve_ptep_idx] != 0:
                PVDumpPTE(pvep, pve.pve_ptep[pve_ptep_idx], verbose_level)

            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)

    if verbose_level >= vDETAIL:
        if (pvh_type == 1) or (pvh_type == 2):
            # Dump pv_head_table flags when there's a valid mapping.
            pvh_flags = []

            if pvh_raw & (1 << 62):
                pvh_flags.append("CPU")
            if pvh_raw & (1 << 60):
                pvh_flags.append("EXEC")
            if pvh_raw & (1 << 59):
                pvh_flags.append("LOCKDOWN_KC")
            if pvh_raw & (1 << 58):
                pvh_flags.append("HASHED")
            if pvh_raw & (1 << 57):
                pvh_flags.append("LOCKDOWN_CS")
            if pvh_raw & (1 << 56):
                pvh_flags.append("LOCKDOWN_RO")
            if pvh_raw & (1 << 55):
                pvh_flags.append("RETIRED")
            if pvh_raw & (1 << 54):
                # Bit 54's meaning depends on the page protection model in use.
                if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
                    pvh_flags.append("SECURE_FLUSH_NEEDED")
                else:
                    pvh_flags.append("SLEEPABLE_LOCK")
            if kern.arch.startswith('arm64') and pvh_raw & (1 << 61):
                pvh_flags.append("LOCK")

            print("PVH Flags: {}".format(pvh_flags))

        # Always dump pp_attr_table flags (these can be updated even if there aren't mappings).
        ppattr = unsigned(kern.globals.pp_attr_table[pai])
        print("PPATTR raw value: {:#x}".format(ppattr))

        # The low 6 bits are the WIMG field; the remaining bits are booleans.
        ppattr_flags = ["WIMG ({:#x})".format(ppattr & 0x3F)]
        if ppattr & 0x40:
            ppattr_flags.append("REFERENCED")
        if ppattr & 0x80:
            ppattr_flags.append("MODIFIED")
        if ppattr & 0x100:
            ppattr_flags.append("INTERNAL")
        if ppattr & 0x200:
            ppattr_flags.append("REUSABLE")
        if ppattr & 0x400:
            ppattr_flags.append("ALTACCT")
        if ppattr & 0x800:
            ppattr_flags.append("NOENCRYPT")
        if ppattr & 0x1000:
            ppattr_flags.append("REFFAULT")
        if ppattr & 0x2000:
            ppattr_flags.append("MODFAULT")
        if ppattr & 0x4000:
            ppattr_flags.append("MONITOR")
        if ppattr & 0x8000:
            ppattr_flags.append("NO_MONITOR")

        print("PPATTR Flags: {}".format(ppattr_flags))

        if pvh_type == 3:
            def RunLldbCmdHelper(command):
                """Helper for dumping an LLDB command right before executing it
                and printing the results.
                command: The LLDB command (as a string) to run.

                Example input: "p/x kernel_pmap".
                """
                print("\nExecuting: {:s}\n{:s}".format(command, lldb_run_command(command)))
            # Dump the page table descriptor object
            ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
            RunLldbCmdHelper("p/x *(pt_desc_t*)" + hex(ptd))

            # Depending on the system, more than one ptd_info can be associated
            # with a single PTD. Only dump the first PTD info and assume the
            # user knows to dump the rest if they're on one of those systems.
            RunLldbCmdHelper("p/x ((pt_desc_t*)" + hex(ptd) + ")->ptd_info[0]")
986
@lldb_command('pv_walk')
def PVWalk(cmd_args=None):
    """ Show mappings for <physical_address | PAI> tracked in the PV list.
        Syntax: (lldb) pv_walk <physical_address | PAI> [-vv]

        Extra verbosity will pretty print the pv_head_table/pp_attr_table flags
        as well as dump the page table descriptor (PTD) struct if the entry is a
        PTD.
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pv_walk.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pv_walk does not support {0}".format(kern.arch))

    phys_or_pai = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    # ConvertPhysAddrToPai() returns an input that is already a PAI unchanged,
    # and also verifies that the physical address is kernel-managed.
    pai = ConvertPhysAddrToPai(phys_or_pai)

    PVWalkARM(pai, config['verbosity'])
1008
@lldb_command('kvtophys')
def KVToPhys(cmd_args=None):
    """ Translate a kernel virtual address to the corresponding physical address.
        Assumes the virtual address falls within the kernel static region.
        Syntax: (lldb) kvtophys <kernel virtual address>
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to kvtophys.")
    if kern.arch.startswith('arm'):
        vaddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
        print("{:#x}".format(KVToPhysARM(vaddr)))
    elif kern.arch == 'x86_64':
        # On x86_64 the physmap is a single linear window at physmap_base.
        vaddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
        print("{:#x}".format(vaddr - unsigned(kern.globals.physmap_base)))
1021
@lldb_command('phystokv')
def PhysToKV(cmd_args=None):
    """ Translate a physical address to the corresponding static kernel virtual address.
        Assumes the physical address corresponds to managed DRAM.
        Syntax: (lldb) phystokv <physical address>
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to phystokv.")
    paddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print("{:#x}".format(kern.PhysToKernelVirt(paddr)))
1031
def KVToPhysARM(addr):
    """ Translate a kernel virtual address (within the physical aperture /
        static region) to its physical address.

        On PPL-model systems, the ptov_table is consulted first; addresses not
        covered by any ptov entry deliberately fall through to the linear
        gVirtBase/gPhysBase translation at the bottom.  On SPTM-model systems,
        the libsptm PAPT ranges are authoritative and an unmatched address
        raises ValueError instead.
    """
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        ptov_table = kern.globals.ptov_table
        for i in range(0, kern.globals.ptov_index):
            # Match addr against the [va, va + len) window of each entry.
            if (addr >= int(unsigned(ptov_table[i].va))) and (addr < (int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].len)))):
                return (addr - int(unsigned(ptov_table[i].va)) + int(unsigned(ptov_table[i].pa)))
    else:
        papt_table = kern.globals.libsptm_papt_ranges
        page_size = kern.globals.page_size
        for i in range(0, kern.globals.libsptm_n_papt_ranges):
            # Each PAPT range covers num_mappings pages starting at papt_start.
            if (addr >= int(unsigned(papt_table[i].papt_start))) and (addr < (int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].num_mappings) * page_size))):
                return (addr - int(unsigned(papt_table[i].papt_start)) + int(unsigned(papt_table[i].paddr_start)))
        raise ValueError("VA {:#x} not found in physical region lookup table".format(addr))
    # Only reachable on the PPL path: linear static-region translation.
    return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase))
1046
1047
def GetPtDesc(paddr):
    """ Return the pt_desc_t pointer recorded in the pv_head_table for the
        physical page containing paddr.

        Raises ValueError when the PVH entry for that page is not of
        page-table-descriptor type (low type bits != 0x3).
    """
    page_num = (paddr - unsigned(kern.globals.vm_first_phys)) // kern.globals.page_size
    # Re-set the flag bits living in the pointer's high bits so the value is a
    # valid, dereferenceable kernel pointer.
    if kern.arch.startswith('arm64'):
        high_bits = PVH_HIGH_FLAGS_ARM64
    else:
        high_bits = PVH_HIGH_FLAGS_ARM32
    pvh = unsigned(kern.globals.pv_head_table[page_num]) | high_bits
    if (pvh & 0x3) != 0x3:
        raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh))
    return kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *')
1060
def PhysToFrameTableEntry(paddr):
    """ Return the SPTM frame-table entry describing physical address paddr.

        Managed DRAM in [sptm_first_phys, sptm_last_phys) is covered by the
        main frame_table.  Other addresses are looked up in the IO frame-table
        ranges, falling back to the catch-all xnu_io_fte entry.
    """
    sptm_first_phys = int(unsigned(kern.globals.sptm_first_phys))
    sptm_last_phys = int(unsigned(kern.globals.sptm_last_phys))
    # In-range check must be a conjunction: with 'or' (as previously written)
    # every address matched and the IO-range lookup below was unreachable.
    if sptm_first_phys <= paddr < sptm_last_phys:
        return kern.globals.frame_table[(paddr - sptm_first_phys) // kern.globals.page_size]
    # Integer (floor) division: page_idx is compared against integral page
    # indices below, so a float from '/' would be wrong.
    page_idx = paddr // kern.globals.page_size
    for i in range(0, kern.globals.sptm_n_io_ranges):
        base = kern.globals.io_frame_table[i].io_range.phys_page_idx
        end = base + kern.globals.io_frame_table[i].io_range.num_pages
        if page_idx >= base and page_idx < end:
            return kern.globals.io_frame_table[i]
    return kern.globals.xnu_io_fte
1071
@lldb_command('phystofte')
def PhysToFTE(cmd_args=None):
    """ Translate a physical address to the corresponding SPTM frame table entry pointer
        Syntax: (lldb) phystofte <physical address>
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to phystofte.")

    paddr = int(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')))
    print(repr(PhysToFrameTableEntry(paddr)))
1082
# Frame-table entry type codes, as compared against fte.type in
# PhysToFrameTableEntry()/ShowPTEARM().
# NOTE(review): these values presumably mirror the SPTM's frame-type enum --
# confirm against the SPTM headers before adding or changing entries.
XNU_IOMMU = 23
XNU_PAGE_TABLE = 19
XNU_PAGE_TABLE_SHARED = 20
XNU_PAGE_TABLE_ROZONE = 21
XNU_PAGE_TABLE_COMMPAGE = 22
SPTM_PAGE_TABLE = 9
1089
def ShowPTEARM(pte, page_size, level):
    """ Display vital information about an ARM page table entry
        pte: kernel virtual address of the PTE.  page_size and level may be None,
        in which case we'll try to infer them from the page table descriptor.
        Inference of level may only work for L2 and L3 TTEs depending upon system
        configuration.
    """
    pt_index = 0
    stage2 = False
    def GetPageTableInfo(ptd, paddr):
        """ Return (refcnt, level, info_str) for the page-table page described
            by ptd/paddr, inferring page_size/level when they were passed in as
            None.  info_str is non-None only for IOMMU-owned pages.
        """
        # 'stage2' must be in the nonlocal list too: without it, the assignment
        # below would bind a dead local, and the caller would always print
        # "maps VA" and decode as stage 1 even for stage-2 pmaps.
        nonlocal pt_index, page_size, level, stage2
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            # First load ptd_info[0].refcnt so that we can check if this is an IOMMU page.
            # IOMMUs don't split PTDs across multiple 4K regions as CPU page tables sometimes
            # do, so the IOMMU refcnt token is always stored at index 0.  If this is not
            # an IOMMU page, we may end up using a different final value for pt_index below.
            refcnt = ptd.ptd_info[0].refcnt
            # PTDs used to describe IOMMU pages always have a refcnt of 0x8000/0x8001.
            is_iommu_pte = (refcnt & 0x8000) == 0x8000
            if not is_iommu_pte and page_size is None and hasattr(ptd.pmap, 'pmap_pt_attr'):
                page_size = ptd.pmap.pmap_pt_attr.pta_page_size
            elif page_size is None:
                page_size = kern.globals.native_pt_attr.pta_page_size
            pt_index = (pte % kern.globals.page_size) // page_size
            refcnt = ptd.ptd_info[pt_index].refcnt
            if not is_iommu_pte and hasattr(ptd.pmap, 'pmap_pt_attr') and hasattr(ptd.pmap.pmap_pt_attr, 'stage2'):
                stage2 = ptd.pmap.pmap_pt_attr.stage2
            if level is None:
                # A refcnt of 0x4000 marks a non-leaf (L2) table page.
                if refcnt == 0x4000:
                    level = 2
                else:
                    level = 3
            if is_iommu_pte:
                iommu_desc_name = '{:s}'.format(dereference(dereference(ptd.iommu).desc).name)
                if unsigned(dereference(ptd.iommu).name) != 0:
                    iommu_desc_name += '/{:s}'.format(dereference(ptd.iommu).name)
                info_str = "iommu state: {:#x} ({:s})".format(ptd.iommu, iommu_desc_name)
            else:
                info_str = None
            return (int(unsigned(refcnt)), level, info_str)
        else:
            # SPTM systems: the frame-table entry is authoritative.
            fte = PhysToFrameTableEntry(paddr)
            if fte.type == XNU_IOMMU:
                if page_size is None:
                    page_size = kern.globals.native_pt_attr.pta_page_size
                info_str = "PTD iommu token: {:#x} (ID {:#x} TSD {:#x})".format(ptd.iommu, fte.iommu_page.iommu_id, fte.iommu_page.iommu_tsd)
                return (int(unsigned(fte.iommu_page.iommu_refcnt._value)), 0, info_str)
            elif fte.type in [XNU_PAGE_TABLE, XNU_PAGE_TABLE_SHARED, XNU_PAGE_TABLE_ROZONE, XNU_PAGE_TABLE_COMMPAGE, SPTM_PAGE_TABLE]:
                if page_size is None:
                    if hasattr(ptd.pmap, 'pmap_pt_attr'):
                        page_size = ptd.pmap.pmap_pt_attr.pta_page_size
                    else:
                        page_size = kern.globals.native_pt_attr.pta_page_size
                return (int(unsigned(fte.cpu_page_table.mapping_refcnt._value)), int(unsigned(fte.cpu_page_table.level)), None)
            else:
                raise ValueError("Unrecognized FTE type {:#x}".format(fte.type))
    pte_paddr = KVToPhysARM(pte)
    ptd = GetPtDesc(pte_paddr)
    refcnt, level, info_str = GetPageTableInfo(ptd, pte_paddr)
    wiredcnt = ptd.ptd_info[pt_index].wiredcnt
    if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
        va = ptd.va[pt_index]
    else:
        va = ptd.va
    print("descriptor: {:#x} (refcnt: {:#x}, wiredcnt: {:#x}, va: {:#x})".format(ptd, refcnt, wiredcnt, va))

    # The pmap/iommu field is a union, so only print the correct one.
    if info_str is not None:
        print(info_str)
    else:
        if ptd.pmap == kern.globals.kernel_pmap:
            pmap_str = "(kernel_pmap)"
        else:
            task = TaskForPmapHelper(ptd.pmap)
            pmap_str = "(User Task: {:s})".format("{:#x}".format(task) if task is not None else "<unknown>")
        print("pmap: {:#x} {:s}".format(ptd.pmap, pmap_str))
        nttes = page_size // 8
        granule = page_size * (nttes ** (3 - level))
        if kern.globals.page_protection_type <= kern.PAGE_PROTECTION_TYPE_PPL:
            pte_pgoff = pte % page_size
        else:
            pte_pgoff = pte % kern.globals.native_pt_attr.pta_page_size
        pte_pgoff = pte_pgoff // 8
        print("maps {}: {:#x}".format("IPA" if stage2 else "VA", int(unsigned(va)) + (pte_pgoff * granule)))
        pteval = int(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *'))))
        print("value: {:#x}".format(pteval))
        print("level: {:d}".format(level))
        PmapDecodeTTEARM64(pteval, level, stage2)
1179
@lldb_command('showpte')
def ShowPTE(cmd_args=None):
    """ Display vital information about the page table entry at VA <pte>
        Syntax: (lldb) showpte <pte_va> [level] [4k|16k|16k_s2]
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to showpte.")
    if not kern.arch.startswith('arm64'):
        raise NotImplementedError("showpte does not support {0}".format(kern.arch))

    # Optional third argument selects non-native translation attributes;
    # otherwise the page size is inferred from the PTE's descriptor.
    page_size = None
    if len(cmd_args) >= 3:
        attrs = GetMemoryAttributesFromUser(cmd_args[2])
        if attrs is None:
            raise ArgumentError("Invalid translation attribute type.")
        page_size = attrs.pta_page_size

    # Optional second argument pins the table level; otherwise it's inferred.
    level = None
    if len(cmd_args) >= 2:
        level = ArgumentStringToInt(cmd_args[1])

    ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), page_size, level)
1201
def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action):
    """ Perform the specified action for all valid mappings in an ARM64 translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 2 or 3
        va: virtual address mapped by the first entry of tt
        action: callback for each valid TTE; a True return value directs the
                walk to recurse into the next-level table (when one exists)
    """
    # Obtain pmap attributes
    pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
    page_size = pmap_pt_attr.pta_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    max_level = unsigned(pmap_pt_attr.pta_max_level)

    for i in range(nttes):
        try:
            tte = tt[i]
            # Bit 0 is the valid bit; skip invalid entries.
            if tte & 0x1 == 0x0:
                continue

            tt_next = None
            paddr = unsigned(tte) & unsigned(page_base_mask)

            # Handle leaf entry
            # (bit 1 clear means block mapping; at max_level every valid
            # entry is a leaf page entry)
            if tte & 0x2 == 0x0 or level == max_level:
                type = 'block' if level < max_level else 'entry'
                granule = PmapBlockOffsetMaskARM64(page_size, level) + 1
            else:
            # Handle page table entry
                type = 'table'
                granule = page_size
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')

            mapped_va = int(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i)
            if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule):
                if tt_next is not None:
                    FindMappingAtLevelARM64(pmap, tt_next, granule // ARM64_TTE_SIZE, level + 1, mapped_va, action)

        except Exception as exc:
            # Best effort: an unreadable TTE should not abort the whole walk.
            print("Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))))
1243
def ScanPageTables(action, targetPmap=None):
    """ Perform the specified action for all valid mappings in all page tables,
        optionally restricted to a single pmap.
        action: callback invoked as action(pmap, level, type, tte, paddr, va,
                granule); returning True directs the walk to descend further.
        targetPmap: pmap whose page table should be scanned.  If None, all pmaps on system will be scanned.
    """
    print("Scanning all available translation tables.  This may take a long time...")
    def ScanPmap(pmap, action):
        if kern.arch.startswith('arm64'):
            # Obtain pmap attributes
            pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr
            granule = pmap_pt_attr.pta_page_size
            level = unsigned(pmap_pt_attr.pta_root_level)
            root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \
                unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1

        # NOTE(review): pmap_pt_attr/granule/level are only assigned on arm64,
        # so this line would raise NameError on other architectures; current
        # callers gate on the arch first -- confirm before reusing elsewhere.
        if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule):
            if kern.arch.startswith('arm64'):
                FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action)

    if targetPmap is not None:
        ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action)
    else:
        for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
            ScanPmap(pmap, action)
1268
@lldb_command('showallmappings')
def ShowAllMappings(cmd_args=None):
    """ Find and display all available mappings on the system for
        <physical_address>.  Optionally only searches the pmap
        specified by [<pmap>]
        Syntax: (lldb) showallmappings <physical_address> [<pmap>]
        WARNING: this macro can take a long time (up to 30min.) to complete!
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to showallmappings.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showallmappings does not support {0}".format(kern.arch))

    pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    targetPmap = cmd_args[1] if len(cmd_args) > 1 else None

    def printMatchedMapping(pmap, level, type, tte, paddr, va, granule):
        # Report any TTE whose physical span [paddr, paddr + granule) covers pa.
        if paddr <= pa < (paddr + granule):
            print("pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va))
        # Always keep descending into child tables.
        return True

    ScanPageTables(printMatchedMapping, targetPmap)
1290
@lldb_command('showptusage')
def ShowPTUsage(cmd_args=None):
    """ Display a summary of pagetable allocations for a given pmap.
        Syntax: (lldb) showptusage [<pmap>]
        WARNING: this macro can take a long time (> 1hr) to complete!
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showptusage does not support {0}".format(kern.arch))
    # cmd_args may be None when the command is invoked with no arguments, so
    # guard before taking len() (matches the other commands in this file).
    targetPmap = None
    if cmd_args is not None and len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    # Counters shared with the per-TTE callback below.
    lastPmap = None
    numTables = 0
    numUnnested = 0
    numPmaps = 0
    def printValidTTE(pmap, level, type, tte, paddr, va, granule):
        """ ScanPageTables() callback: print and count each table entry. """
        nonlocal lastPmap, numTables, numUnnested, numPmaps
        unnested = ""
        nested_region_addr = int(unsigned(pmap.nested_region_addr))
        nested_region_end = nested_region_addr + int(unsigned(pmap.nested_region_size))
        # Print a header once per pmap as the scan moves between pmaps.
        if lastPmap is None or (pmap != lastPmap):
            lastPmap = pmap
            numPmaps = numPmaps + 1
            print ("pmap {:#x}:".format(pmap))
        if type == 'root':
            return True
        if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end):
            # An L2 entry in the nested region whose table is owned by this
            # pmap (rather than the shared pmap) has likely been unnested.
            ptd = GetPtDesc(paddr)
            if ptd.pmap != pmap:
                return False
            else:
                numUnnested = numUnnested + 1
                unnested = " (likely unnested)"
        numTables = numTables + 1
        print((" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested)
        # Don't descend past L2: only table pages are being counted here.
        if level == 2:
            return False
        else:
            return True
    ScanPageTables(printValidTTE, targetPmap)
    print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables, numUnnested, numPmaps))
1331
def checkPVList(pmap, level, type, tte, paddr, va, granule):
    """ Checks an ARM physical-to-virtual mapping list for consistency errors.
        pmap: owner of the translation table
        level: translation table level.  PV lists will only be checked for L2 (arm32) or L3 (arm64) tables.
        type: unused
        tte: KVA of PTE to check for presence in PV list.  If None, presence check will be skipped.
        paddr: physical address whose PV list should be checked.  Need not be page-aligned.
        va: unused
        granule: unused

        NOTE(review): page_base_mask/max_level/pvh_set_bits are only assigned
        on arm64; on other architectures this raises NameError.  All current
        callers gate on arm64 first -- confirm before reusing elsewhere.
    """
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    page_size = kern.globals.page_size
    if kern.arch.startswith('arm64'):
        page_offset_mask = (page_size - 1)
        page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
        paddr = paddr & page_base_mask
        max_level = 3
        pvh_set_bits = PVH_HIGH_FLAGS_ARM64
    # Only leaf-level mappings of kernel-managed pages appear in the PV lists.
    if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys:
        return True
    pn = (paddr - vm_first_phys) // page_size
    pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits
    pvh_type = pvh & 0x3
    if pmap is not None:
        pmap_str = "pmap: {:#x}: ".format(pmap)
    else:
        pmap_str = ''
    if tte is not None:
        tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr)
    else:
        tte_str = "paddr {:#x}: ".format(paddr)
    if pvh_type == 0 or pvh_type == 3:
        print("{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type))
    elif pvh_type == 2:
        # Single-PTE entry: the PVH itself holds the PTE pointer.
        ptep = pvh & ~0x3
        if tte is not None and ptep != unsigned(tte):
            print("{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep))
        try:
            pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
            if (pte != paddr):
                print("{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
        except Exception as exc:
            print("{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep))
    elif pvh_type == 1:
        # PTE-list entry: walk the pv_entry_t chain (two PTE slots per node).
        pvep = pvh & ~0x3
        tte_match = False
        pve_ptep_idx = 0
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")
            ptep = unsigned(pve.pve_ptep[pve_ptep_idx]) & ~0x3
            pve_ptep_idx += 1
            if pve_ptep_idx == 2:
                pve_ptep_idx = 0
                pvep = unsigned(pve.pve_next)
            if ptep == 0:
                continue
            if tte is not None and ptep == unsigned(tte):
                tte_match = True
            try:
                pte = int(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
                if (pte != paddr):
                    print("{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte))
            except Exception as exc:
                print("{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep))
        if tte is not None and not tte_match:
            # Bug fix: the old format string passed the int paddr to a '{:s}'
            # slot, which raises ValueError whenever this branch is reached.
            # tte_str already identifies both the PTE and paddr, so just drop
            # the redundant argument.
            print("{:s}{:s}not found in PV list".format(pmap_str, tte_str))
    return True
1399
@lldb_command('pv_check', 'P')
def PVCheck(cmd_args=None, cmd_options={}):
    """ Check the physical-to-virtual mapping for a given PTE or physical address
        Syntax: (lldb) pv_check <addr> [-P]
            -P        : Interpret <addr> as a physical address rather than a PTE
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pv_check.")
    # checkPVList is only wired up for the arm64 leaf page-table level.
    if kern.arch.startswith('arm64'):
        level = 3
    else:
        raise NotImplementedError("pv_check does not support {0}".format(kern.arch))
    if "-P" in cmd_options:
        # Argument is a raw physical address; there is no PTE to cross-check
        # against the PV list, so pass None for the PTE.
        pte = None
        pa = int(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long")))
    else:
        # Argument is a PTE pointer; read the mapped physical page out of it.
        pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *')
        pa = int(unsigned(dereference(pte)))
    checkPVList(None, level, None, pte, pa, 0, None)
1419
@lldb_command('check_pmaps')
def CheckPmapIntegrity(cmd_args=None):
    """ Performs a system-wide integrity check of all PTEs and associated PV lists.
        Optionally only checks the pmap specified by [<pmap>]
        Syntax: (lldb) check_pmaps [<pmap>]
        WARNING: this macro can take a HUGE amount of time (several hours) if you do not
        specify [pmap] to limit it to a single pmap.  It will also give false positives
        for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM.
        Use of this macro without the [<pmap>] argument is heavily discouraged.
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch))
    targetPmap = None
    # Guard against cmd_args being None (its declared default); the original
    # len(cmd_args) check would raise TypeError in that case.
    if cmd_args:
        targetPmap = cmd_args[0]
    ScanPageTables(checkPVList, targetPmap)
1436
@lldb_command('pmapsforledger')
def PmapsForLedger(cmd_args=None):
    """ Find and display all pmaps currently using <ledger>.
        Syntax: (lldb) pmapsforledger <ledger>
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pmapsforledger.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch))
    target_ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t')
    # Walk the global pmap list and print every pmap whose ledger matches.
    for candidate in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if candidate.ledger == target_ledger:
            print("pmap: {:#x}".format(candidate))
1450
1451
def IsValidPai(pai):
    """ Given an unsigned value, detect whether that value is a valid physical
        address index (PAI). It does this by first computing the last possible
        PAI and comparing the input to that.

        All contemporary SoCs reserve the bottom part of the address space, so
        there shouldn't be any valid physical addresses between zero and the
        last PAI either.
    """
    pg_sz = unsigned(kern.globals.page_size)
    first_phys = unsigned(kern.globals.vm_first_phys)
    last_phys = unsigned(kern.globals.vm_last_phys)

    # One PAI per kernel-managed page; valid indices fall in [0, last_pai).
    last_pai = (last_phys - first_phys) // pg_sz
    return 0 <= pai < last_pai
1470
def ConvertPaiToPhysAddr(pai):
    """ Convert the given Physical Address Index (PAI) into a physical address.

        If the input isn't a valid PAI (it's most likely already a physical
        address), then just return back the input unchanged.
    """
    if not IsValidPai(pai):
        # Most likely already a physical address; pass it through untouched.
        return pai

    # phys = (index * page size) + base of the kernel-managed range.
    return (pai * unsigned(kern.globals.page_size)) + unsigned(kern.globals.vm_first_phys)
1484
def ConvertPhysAddrToPai(pa):
    """ Convert the given physical address into a Physical Address Index (PAI).

        If the input is already a valid PAI, then just return back the input
        unchanged.
    """
    first_phys = unsigned(kern.globals.vm_first_phys)
    last_phys = unsigned(kern.globals.vm_last_phys)

    if IsValidPai(pa):
        # Already a PAI; nothing to convert.
        return pa

    if pa < first_phys or pa >= last_phys:
        # Not a PAI and outside the kernel-managed physical range: reject.
        raise ArgumentError("{:#x} is neither a valid PAI nor a kernel-managed address: [{:#x}, {:#x})".format(pa, first_phys, last_phys))

    # index = offset into the managed range, in units of pages.
    return (pa - first_phys) // unsigned(kern.globals.page_size)
1502
@lldb_command('pmappaindex')
def PmapPaIndex(cmd_args=None):
    """ Display both a physical address and physical address index (PAI) when
        provided with only one of those values.

        Syntax: (lldb) pmappaindex <physical address | PAI>

        NOTE: This macro will throw an exception if the input isn't a valid PAI
              and is also not a kernel-managed physical address.
    """
    if cmd_args is None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pmappaindex.")

    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmappaindex is only supported on ARM devices.")

    arg = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')

    if IsValidPai(arg):
        # Input is a PAI; derive the corresponding physical address.
        pai, phys_addr = arg, ConvertPaiToPhysAddr(arg)
    else:
        # Input is a physical address; derive the corresponding PAI.
        pai, phys_addr = ConvertPhysAddrToPai(arg), arg

    print("Physical Address: {:#x}".format(phys_addr))
    print("PAI: {:d}".format(pai))
1532