from abc import (
    ABCMeta,
    abstractmethod,
    abstractproperty,
)
import argparse
import re
import struct
from typing import (
    Optional,
)

from core import (
    SBValueFormatter,
    caching,
    gettype,
    lldbwrap,
    value,
    xnu_format,
)
from core.standard import (
    ArgumentError,
)
from core.kernelcore import (
    KernelTarget,
)
from core.iterators import (
    RB_HEAD,
)

from .kmem import MemoryRange
from .btlog import BTLog, BTLibrary
from .whatis import *

# FIXME: should not import this from xnu / utils
from pmap import (
    PmapWalkARM64,
    PmapWalkX86_64,
    KVToPhysARM,
)
from utils import (
    GetEnumName,
    print_hex_data,
)
from xnu import (
    lldb_command,
)


@SBValueFormatter.converter("vm_prot")
def vm_prot_converter(prot):
    """
    Format a vm_prot_t bitmask as a 3 character "rwx" style string.

    Indexing trick: "-rw?x"[prot & 1] yields 'r' or '-',
    [prot & 2] yields 'w' or '-', [prot & 4] yields 'x' or '-'.
    """
    PROT_STR = "-rw?x"
    return PROT_STR[prot & 1] + PROT_STR[prot & 2] + PROT_STR[prot & 4]


class Pmap(object, metaclass=ABCMeta):
    """ Helper class to manipulate a pmap_t"""

    def __new__(cls, pmap: lldbwrap.SBValue, name: Optional[str] = None):
        target = pmap.GetTarget()
        arch = target.triple[:target.triple.find('-')]

        #
        # Constructing Pmap directly dispatches to the architecture
        # specific subclass; returns None for unsupported architectures.
        #
        if cls is Pmap:
            if arch.startswith('arm64'):
                return _PmapARM64(pmap, name)
            elif arch.startswith('x86_64'):
                return _PmapX86(pmap, name)
            else:
                return None

        return super(Pmap, cls).__new__(cls)

    def __init__(self, pmap: lldbwrap.SBValue, name: Optional[str] = None):
        self.sbv = pmap
        self.name = name
        self.kern = KernelTarget(pmap.GetTarget().GetDebugger())
        # default; subclasses refine this from the target where possible
        self.page_size = 4096

        # 1-entry cache for phystokv()
        self._last_phytokv_paddr = None
        self._last_phytokv_result = None

    def describe(self, verbose=False):
        """
        Print a short description of this pmap.
        """
        fmt = (
            "Pmap Info\n"
            " pmap : {&v:#x} \n"
        )
        #
        # BUG FIX: fmt was previously built but never printed,
        # making describe() a silent no-op.
        #
        print(xnu_format(fmt, v=self.sbv))

    @staticmethod
    @caching.cache_statically
    def kernel_pmap(target=None):
        """
        Returns an object for the kernel pmap
        """

        pmap = target.FindFirstGlobalVariable('kernel_pmap').Dereference()
        return Pmap(pmap, 'kernel_pmap')

    def phystokv(self, paddr: int) -> int:
        """
        Translate a physical address into a kernel virtual address,
        memoizing the last translated page.
        """
        base = self.trunc_page(paddr)

        if self._last_phytokv_paddr != base:
            self._last_phytokv_paddr = base
            self._last_phytokv_result = self.kern.PhysToKernelVirt(base)

        return self._last_phytokv_result + self.page_offset(paddr)

    def trunc_page(self, addr: int) -> int:
        """ Round @addr down to a page boundary. """
        return addr & -self.page_size

    def round_page(self, addr: int) -> int:
        """ Round @addr up to a page boundary. """
        return (addr + self.page_size - 1) & -self.page_size

    def page_offset(self, addr: int) -> int:
        """ Return the offset of @addr within its page. """
        return addr & (self.page_size - 1)

    @abstractmethod
    def kvtophys(self, vaddr: int) -> int:
        """
        resolves a kernel virtual address into a physical address
        """
        pass

    @abstractmethod
    def walk(self, vaddr: int, extra: Optional[dict] = None) -> Optional[int]:
        """
        resolves a virtual address to a physical address for this pmap

        @param vaddr (int)
            The address to resolve

        @param extra (dict)
            Extra pmap specific information about the mapping
        """

        pass


class _PmapARM64(Pmap):
    """
    Specialization of Pmap for arm64
    """

    def __init__(self, pmap: lldbwrap.SBValue, name: Optional[str] = None):
        super().__init__(pmap, name)

        target = pmap.GetTarget()
        self.gVirtBase = target.FindFirstGlobalVariable('gVirtBase').xGetValueAsInteger()
        self.gPhysBase = target.FindFirstGlobalVariable('gPhysBase').xGetValueAsInteger()

        try:
            self.pt_attr = pmap.chkGetChildMemberWithName('pmap_pt_attr')
        except Exception:
            # kernels without per-pmap page table attributes:
            # fall back to the native (global) attributes
            self.pt_attr = target.FindFirstGlobalVariable('native_pt_attr')
        self.page_size = self.pt_attr.xGetIntegerByName('pta_page_size')

        # 1-entry cache for walk()
        self._last_walk_vaddr = None
        self._last_walk_extra = None
        self._last_walk_result = None

        # 1-entry cache for kvtophys()
        self._last_kvtophys_vaddr = None
        self._last_kvtophys_result = None

    def kvtophys(self, vaddr: int) -> int:
        base = self.trunc_page(vaddr)

        #
        # BUG FIX: this method previously updated the _last_walk_*
        # cache slots (clobbering walk()'s cache and never populating
        # its own, so the miss path was taken every call), and returned
        # page_offset(base) which is always 0 instead of
        # page_offset(vaddr).
        #
        if self._last_kvtophys_vaddr != base:
            self._last_kvtophys_vaddr = base
            self._last_kvtophys_result = KVToPhysARM(base)

        return self._last_kvtophys_result + self.page_offset(vaddr)

    def walk(self, vaddr: int, extra: Optional[dict] = None) -> Optional[int]:
        base = self.trunc_page(vaddr)

        if self._last_walk_vaddr != base:
            self._last_walk_vaddr = base
            self._last_walk_extra = {}

            tte = self.sbv.chkGetChildMemberWithName('tte')
            self._last_walk_result = PmapWalkARM64(
                value(self.pt_attr), value(tte), base,
                0, self._last_walk_extra
            )

        if extra is not None:
            extra.update(self._last_walk_extra)
        if self._last_walk_result:
            return self._last_walk_result + self.page_offset(vaddr)
        return None


class _PmapX86(Pmap):
    """
    Specialization of Pmap for Intel
    """

    #
    # BUG FIX: a setter-less `page_size` property was previously
    # declared here; it conflicted with `self.page_size = 4096` in
    # Pmap.__init__, which raises "AttributeError: can't set attribute"
    # on construction. The base class already sets the same value
    # (4096), so the property is simply removed.
    #

    def __init__(self, pmap: lldbwrap.SBValue, name: Optional[str] = None):
        super().__init__(pmap, name)

        target = pmap.GetTarget()
        self.physmap_base = target.FindFirstGlobalVariable('physmap_base').xGetValueAsInteger()

    def kvtophys(self, vaddr: int) -> int:
        # BUG FIX: previously read the misspelled `self.phsmap_base`,
        # which raised AttributeError on every call.
        return vaddr - self.physmap_base

    def walk(self, vaddr: int, extra: Optional[dict] = None) -> Optional[int]:
        return PmapWalkX86_64(value(self.sbv), vaddr, 0)


class VMMap(object):
    """ Helper class to manipulate a vm_map_t"""

    def __init__(self, vm_map, name=None):
        self.sbv = vm_map
        self.name = name
        # red-black tree of vm_map_entry structures, keyed by address
        self.rb = RB_HEAD(
            vm_map.chkGetValueForExpressionPath(".hdr.rb_head_store"),
            "entry",
            self.entry_compare
        )

        vme_type = gettype('struct vm_map_entry')
        self.to_entry = vme_type.xContainerOfTransform('store')

    def entry_compare(self, rb_entry, address):
        """
        rb-tree comparator: orders entries by their [start, end) range
        relative to @address; 0 means the entry covers @address.
        """
        vme = self.to_entry(rb_entry)

        if vme.xGetScalarByPath(".links.end") <= address:
            return 1
        if address < vme.xGetScalarByPath(".links.start"):
            return -1
        return 0

    def find(self, address):
        """
        Return the vm_map_entry covering @address, or None if unmapped.
        """
        ent = self.rb.find(address)
        return self.to_entry(ent) if ent else None

    def describe(self, verbose=False):
        """
        Print a description of this vm_map.
        """
        fmt = (
            "VM Map Info\n"
            " vm map : {&v:#x} \n"
        )
        if self.name:
            fmt += (
                " vm map name : {m.name:s} \n"
            )
        fmt += (
            " pmap : {$v.pmap:#x} \n"
            " vm size : {$v.size|human_size} ({$v.size:,d} bytes) \n"
            " entries : {$v.hdr.nentries} \n"
            " map range : "
            "{$v.hdr.links.start:#x} - {$v.hdr.links.end:#x}\n"
            " map pgshift : {$v.hdr.page_shift}\n"
        )
        print(xnu_format(fmt, m=self, v=self.sbv))


class VMMapEntry(MemoryObject):
    """ Memory Object for a kernel map memory entry """

    MO_KIND = "kernel map entry"

    def __init__(self, kmem, address, vm_map):
        super().__init__(kmem, address)
        self.vm_map = vm_map
        # None when nothing is mapped at @address
        self.sbv = vm_map.find(address)

    @property
    def object_range(self):
        sbv = self.sbv
        if sbv:
            return MemoryRange(
                sbv.xGetScalarByPath('.links.start'),
                sbv.xGetScalarByPath('.links.end')
            )

        # no entry: fall back to the single page containing the address
        base = self.address & ~self.kmem.page_mask
        return MemoryRange(base, base + self.kmem.page_size)

    @property
    def vme_offset(self):
        # vme_offset is stored shifted right by 12 bits in the entry
        return self.sbv.xGetScalarByName('vme_offset') << 12

    @property
    def vme_object_type(self):
        sbv = self.sbv
        if sbv.xGetScalarByName('is_sub_map'):
            return "submap"
        if sbv.xGetScalarByName('vme_kernel_object'):
            return "kobject"
        return "vm object"

    @property
    def vme_object(self):
        """
        Return an (address, SBType) tuple for the object backing
        this entry: a submap, the kernel object, or a vm object.
        """
        kmem = self.kmem
        sbv = self.sbv

        if sbv.xGetScalarByName('is_sub_map'):
            # submap pointers are stored shifted right by 2 bits
            addr = sbv.xGetScalarByName('vme_submap') << 2
            return (addr, kmem.vm_map_type)

        if sbv.xGetScalarByName('vme_kernel_object'):
            return (kmem.vm_kobject.GetLoadAddress(), kmem.vmo_type)

        packed = sbv.xGetScalarByName('vme_object_or_delta')
        addr = kmem.vm_page_packing.unpack(packed)
        return (addr, kmem.vmo_type)

    @property
    def pages(self):
        return self.object_range.size >> self.kmem.page_shift

    def describe(self, verbose=False):

        self.vm_map.describe()

        if not self.sbv:
            fmt = (
                "Kernel Map Entry Info\n"
                " No memory mapped at this address\n"
            )
            print(xnu_format(fmt))
            return

        fmt = (
            "VM Map Entry Info\n"
            " vm entry : {&v:#x}\n"
            " start / end : "
            "{$v.links.start:#x} - {$v.links.end:#x} "
            "({0.pages:,d} pages)\n"
            " vm tag : {$v.vme_alias|vm_kern_tag}\n"
        )
        range_id = next((
            i
            for i, r in enumerate(self.kmem.kmem_ranges)
            if r.contains(self.address)
        ), None)
        # BUG FIX: `if range_id:` skipped range index 0 (a valid
        # range); only None means "not in any range".
        if range_id is not None:
            fmt += (
                " vm range id : {range_id}\n"
            )
        fmt += (
            " protection : "
            "{$v.protection|vm_prot}/{$v.max_protection|vm_prot}\n"
            " vm object : "
            "{0.vme_object_type} ({0.vme_object[0]:#x})\n"
            " entry offset : {0.vme_offset:#x}\n"
        )
        print(xnu_format(fmt, self, v=self.sbv, range_id=range_id))


@whatis_provider
class KernelMapWhatisProvider(WhatisProvider):
    """
    Whatis Provider for the kernel map ranges
    """

    def claims(self, address):
        """ True if @address falls in a kmem range or the iokit range. """
        kmem = self.kmem

        return (
            any(r.contains(address) for r in kmem.kmem_ranges)
            or kmem.iokit_range.contains(address)
        )

    def lookup(self, address):
        """ Build a VMMapEntry for @address in the map that claims it. """
        kmem = self.kmem

        if any(r.contains(address) for r in kmem.kmem_ranges):
            return VMMapEntry(kmem, address, VMMap(kmem.kernel_map, 'kernel_map'))

        # not in a kmem range: must be the IOKit pageable map (see claims())
        iokit_pageable_map_data = kmem.target.chkFindFirstGlobalVariable('gIOKitPageableMap')
        iokit_pageable_vm_map = iokit_pageable_map_data.chkGetChildMemberWithName("map").Dereference()
        return VMMapEntry(kmem, address, VMMap(iokit_pageable_vm_map, "gIOKitPageableMap.map"))


__all__ = [
    Pmap.__name__,
    VMMap.__name__,
    VMMapEntry.__name__,
    KernelMapWhatisProvider.__name__,
]