"""
Wrappers around globals and caches to service the kmem package
"""
from __future__ import absolute_import, division, print_function

from builtins import bytes
from builtins import range
from builtins import object

from abc import ABCMeta, abstractmethod
from collections import namedtuple
from core import (
    caching,
    gettype,
    lldbwrap,
)
from ctypes import c_int64
from six import add_metaclass


class MemoryRange(namedtuple('MemoryRange', ['start', 'end'])):
    """
    A half-open [start, end) virtual address range
    """

    @property
    def size(self):
        """ Size of the range, in bytes """
        start, end = self
        return end - start

    def contains(self, addr):
        """ Whether @c addr falls within [start, end) """
        start, end = self
        return start <= addr < end

    def __repr__(self):
        return "{0.__class__.__name__}[{0.start:#x}, {0.end:#x})".format(self)


class VMPointerUnpacker(object):
    """
    Pointer unpacker for pointers packed with VM_PACK_POINTER()
    """

    def __init__(self, target, param_var):
        """
        @param target (SBTarget)
            The debug target

        @param param_var (str)
            Name of the global vm_packing_params-style variable
        """
        params = target.chkFindFirstGlobalVariable(param_var)
        self.base_relative = params.xGetScalarByName('vmpp_base_relative')
        self.bits = params.xGetScalarByName('vmpp_bits')
        self.shift = params.xGetScalarByName('vmpp_shift')
        self.base = params.xGetScalarByName('vmpp_base')

    def unpack(self, packed):
        """
        Unpacks an address according to the VM_PACK_POINTER() scheme

        @param packed (int)
            The packed value to unpack

        @returns (int or None)
            The unpacked address, or None when @c packed is 0
        """

        if not packed:
            return None

        if self.base_relative:
            # base-relative scheme: scale and rebase
            addr = (packed << self.shift) + self.base
        else:
            # sign-extending scheme: move the packed payload to the top
            # of a signed 64-bit word, then arithmetic-shift it back down
            bits = self.bits
            shift = self.shift
            addr = c_int64(packed << (64 - bits)).value
            addr >>= 64 - bits - shift

        # truncate back to an unsigned 64-bit address
        return addr & 0xffffffffffffffff

    def unpack_value(self, sbv):
        """
        Conveniency wrapper for self.unpack(sbv.chkGetValueAsUnsigned())
        """
        return self.unpack(sbv.chkGetValueAsUnsigned())


@add_metaclass(ABCMeta)
class KMem(object):
    """
    Singleton class that holds various important information
    that is needed to make sense of the kernel memory layout,
    heap data structures, globals, ...
    """

    _HEAP_NAMES = [ "", "shared.", "data.", "" ]

    @staticmethod
    def _parse_range(zone_info_v, name):
        """
        Create a MemoryRange out of a child struct holding
        'min_address' / 'max_address' fields

        @param zone_info_v (SBValue)
            The zone_info global

        @param name (str)
            The child member name to parse

        @returns (MemoryRange)
        """
        range_v = zone_info_v.chkGetChildMemberWithName(name)
        left = range_v.xGetIntegerByName('min_address')
        right = range_v.xGetIntegerByName('max_address')
        return MemoryRange(left, right)

    @staticmethod
    def _parse_range_array(target, var_name):
        """
        Load a global array of addresses laid out as consecutive
        (start, end) pairs, as a list of MemoryRange objects

        @param target (SBTarget)
            The debug target

        @param var_name (str)
            Name of the global array variable

        @returns (list of MemoryRange)
        """
        var = target.chkFindFirstGlobalVariable(var_name)
        count = var.GetByteSize() // target.GetAddressByteSize()
        addresses = target.xIterAsUInt64(var.GetLoadAddress(), count)
        return [
            MemoryRange(next(addresses), next(addresses))
            for _ in range(0, count, 2)
        ]

    def __init__(self, target):
        self.target = target

        #
        # Cache some globals everyone needs
        #
        self.page_shift = target.chkFindFirstGlobalVariable('page_shift').xGetValueAsInteger()
        self.page_size = 1 << self.page_shift
        self.page_mask = self.page_size - 1

        # Startup phases that have already run, with the common
        # 'STARTUP_SUB_' prefix stripped from the enumerator names
        phase_v = target.chkFindFirstGlobalVariable('startup_phase')
        self.phase = phase_v.xGetValueAsInteger()
        self.phases = set(
            e.GetName()[len('STARTUP_SUB_'):]
            for e in phase_v.GetType().get_enum_members_array()
            if e.GetValueAsUnsigned() <= self.phase
        )

        #
        # Setup the number of CPUs we have
        #
        self.ncpus = target.chkFindFirstGlobalVariable('zpercpu_early_count').xGetValueAsInteger()
        self.master_cpu = target.chkFindFirstGlobalVariable('master_cpu').xGetValueAsInteger()
        # Until the ZALLOC/PERCPU startup phases have run,
        # only the master CPU is usable
        self.zcpus = range(self.ncpus) if 'ZALLOC' in self.phases else (self.master_cpu, )
        self.pcpus = range(self.ncpus) if 'PERCPU' in self.phases else (self.master_cpu, )

        #
        # Load all the ranges we will need
        #
        zone_info = target.chkFindFirstGlobalVariable('zone_info')
        self.meta_range = self._parse_range(zone_info, 'zi_meta_range')
        self.bits_range = self._parse_range(zone_info, 'zi_bits_range')
        self.zone_range = self._parse_range(zone_info, 'zi_map_range')
        try:
            self.pgz_range = self._parse_range(zone_info, 'zi_pgz_range')
            self.pgz_bt = target.chkFindFirstGlobalVariable('pgz_backtraces').xDereference()
        except Exception:
            # best-effort: PGZ globals are not present on all targets
            self.pgz_range = MemoryRange(0, 0)
            self.pgz_bt = None

        self.kmem_ranges = self._parse_range_array(target, 'kmem_ranges')
        self.iokit_ranges = self._parse_range_array(target, 'gIOKitPageableFixedRanges')

        #
        # And other important globals
        #
        self.stext = target.chkFindFirstGlobalVariable('vm_kernel_stext').xGetValueAsInteger()
        self.num_zones = target.chkFindFirstGlobalVariable('num_zones').xGetValueAsInteger()
        self.mag_size = target.chkFindFirstGlobalVariable('_zc_mag_size').xGetValueAsInteger()
        self.zone_array = target.chkFindFirstGlobalVariable('zone_array')
        self.zsec_array = target.chkFindFirstGlobalVariable('zone_security_array')

        self.kernel_map = target.chkFindFirstGlobalVariable('kernel_map').Dereference()
        self.vm_kobject = target.chkFindFirstGlobalVariable('kernel_object_store')

        #
        # Cache some crucial types used for memory walks
        #
        self.zpm_type = gettype('struct zone_page_metadata')
        self.vm_map_type = gettype('struct _vm_map')
        self.vmo_type = self.vm_kobject.GetType()

        #
        # Recognize whether the target is any form of KASAN kernel.
        #
        if any(target.FindFirstGlobalVariable('kasan_enabled')):
            self.kasan = True
            self.kasan_tbi = any(target.FindFirstGlobalVariable('kasan_tbi_enabled'))
            self.kasan_classic = not self.kasan_tbi
        else:
            self.kasan = False
            self.kasan_tbi = False
            self.kasan_classic = False

        #
        # VM_PACK_POINTER Unpackers
        #
        self.kn_kq_packing = VMPointerUnpacker(target, 'kn_kq_packing_params')
        self.vm_page_packing = VMPointerUnpacker(target, 'vm_page_packing_params')
        self.rwlde_caller_packing = VMPointerUnpacker(target, 'rwlde_caller_packing_params')

    @staticmethod
    @caching.cache_statically
    def get_shared(target=None):
        """
        Returns a shared instance of the class
        """

        # the architecture is the first component of the target triple
        arch = target.triple[:target.triple.find('-')]

        if arch.startswith('arm64e'):
            return _KMemARM64e(target)
        elif arch.startswith('arm64'):
            return _KMemARM64(target)
        elif arch.startswith('x86_64'):
            return _KMemX86(target)
        else:
            raise RuntimeError("Unsupported architecture: {}".format(arch))

    def iter_addresses(self, iterable):
        """
        Conveniency wrapper to transform a list of integer to addresses
        """
        return (self.make_address(a) for a in iterable)

    #
    # Abstract per-arch methods
    #

    @property
    @abstractmethod
    def has_ptrauth(self):
        """ whether this target has ptrauth """

        pass

    @abstractmethod
    def PERCPU_BASE(self, cpu):
        """
        Returns the per-cpu base for a given CPU number

        @param cpu (int)
            A CPU number

        @returns (int)
            The percpu base for this CPU
        """

        pass

    @abstractmethod
    def make_address(self, addr):
        """
        Make an address out of an integer

        @param addr (int)
            An address to convert

        @returns (int)
        """

        pass


class _KMemARM64(KMem):
    """
    Specialization of KMem for arm64
    """

    def __init__(self, target):
        super(_KMemARM64, self).__init__(target)

        self.arm64_CpuDataEntries = target.chkFindFirstGlobalVariable('CpuDataEntries')
        self.arm64_BootCpuData = target.chkFindFirstGlobalVariable('percpu_slot_cpu_data')
        self.arm64_t1sz = target.chkFindFirstGlobalVariable('gT1Sz').xGetValueAsInteger()
        # bit at which kernel virtual addresses sign-extend (per gT1Sz)
        self.arm64_sign_mask = 1 << (63 - self.arm64_t1sz)

    @property
    def has_ptrauth(self):
        return False

    def PERCPU_BASE(self, cpu):
        cpu_data = self.arm64_CpuDataEntries.chkGetChildAtIndex(cpu)
        boot_vaddr = self.arm64_BootCpuData.GetLoadAddress()

        return cpu_data.xGetIntegerByName('cpu_data_vaddr') - boot_vaddr

    def make_address(self, addr):
        # mask to the valid VA width, then sign-extend from the top VA
        # bit and truncate back to 64 bits
        sign_mask = self.arm64_sign_mask
        addr = addr & (sign_mask + sign_mask - 1)
        return ((addr ^ sign_mask) - sign_mask) & 0xffffffffffffffff


class _KMemARM64e(_KMemARM64):
    """
    Specialization of KMem for arm64e
    """

    @property
    def has_ptrauth(self):
        return True


class _KMemX86(KMem):
    """
    Specialization of KMem for Intel
    """

    def __init__(self, target):
        super(_KMemX86, self).__init__(target)

        self.intel_cpu_data = target.chkFindFirstGlobalVariable('cpu_data_ptr')

    @property
    def has_ptrauth(self):
        return False

    def PERCPU_BASE(self, cpu):
        cpu_data = self.intel_cpu_data.chkGetChildAtIndex(cpu)
        return cpu_data.xGetIntegerByName('cpu_pcpu_base')

    def make_address(self, addr):
        # x86_64 kernel addresses need no canonicalization here
        return addr


class PERCPUValue(object):
    """
    Provides an enumerator for a percpu value
    """

    def __init__(self, name, target = None):
        """
        @param name (str)
            The percpu slot name

        @param target (SBTarget or None)
        """

        self.kmem = KMem.get_shared()
        self.sbv = self.kmem.target.chkFindFirstGlobalVariable('percpu_slot_' + name)

    def __getitem__(self, cpu):
        if cpu in self.kmem.pcpus:
            sbv = self.sbv
            addr = sbv.GetLoadAddress() + self.kmem.PERCPU_BASE(cpu)
            return sbv.chkCreateValueFromAddress(sbv.GetName(), addr, sbv.GetType())
        raise IndexError

    def __iter__(self):
        # fixed: used to be "for items in ..." while yielding "item[1]",
        # which raised NameError on iteration
        return (item[1] for item in self.items())

    def items(self):
        """
        Iterator of (cpu, SBValue) tuples for the given PERCPUValue
        """

        kmem = self.kmem
        sbv = self.sbv
        name = sbv.GetName()
        ty = sbv.GetType()
        addr = sbv.GetLoadAddress()

        return (
            (cpu, sbv.chkCreateValueFromAddress(name, addr + kmem.PERCPU_BASE(cpu), ty))
            for cpu in kmem.pcpus
        )


__all__ = [
    KMem.__name__,
    MemoryRange.__name__,
    PERCPUValue.__name__,
]