/xnu-8020.101.4/osfmk/vm/
vm_map_store_rb.c
     51  if (vme_c->vme_start < vme_p->vme_start) {   in rb_node_compare()
     54  if (vme_c->vme_start >= vme_p->vme_end) {   in rb_node_compare()
     90  if (address >= cur->vme_start) {   in vm_map_store_lookup_entry_rb()
    115  panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)…   in vm_map_store_entry_link_rb()
    116  … (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);   in vm_map_store_entry_link_rb()
    166  assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);   in vm_map_combine_hole()
    176  assert(hole_entry->vme_start < hole_entry->vme_end);   in vm_map_combine_hole()
    177  assert(last_hole_entry->vme_start < last_hole_entry->vme_end);   in vm_map_combine_hole()
    238  while (map_entry->vme_start > hole_entry->vme_start) {   in check_map_sanity()
    248  if (map_entry->vme_start >= map->max_offset) {   in check_map_sanity()
    [all …]
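
Read together, the comparator hits at lines 51 and 54 spell out the red-black tree's ordering rule: a child sorts left of its parent when it starts below the parent's start, right when it starts at or past the parent's end, and compares equal when the address ranges overlap. A minimal standalone sketch of that rule, with the entry type reduced to its two bounds:

    #include <stdint.h>

    typedef struct vm_entry {
        uint64_t vme_start;   /* first byte of the mapping   */
        uint64_t vme_end;     /* first byte past the mapping */
    } vm_entry_t;

    /*
     * Sketch of the ordering implied by the hits at lines 51/54:
     * strict ordering by start address, with any overlap treated
     * as a match (0), since entries in a map never overlap.
     */
    static int
    entry_compare(const vm_entry_t *c, const vm_entry_t *p)
    {
        if (c->vme_start < p->vme_start) {
            return -1;   /* descend left  */
        }
        if (c->vme_start >= p->vme_end) {
            return 1;    /* descend right */
        }
        return 0;        /* ranges overlap */
    }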
|
vm_map_store_ll.c
     37  while (vm_map_trunc_page(next->vme_start,   in first_free_is_valid_ll()
     41  (vm_map_trunc_page(next->vme_start,   in first_free_is_valid_ll()
     43  vm_map_trunc_page(entry->vme_start,   in first_free_is_valid_ll()
     76  while (vm_map_trunc_page(UFF_next_entry->vme_start, \
     80  (vm_map_trunc_page(UFF_next_entry->vme_start, \
     82  vm_map_trunc_page(UFF_first_free->vme_start, \
     98  assert(VM_MAP_PAGE_ALIGNED((entry->vme_start), \
    178  if (address >= cur->vme_start) {   in vm_map_store_lookup_entry_ll()
    209  if (address >= cur->vme_start) {   in vm_map_store_lookup_entry_ll()
    251  end = entry->vme_start;   in vm_map_store_find_last_free_ll()
    [all …]
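
The lookup hits at lines 178 and 209 suggest the linked-list store walks entries in ascending address order until it passes the target address. A hedged sketch of that walk, using a simplified singly linked entry (the real store links entries through an embedded links sub-structure, not a bare next pointer):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct vm_entry {
        uint64_t vme_start;
        uint64_t vme_end;
        struct vm_entry *next;   /* simplified link; not the kernel's layout */
    } vm_entry_t;

    /*
     * Walk a sorted entry list. Returns true with *out set to the
     * entry containing `address`; otherwise returns false with *out
     * set to the closest predecessor (or NULL if none).
     */
    static bool
    lookup_entry(vm_entry_t *head, uint64_t address, vm_entry_t **out)
    {
        vm_entry_t *prev = NULL;

        for (vm_entry_t *cur = head; cur != NULL; cur = cur->next) {
            if (address >= cur->vme_start) {
                if (address < cur->vme_end) {
                    *out = cur;   /* address falls inside this entry */
                    return true;
                }
                prev = cur;       /* candidate predecessor */
            } else {
                break;            /* sorted list: no later entry can match */
            }
        }
        *out = prev;
        return false;
    }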
|
vm_map.c
    897  crypto_end = tmp_entry.vme_end - tmp_entry.vme_start;   in vm_map_apple_protected()
    898  if (tmp_entry.vme_start < start) {   in vm_map_apple_protected()
    899  if (tmp_entry.vme_start != start_aligned) {   in vm_map_apple_protected()
    902  crypto_start += (start - tmp_entry.vme_start);   in vm_map_apple_protected()
    962  proc_selfpid(), tmp_entry.vme_start);   in vm_map_apple_protected()
    968  map_addr = tmp_entry.vme_start;   in vm_map_apple_protected()
    972  tmp_entry.vme_start),   in vm_map_apple_protected()
    985  assertf(map_addr == tmp_entry.vme_start,   in vm_map_apple_protected()
    988  (uint64_t) tmp_entry.vme_start,   in vm_map_apple_protected()
   1000  tmp_entry.vme_start)),   in vm_map_apple_protected()
    [all …]
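
Lines 897-902 show how vm_map_apple_protected expresses its crypto window relative to the map entry: the window initially spans the whole entry, then its start is advanced when the requested range begins inside the entry. A sketch of that arithmetic with hypothetical names:

    #include <stdint.h>

    typedef struct {
        uint64_t vme_start;
        uint64_t vme_end;
    } entry_range_t;

    /*
     * Entry-relative crypto window, per the pattern at lines 897-902:
     * start at [0, entry length), then trim the head if the requested
     * range begins past the entry's start.
     */
    static void
    clip_crypto_window(const entry_range_t *e, uint64_t start,
        uint64_t *crypto_start, uint64_t *crypto_end)
    {
        *crypto_start = 0;
        *crypto_end   = e->vme_end - e->vme_start;   /* whole entry */
        if (e->vme_start < start) {
            *crypto_start += start - e->vme_start;   /* skip the head */
        }
    }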
|
vm_map_store.c
    131  assert(entry->vme_start < entry->vme_end);   in _vm_map_store_entry_link()
    144  entry->vme_start_original = entry->vme_start;   in _vm_map_store_entry_link()
    211  if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {   in vm_map_store_entry_unlink()
|
vm_map_store.h
     89  (UHEE_entry->vme_start < SHARED_REGION_BASE || \
     90  UHEE_entry->vme_start >= (SHARED_REGION_BASE + SHARED_REGION_SIZE)) && \
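
The macro body at lines 89-90 is a guard that accepts an entry only when its start lies outside the shared region window. A standalone sketch of the predicate; the two region constants below are placeholders, not the kernel's per-architecture values:

    #include <stdbool.h>
    #include <stdint.h>

    #define SHARED_REGION_BASE  0x180000000ULL   /* placeholder value */
    #define SHARED_REGION_SIZE  0x100000000ULL   /* placeholder value */

    /* True when vme_start falls outside [BASE, BASE + SIZE). */
    static bool
    outside_shared_region(uint64_t vme_start)
    {
        return vme_start < SHARED_REGION_BASE ||
               vme_start >= SHARED_REGION_BASE + SHARED_REGION_SIZE;
    }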
|
bsd_vm.c
     87  return vm_map_first_entry(map)->vme_start;   in mach_get_vm_start()
   1062  start = entry->vme_start;   in fill_procregioninfo()
   1191  pinfo->pri_address = (uint64_t)entry->vme_start;   in fill_procregioninfo_onlymappedvnodes()
   1192  pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);   in fill_procregioninfo_onlymappedvnodes()
   1246  *start = entry->vme_start;   in find_region_details()
   1247  *len = entry->vme_end - entry->vme_start;   in find_region_details()
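
Lines 1191-1192 show the pattern used throughout this file: a region's user-visible address and size are read straight off the entry bounds. A sketch with hypothetical struct names mirroring those two fields:

    #include <stdint.h>

    typedef struct {
        uint64_t vme_start;
        uint64_t vme_end;
    } entry_range_t;

    typedef struct {
        uint64_t pri_address;   /* region start in the target map */
        uint64_t pri_size;      /* region length in bytes         */
    } region_info_t;

    /* Region info is just the entry bounds, re-expressed as start + length. */
    static void
    fill_region_info(const entry_range_t *e, region_info_t *pinfo)
    {
        pinfo->pri_address = e->vme_start;
        pinfo->pri_size    = e->vme_end - e->vme_start;
    }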
|
vm_fault.c
   6097  if (ldelta > (laddr - entry->vme_start)) {   in vm_fault_internal()
   6098  ldelta = laddr - entry->vme_start;   in vm_fault_internal()
   6104  laddr = ((laddr - entry->vme_start)   in vm_fault_internal()
   6129  (entry->vme_end - entry->vme_start == object->vo_size) &&   in vm_fault_internal()
   6130  VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {   in vm_fault_internal()
   6142  + (laddr - entry->vme_start))   in vm_fault_internal()
   6154  VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),   in vm_fault_internal()
   6169  VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),   in vm_fault_internal()
   6322  pmap_addr + (end_addr - entry->vme_start), FALSE);   in vm_fault_wire()
   6330  for (va = entry->vme_start;   in vm_fault_wire()
    [all …]
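
Lines 6097-6098 clamp the look-behind distance used when mapping pages around a faulting address, so the cluster never reaches below the containing entry. A sketch of that clamp, assuming laddr already lies at or above vme_start:

    #include <stdint.h>

    /*
     * Limit the look-behind delta to the distance between the faulting
     * address and the start of its map entry, per lines 6097-6098.
     */
    static uint64_t
    clamp_lookbehind(uint64_t laddr, uint64_t vme_start, uint64_t ldelta)
    {
        if (ldelta > laddr - vme_start) {
            ldelta = laddr - vme_start;   /* stop at the entry boundary */
        }
        return ldelta;
    }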
|
vm_debug.c
    178  region.vir_start = (natural_t) entry->vme_start;   in vm32_region_info()
    389  region.vir_start = (natural_t) entry->vme_start;   in vm32_region_info_64()
|
vm_shared_region.c
   1141  if (tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) {   in vm_shared_region_auth_remap()
   2391  assert(tmp_entry->vme_end - tmp_entry->vme_start == size);   in vm_shared_region_slide_mapping()
   2416  map_addr = tmp_entry->vme_start;   in vm_shared_region_slide_mapping()
   2419  (tmp_entry->vme_end - tmp_entry->vme_start),   in vm_shared_region_slide_mapping()
   2431  assertf(map_addr == tmp_entry->vme_start,   in vm_shared_region_slide_mapping()
   2434  (uint64_t) tmp_entry->vme_start,   in vm_shared_region_slide_mapping()
|
vm_pageout.c
   6471  local_entry_start = entry->vme_start;   in vm_map_create_upl()
   6534  vm_object_round_page((entry->vme_end - entry->vme_start)));   in vm_map_create_upl()
   6678  entry->vme_end - entry->vme_start,   in vm_map_create_upl()
   6684  entry->vme_start,   in vm_map_create_upl()
   6730  (entry->vme_end - entry->vme_start) / PAGE_SIZE;   in vm_map_create_upl()
   6764  local_start = entry->vme_start;   in vm_map_create_upl()
   6787  local_start = entry->vme_start;   in vm_map_create_upl()
   6810  local_start = entry->vme_start;   in vm_map_create_upl()
   6842  local_start = entry->vme_start;   in vm_map_create_upl()
   6859  (uint64_t) entry->vme_start,   in vm_map_create_upl()
    [all …]
|
vm_user.c
   3101  map_size = copy_entry->vme_end - copy_entry->vme_start;   in mach_make_memory_entry_internal()
   4187  (map_offset - entry->vme_start));   in vm_map_get_phys_page()
   4206  (map_offset - entry->vme_start));   in vm_map_get_phys_page()
   4212  offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));   in vm_map_get_phys_page()
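
Line 4212 carries the core translation: a map address is converted to an offset in the backing VM object by adding the entry's object offset to the position within the entry. A sketch of that formula, with a plain parameter standing in for VME_OFFSET(entry):

    #include <stdint.h>

    /*
     * Object offset = entry's base offset into the object, plus how far
     * the map address sits past the entry's start (line 4212).
     */
    static uint64_t
    map_addr_to_object_offset(uint64_t map_offset, uint64_t vme_start,
        uint64_t vme_object_offset /* stands in for VME_OFFSET(entry) */)
    {
        return vme_object_offset + (map_offset - vme_start);
    }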
|
vm_map.h
    258  #define vme_start links.start
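
The define at line 258 reveals that vme_start is not a struct field at all but an accessor into an embedded links sub-structure. A sketch of the layout that define implies; everything beyond the start field (prev/next/end) is an assumption based on the vme_end and entry-chain uses elsewhere in these results:

    #include <stdint.h>

    typedef uint64_t vm_map_offset_t;

    struct vm_map_links {
        struct vm_map_entry *prev;    /* assumed: doubly linked entry chain */
        struct vm_map_entry *next;    /* assumed */
        vm_map_offset_t      start;   /* first address covered by the entry */
        vm_map_offset_t      end;     /* assumed: first address past the entry */
    };

    struct vm_map_entry {
        struct vm_map_links links;
        /* ... protection, inheritance, object reference, etc. ... */
    };

    #define vme_start links.start   /* the accessor shown at line 258 */
    #define vme_end   links.end     /* assumed companion accessor     */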
|
vm_resident.c
   9798  if (entry->vme_start != addr) {   in vm_kern_allocation_info()
|
/xnu-8020.101.4/osfmk/kern/
gzalloc.c
    596  "vme: %p, start: %llu end: %llu", gzvme, gzvme->vme_start, gzvme->vme_end);   in gzalloc_element_size()
    610  uint32_t *p = (uint32_t*) gzvme->vme_start;   in gzalloc_element_size()
|
bsd_kern.c
    851  while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {   in get_vmsubmap_entries()
    855  while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {   in get_vmsubmap_entries()
    862  entry->vme_start));   in get_vmsubmap_entries()
    893  entry->vme_start));   in get_vmmap_entries()
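
Lines 851-862 suggest a two-phase walk: skip entries that begin before the range of interest, then accumulate page counts for entries that begin inside it. A simplified standalone version (PAGE_SHIFT is a placeholder; the kernel derives page size from the map):

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12   /* placeholder value */

    typedef struct vm_entry {
        uint64_t vme_start;
        uint64_t vme_end;
        struct vm_entry *next;   /* simplified link; not the kernel's layout */
    } vm_entry_t;

    static uint64_t
    count_range_pages(vm_entry_t *head, uint64_t start, uint64_t end)
    {
        vm_entry_t *e = head;
        uint64_t pages = 0;

        /* Phase 1: skip entries that begin below `start`. */
        while (e != NULL && e->vme_start < start) {
            e = e->next;
        }
        /* Phase 2: sum the pages of entries that begin below `end`. */
        while (e != NULL && e->vme_start < end) {
            pages += (e->vme_end - e->vme_start) >> PAGE_SHIFT;
            e = e->next;
        }
        return pages;
    }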
|
kalloc.c
   2013  if (vm_entry->vme_start != (vm_map_offset_t)addr) {   in vm_map_lookup_kalloc_entry_locked()
   2015  addr, vm_entry, (void *)vm_entry->vme_start,   in vm_map_lookup_kalloc_entry_locked()
   2022  return vm_entry->vme_end - vm_entry->vme_start;   in vm_map_lookup_kalloc_entry_locked()
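
Lines 2013-2022 show how the size of a VA-backed kalloc allocation is recovered: the address must match its map entry's vme_start exactly, and the size is then the entry's extent. A sketch, with a zero return standing in for the panic the real code raises on a mismatch:

    #include <stdint.h>

    typedef struct {
        uint64_t vme_start;
        uint64_t vme_end;
    } vm_entry_t;

    /*
     * An allocation is valid only if it begins exactly at its entry's
     * start; its size is the whole entry (lines 2013-2022).
     */
    static uint64_t
    kalloc_entry_size(const vm_entry_t *e, uint64_t addr)
    {
        if (e->vme_start != addr) {
            return 0;   /* the real code panics here */
        }
        return e->vme_end - e->vme_start;
    }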
|
zalloc.c
   4247  if (first->vme_end + size > last->vme_start) {   in zone_submap_alloc_sequestered_va()
   4253  last->vme_start -= size;   in zone_submap_alloc_sequestered_va()
   4254  addr = last->vme_start;   in zone_submap_alloc_sequestered_va()
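
Lines 4247-4254 carve sequestered VA out of the gap between the map's first and last entries by growing the last entry downward; the allocation fails when the gap is smaller than the request. A sketch of that bump-down step:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        uint64_t vme_start;
        uint64_t vme_end;
    } vm_entry_t;

    /*
     * Allocate `size` bytes of VA from the gap between `first` and
     * `last` by moving last->vme_start down (lines 4247-4254).
     */
    static bool
    alloc_sequestered_va(vm_entry_t *first, vm_entry_t *last,
        uint64_t size, uint64_t *addr)
    {
        if (first->vme_end + size > last->vme_start) {
            return false;            /* gap too small for the request */
        }
        last->vme_start -= size;     /* grow the last entry downward  */
        *addr = last->vme_start;     /* new block sits at its new start */
        return true;
    }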
|