/xnu-11215.1.10/osfmk/vm/

vm_map_store_rb.c
     50  if (vme_c->vme_start < vme_p->vme_start) {  in rb_node_compare()
     53  if (vme_c->vme_start >= vme_p->vme_end) {  in rb_node_compare()
     69  if (address >= cur->vme_start) {  in vm_map_store_lookup_entry_rb()
     96  (uintptr_t)entry->vme_start,  in vm_map_store_entry_link_rb()
     98  (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start,  in vm_map_store_entry_link_rb()
    147  assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);  in vm_map_combine_hole()
    157  assert(hole_entry->vme_start < hole_entry->vme_end);  in vm_map_combine_hole()
    158  assert(last_hole_entry->vme_start < last_hole_entry->vme_end);  in vm_map_combine_hole()
    219  while (map_entry->vme_start > hole_entry->vme_start) {  in check_map_sanity()
    229  if (map_entry->vme_start >= map->max_offset) {  in check_map_sanity()
    [all …]
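The rb_node_compare() hits show how the red-black store orders entries: a candidate entry is compared against another entry's [vme_start, vme_end) range. A minimal standalone sketch of that comparison logic, using simplified stand-in types rather than the XNU definitions:

/*
 * Sketch only: the struct and function names below are illustrative,
 * not the kernel's. The comparison mirrors the rb_node_compare() hits.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_entry {
    uint64_t vme_start;   /* inclusive start of the mapped range */
    uint64_t vme_end;     /* exclusive end of the mapped range   */
};

/* <0: child sorts below parent; >0: child sorts above; 0: the child's
 * start falls inside the parent's [vme_start, vme_end) range. */
static int
demo_node_compare(const struct demo_entry *vme_c, const struct demo_entry *vme_p)
{
    if (vme_c->vme_start < vme_p->vme_start) {
        return -1;
    }
    if (vme_c->vme_start >= vme_p->vme_end) {
        return 1;
    }
    return 0;
}

int
main(void)
{
    struct demo_entry a = { 0x1000, 0x2000 };
    struct demo_entry b = { 0x3000, 0x4000 };
    printf("%d %d %d\n",
        demo_node_compare(&a, &b),   /* -1: a starts below b              */
        demo_node_compare(&b, &a),   /*  1: b starts at/above a's end     */
        demo_node_compare(&a, &a));  /*  0: start lies inside the range   */
    return 0;
}

Treating "start falls inside the other entry's range" as equal is presumably what lets an address lookup such as vm_map_store_lookup_entry_rb() descend the tree and land on the entry containing a given address.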
|
vm_map_store.c
    108  if (__improbable(entry->vme_end <= entry->vme_start)) {  in _vm_map_store_entry_link()
    109  …panic("maphdr %p entry %p start 0x%llx end 0x%llx\n", mapHdr, entry, (uint64_t)entry->vme_start, (…  in _vm_map_store_entry_link()
    112  assert(entry->vme_start < entry->vme_end);  in _vm_map_store_entry_link()
    117  vm_address_t, entry->vme_start,  in _vm_map_store_entry_link()
    129  entry->vme_start_original = entry->vme_start;  in _vm_map_store_entry_link()
    175  (entry->vme_start < SHARED_REGION_BASE ||  in vm_map_store_entry_link()
    176  entry->vme_start >= (SHARED_REGION_BASE + SHARED_REGION_SIZE)) &&  in vm_map_store_entry_link()
    206  vm_address_t, entry->vme_start,  in _vm_map_store_entry_unlink()
    218  (uint64_t)entry->vme_start, (uint64_t)entry->vme_end,  in _vm_map_store_entry_unlink()
    246  if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {  in vm_map_store_entry_unlink()
    [all …]
|
vm_map_store_ll.c
     39  while (vm_map_trunc_page(next->vme_start, map_page_mask) ==  in first_free_is_valid_ll()
     41  (vm_map_trunc_page(next->vme_start, map_page_mask) ==  in first_free_is_valid_ll()
     42  vm_map_trunc_page(entry->vme_start, map_page_mask) &&  in first_free_is_valid_ll()
     71  assert(VM_MAP_PAGE_ALIGNED(entry->vme_start,  in vm_map_store_entry_link_ll()
    120  while (vm_map_trunc_page(next->vme_start, map_page_mask) ==  in update_first_free_ll()
    122  (vm_map_trunc_page(next->vme_start, map_page_mask) ==  in update_first_free_ll()
    123  vm_map_trunc_page(new_first_free->vme_start, map_page_mask) &&  in update_first_free_ll()
|
vm_map.c
    601  map, new, new->vme_start, new->vme_end);  in vm_map_entry_copy_csm_assoc()
    623  uint64_t, new->vme_start,  in vm_map_entry_copy_code_signing()
   1105  crypto_end = tmp_entry.vme_end - tmp_entry.vme_start;  in vm_map_apple_protected()
   1106  if (tmp_entry.vme_start < start) {  in vm_map_apple_protected()
   1107  if (tmp_entry.vme_start != start_aligned) {  in vm_map_apple_protected()
   1112  crypto_start += (start - tmp_entry.vme_start);  in vm_map_apple_protected()
   1173  map_addr = tmp_entry.vme_start;  in vm_map_apple_protected()
   1177  tmp_entry.vme_start),  in vm_map_apple_protected()
   1188  assertf(map_addr == tmp_entry.vme_start,  in vm_map_apple_protected()
   1191  (uint64_t) tmp_entry.vme_start,  in vm_map_apple_protected()
    [all …]
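The vm_map_apple_protected() hits around source lines 1105–1112 suggest how a requested [start, end) range is turned into offsets relative to the map entry that covers it. A small standalone sketch of that arithmetic; the variable names mirror the hits, but the surrounding types and values are illustrative stand-ins:

/* Sketch only: not the kernel code, just the offset arithmetic. */
#include <stdint.h>
#include <stdio.h>

struct demo_entry {
    uint64_t vme_start;
    uint64_t vme_end;
};

int
main(void)
{
    struct demo_entry tmp_entry = { 0x4000, 0x9000 };  /* entry covers [0x4000, 0x9000)          */
    uint64_t start = 0x5000;                           /* requested range begins inside the entry */

    uint64_t crypto_start = 0;                                       /* window offset, relative to the entry */
    uint64_t crypto_end = tmp_entry.vme_end - tmp_entry.vme_start;   /* default: the whole entry             */
    if (tmp_entry.vme_start < start) {
        /* the entry begins before the requested range: skip the leading part */
        crypto_start += (start - tmp_entry.vme_start);
    }
    printf("window within entry: [0x%llx, 0x%llx)\n",
        (unsigned long long)crypto_start,
        (unsigned long long)crypto_end);
    return 0;
}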
|
bsd_vm.c
   1038  start = entry->vme_start;  in fill_procregioninfo()
   1163  pinfo->pri_address = (uint64_t)entry->vme_start;  in fill_procregioninfo_onlymappedvnodes()
   1164  pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);  in fill_procregioninfo_onlymappedvnodes()
   1249  *start_p = entry->vme_start;  in task_find_region_details()
   1250  *len_p = entry->vme_end - entry->vme_start;  in task_find_region_details()
|
vm_fault.c
   6407  if (ldelta > (laddr - entry->vme_start)) {  in vm_fault_internal()
   6408  ldelta = laddr - entry->vme_start;  in vm_fault_internal()
   6414  laddr = ((laddr - entry->vme_start)  in vm_fault_internal()
   6440  (entry->vme_end - entry->vme_start == object->vo_size) &&  in vm_fault_internal()
   6441  VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {  in vm_fault_internal()
   6453  + (laddr - entry->vme_start))  in vm_fault_internal()
   6465  VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),  in vm_fault_internal()
   6480  VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),  in vm_fault_internal()
   6641  pmap_addr + (end_addr - entry->vme_start), FALSE);  in vm_fault_wire()
   6649  for (va = entry->vme_start;  in vm_fault_wire()
    [all …]
|
vm_debug.c
    178  region.vir_start = (natural_t) entry->vme_start;  in vm32_region_info()
    389  region.vir_start = (natural_t) entry->vme_start;  in vm32_region_info_64()
|
vm_kern.c
    242  return entry->vme_end - entry->vme_start -  in __kmem_entry_orig_size()
    503  } else if (addr != entry->vme_start) {  in __kmem_entry_validate_panic()
    537  if (addr != entry->vme_start) {  in __kmem_entry_validate_guard()
    918  map_addr = entry->vme_start;
   1697  vm_map_clip_end(map, entry, entry->vme_start + newsize);  in kmem_realloc_shrink_guard()
   1983  newaddr = newentry->vme_start;  in kmem_realloc_guard()
   3090  ((*entry)->vme_next->vme_start < (addr + size))) {  in kmem_get_addr_from_meta()
   3564  (next_entry->vme_start >= slot->max_address))) {  in kmem_free_space()
   3954  VME_OFFSET_SET(entry, entry->vme_start);  in kmem_scramble_ranges()
   4065  entry->vme_start < range.max_address) {  in kmem_get_gobj_stats()
    [all …]
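The kmem_realloc_shrink_guard() hit clips an entry at vme_start + newsize. A standalone sketch of what such a clip-at-end split looks like; the structures and the demo_clip_end() helper are illustrative, not the real vm_map_clip_end() implementation:

/* Sketch only: split one [start, end) entry into [start, at) and [at, end). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_entry {
    uint64_t vme_start;
    uint64_t vme_end;
};

/* Shrinks *e to end at `at` and fills *tail with the cut-off remainder. */
static void
demo_clip_end(struct demo_entry *e, uint64_t at, struct demo_entry *tail)
{
    assert(e->vme_start < at && at < e->vme_end);
    tail->vme_start = at;
    tail->vme_end   = e->vme_end;
    e->vme_end      = at;
}

int
main(void)
{
    struct demo_entry entry = { 0x1000, 0x5000 };
    struct demo_entry tail;

    /* shrink to a new size of 0x2000 bytes, i.e. clip at entry.vme_start + newsize */
    demo_clip_end(&entry, entry.vme_start + 0x2000, &tail);

    printf("kept [0x%llx, 0x%llx), cut off [0x%llx, 0x%llx)\n",
        (unsigned long long)entry.vme_start, (unsigned long long)entry.vme_end,
        (unsigned long long)tail.vme_start, (unsigned long long)tail.vme_end);
    return 0;
}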
|
vm_shared_region.c
   1240  if (tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) {  in vm_shared_region_auth_remap()
   2704  assert(tmp_entry->vme_end - tmp_entry->vme_start == size);  in vm_shared_region_slide_mapping()
   2726  map_addr = tmp_entry->vme_start;  in vm_shared_region_slide_mapping()
   2735  tmp_entry->vme_end - tmp_entry->vme_start,  in vm_shared_region_slide_mapping()
   2745  assertf(map_addr == tmp_entry->vme_start,  in vm_shared_region_slide_mapping()
   2748  (uint64_t) tmp_entry->vme_start,  in vm_shared_region_slide_mapping()
|
vm_pageout.c
   6770  local_entry_start = entry->vme_start;
   6834  vm_object_round_page((entry->vme_end - entry->vme_start))),
   6979  entry->vme_end - entry->vme_start,
   6985  entry->vme_start,
   7031  (entry->vme_end - entry->vme_start) / PAGE_SIZE;
   7065  local_start = entry->vme_start;
   7088  local_start = entry->vme_start;
   7111  local_start = entry->vme_start;
   7143  local_start = entry->vme_start;
   7160  (uint64_t) entry->vme_start,
    [all …]
|
vm_map_xnu.h
    172  #define vme_start links.start  macro
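This hit is the definition the other files rely on: vme_start is not a struct field of its own but a macro that expands to the start field of an embedded links structure (vme_end is presumably defined the same way over links.end). A compile-and-run sketch of the pattern, with simplified stand-in types rather than the real vm_map_entry:

/* Sketch only: demo_links/demo_map_entry are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

struct demo_links {
    uint64_t start;
    uint64_t end;
};

struct demo_map_entry {
    struct demo_links links;
    /* ... other fields ... */
};

/* Accessor macros in the style of the vm_map_xnu.h hit above. */
#define vme_start links.start
#define vme_end   links.end

int
main(void)
{
    struct demo_map_entry e = { .links = { 0x1000, 0x3000 } };
    /* e.vme_start expands to e.links.start, e.vme_end to e.links.end */
    printf("start 0x%llx size 0x%llx\n",
        (unsigned long long)e.vme_start,
        (unsigned long long)(e.vme_end - e.vme_start));
    return 0;
}

This is why every other hit in the listing can write entry->vme_start even though the underlying field lives inside the entry's links member.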
|
vm_memory_entry.c
    817  map_size = copy_entry->vme_end - copy_entry->vme_start;  in mach_make_memory_entry_share()
   1682  entry->vme_end - entry->vme_start != object->vo_size) {  in mach_memory_entry_ownership()
|
vm_resident.c
  10243  for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {  in vm_page_diagnose()
  10306  if (entry->vme_start != addr) {  in vm_kern_allocation_info()
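The vm_page_diagnose() hit walks an entry's range one page at a time, from vme_start up to vme_end. A standalone sketch of that stride loop; the page size and the visitor function are illustrative stand-ins for the kernel's accounting:

/* Sketch only: visit a [vme_start, vme_end) range page by page. */
#include <stdint.h>
#include <stdio.h>

struct demo_entry {
    uint64_t vme_start;
    uint64_t vme_end;
};

static void
demo_visit_page(uint64_t offset)
{
    printf("visiting page at 0x%llx\n", (unsigned long long)offset);
}

int
main(void)
{
    const uint64_t page_size = 0x4000;                 /* 16 KiB, a common Apple arm64 page size */
    struct demo_entry entry = { 0x100000, 0x10c000 };  /* three pages                             */

    for (uint64_t offset = entry.vme_start; offset < entry.vme_end; offset += page_size) {
        demo_visit_page(offset);
    }
    return 0;
}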
|
/xnu-11215.1.10/osfmk/kern/

bsd_kern.c
    912  while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {  in get_vmsubmap_entries()
    916  while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {  in get_vmsubmap_entries()
    923  entry->vme_start));  in get_vmsubmap_entries()
    954  entry->vme_start));  in get_vmmap_entries()
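The get_vmsubmap_entries() hits bound an entry walk with vme_start: first skip entries whose start lies below the requested start, then process entries while their start is still below the requested end. A standalone sketch of that bounded walk over a plain array instead of the kernel's linked entry list; what is accumulated per entry here is only illustrative:

/* Sketch only: bound a walk over sorted entries to the window [start, end). */
#include <stdint.h>
#include <stdio.h>

struct demo_entry {
    uint64_t vme_start;
    uint64_t vme_end;
};

int
main(void)
{
    struct demo_entry entries[] = {
        { 0x1000, 0x2000 }, { 0x3000, 0x6000 }, { 0x8000, 0x9000 },
    };
    const int nentries = (int)(sizeof(entries) / sizeof(entries[0]));
    uint64_t start = 0x2800, end = 0x8000;

    int i = 0;
    while (i < nentries && entries[i].vme_start < start) {
        i++;                                   /* skip entries that start below the window */
    }
    uint64_t total = 0;
    while (i < nentries && entries[i].vme_start < end) {
        total += entries[i].vme_end - entries[i].vme_start;
        i++;
    }
    printf("bytes covered by entries in window: 0x%llx\n", (unsigned long long)total);
    return 0;
}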
|
zalloc.c
   4152  if (first->vme_end + size > last->vme_start) {  in zone_submap_alloc_sequestered_va()
   4158  last->vme_start -= size;  in zone_submap_alloc_sequestered_va()
   4159  addr = last->vme_start;  in zone_submap_alloc_sequestered_va()
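The zone_submap_alloc_sequestered_va() hits carve virtual address space out of the gap between two entries by moving the second entry's vme_start downward. A standalone sketch of that carve, with illustrative types and without the kernel's locking or failure handling:

/* Sketch only: take `size` bytes from the gap between `first` and `last`. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_entry {
    uint64_t vme_start;
    uint64_t vme_end;
};

/* Returns true and writes the new address if the gap can hold `size`. */
static bool
demo_alloc_from_gap(struct demo_entry *first, struct demo_entry *last,
    uint64_t size, uint64_t *addr)
{
    if (first->vme_end + size > last->vme_start) {
        return false;           /* gap too small */
    }
    last->vme_start -= size;    /* grow `last` downward over the gap          */
    *addr = last->vme_start;    /* the carved range begins where it now starts */
    return true;
}

int
main(void)
{
    struct demo_entry first = { 0x1000, 0x2000 };
    struct demo_entry last  = { 0x8000, 0x9000 };
    uint64_t addr;
    if (demo_alloc_from_gap(&first, &last, 0x1000, &addr)) {
        printf("carved 0x1000 bytes at 0x%llx\n", (unsigned long long)addr);
    }
    return 0;
}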
|
/xnu-11215.1.10/osfmk/kdp/

kdp_common.c
    128  …for (vcur = entry->vme_start; ret == KERN_SUCCESS && vcur < entry->vme_end; vcur += task_page_size…  in kdp_traverse_mappings()
|