/xnu-8019.80.24/osfmk/vm/
vm_map_store_rb.c
    54    if (vme_c->vme_start >= vme_p->vme_end) {  in rb_node_compare()
    91    if (address < cur->vme_end) {  in vm_map_store_lookup_entry_rb()
    115   …INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,  in vm_map_store_entry_link_rb()
    116   … (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);  in vm_map_store_entry_link_rb()
    160   hole_entry->vme_end = hole_entry->vme_next->vme_end;  in vm_map_combine_hole()
    166   assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);  in vm_map_combine_hole()
    176   assert(hole_entry->vme_start < hole_entry->vme_end);  in vm_map_combine_hole()
    177   assert(last_hole_entry->vme_start < last_hole_entry->vme_end);  in vm_map_combine_hole()
    252   if (map_entry->vme_end != map_entry->vme_next->vme_start) {  in check_map_sanity()
    257   if (hole_entry->vme_start != map_entry->vme_end) {  in check_map_sanity()
    [all …]
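The hits in rb_node_compare() and vm_map_combine_hole() outline how the red-black store orders entries strictly by address range. Below is a minimal sketch reconstructed from those fragments, not the kernel source itself; the pared-down struct vm_map_entry and the name rb_node_compare_sketch are stand-ins for illustration only.

```c
#include <assert.h>
#include <stdint.h>

/* Minimal stand-in types; the real definitions live in osfmk/vm/vm_map.h. */
typedef uint64_t vm_map_offset_t;

struct vm_map_entry {
    vm_map_offset_t vme_start;  /* first address covered by the entry */
    vm_map_offset_t vme_end;    /* first address past the entry       */
};

/*
 * Sketch of the ordering: an entry sorts before another if its start is
 * below the other's start, after it if its start is at or past the
 * other's end, and "equal" only when the two ranges overlap.
 */
static int
rb_node_compare_sketch(const struct vm_map_entry *c, const struct vm_map_entry *p)
{
    if (c->vme_start < p->vme_start) {
        return -1;
    }
    if (c->vme_start >= p->vme_end) {
        return 1;
    }
    return 0;   /* c starts inside p: the ranges overlap */
}

int
main(void)
{
    struct vm_map_entry a = { 0x1000, 0x2000 };
    struct vm_map_entry b = { 0x3000, 0x4000 };

    assert(rb_node_compare_sketch(&a, &b) < 0);
    assert(rb_node_compare_sketch(&b, &a) > 0);
    return 0;
}
```

Under such an ordering, insertion only "collides" when an overlapping entry already exists, which would explain the INSERT FAILED panic fragments at lines 115 and 116.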
|
vm_map_store_ll.c
    39    vm_map_trunc_page(entry->vme_end,  in first_free_is_valid_ll()
    78    vm_map_trunc_page(UFF_first_free->vme_end, \
    100   assert(VM_MAP_PAGE_ALIGNED((entry->vme_end), \
    191   if ((cur != last) && (cur->vme_end > address)) {  in vm_map_store_lookup_entry_ll()
    208   if (cur->vme_end > address) {  in vm_map_store_lookup_entry_ll()
    249   && (prev_entry->vme_end == end || prev_entry->vme_end > map->max_offset)) {  in vm_map_store_find_last_free_ll()
|
vm_map.c
    841   map_addr = tmp_entry.vme_end) {  in vm_map_apple_protected()
    888   crypto_end = tmp_entry.vme_end - tmp_entry.vme_start;  in vm_map_apple_protected()
    895   if (tmp_entry.vme_end > end) {  in vm_map_apple_protected()
    896   if (tmp_entry.vme_end != end_aligned) {  in vm_map_apple_protected()
    899   crypto_end -= (tmp_entry.vme_end - end);  in vm_map_apple_protected()
    962   (tmp_entry.vme_end -  in vm_map_apple_protected()
    990   (uint64_t) (map_addr + (tmp_entry.vme_end -  in vm_map_apple_protected()
    1011  crypto_backing_offset += (tmp_entry.vme_end -  in vm_map_apple_protected()
    1757  if (prev->vme_end <= start) {  in vm_map_find_space()
    1798  start = entry->vme_end;  in vm_map_find_space()
    [all …]
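The vm_map_find_space() hits at lines 1757 and 1798 show the first-fit scan that underlies address allocation: when the candidate start collides with an existing entry, it is bumped to that entry's vme_end and the walk continues. A standalone sketch of that pattern, assuming a sorted list of occupied ranges; find_space_sketch and struct range are hypothetical names.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_map_offset_t;

/* Hypothetical, simplified entry: just an address range [start, end). */
struct range {
    vm_map_offset_t start;
    vm_map_offset_t end;
};

/*
 * First-fit scan: whenever the candidate start collides with an
 * allocated range, the candidate is bumped to that range's end and the
 * scan continues.  Returns 0 on success with *out set to the chosen start.
 */
static int
find_space_sketch(const struct range *entries, size_t count,
    vm_map_offset_t size, vm_map_offset_t max_offset, vm_map_offset_t *out)
{
    vm_map_offset_t start = 0;

    for (size_t i = 0; i < count; i++) {
        if (entries[i].end <= start) {
            continue;               /* entry lies entirely below the candidate */
        }
        if (start + size <= entries[i].start) {
            break;                  /* gap before this entry is big enough */
        }
        start = entries[i].end;     /* collide: restart just past this entry */
    }
    if (start + size > max_offset) {
        return -1;                  /* no room left in the map */
    }
    *out = start;
    return 0;
}

int
main(void)
{
    struct range used[] = { { 0x0000, 0x2000 }, { 0x3000, 0x5000 } };
    vm_map_offset_t where;

    if (find_space_sketch(used, 2, 0x1000, 0x10000, &where) == 0) {
        printf("found space at 0x%llx\n", (unsigned long long)where);  /* 0x2000 */
    }
    return 0;
}
```

The real routine also deals with alignment masks and the map's hole list, which the sketch ignores.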
|
vm_map_store.h
    91    UHEE_map->highest_entry_end < UHEE_entry->vme_end) { \
    92    UHEE_map->highest_entry_end = UHEE_entry->vme_end; \
    106   VMHE_map->highest_entry_end = tmp_entry->vme_end; \
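Lines 91, 92, and 106 belong to macros that maintain map->highest_entry_end, a cached high-water mark raised whenever a newly linked entry ends above it. Written out as a plain function; the trimmed struct map and struct map_entry types and the name update_highest_entry_end_sketch are illustrative only.

```c
#include <assert.h>
#include <stdint.h>

typedef uint64_t vm_map_offset_t;

/* Hypothetical, pared-down stand-ins for the real map/entry structures. */
struct map_entry { vm_map_offset_t vme_start, vme_end; };
struct map       { vm_map_offset_t highest_entry_end; };

/* Raise the cached high-water mark if the new entry ends above it. */
static void
update_highest_entry_end_sketch(struct map *m, const struct map_entry *e)
{
    if (m->highest_entry_end < e->vme_end) {
        m->highest_entry_end = e->vme_end;
    }
}

int
main(void)
{
    struct map m = { .highest_entry_end = 0x4000 };
    struct map_entry e = { .vme_start = 0x7000, .vme_end = 0x8000 };

    update_highest_entry_end_sketch(&m, &e);
    assert(m.highest_entry_end == 0x8000);
    return 0;
}
```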
|
vm_map_store.c
    131   assert(entry->vme_start < entry->vme_end);  in _vm_map_store_entry_link()
    145   entry->vme_end_original = entry->vme_end;  in _vm_map_store_entry_link()
|
bsd_vm.c
    93    return vm_map_last_entry(map)->vme_end;  in mach_get_vm_end()
    1001  address == tmp_entry->vme_end) {  in fill_procregioninfo()
    1090  …vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, …  in fill_procregioninfo()
    1114  pinfo->pri_size = (uint64_t)(entry->vme_end - start);  in fill_procregioninfo()
    1192  pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);  in fill_procregioninfo_onlymappedvnodes()
    1247  *len = entry->vme_end - entry->vme_start;  in find_region_details()
|
vm_debug.c
    179   region.vir_end = (natural_t) entry->vme_end;  in vm32_region_info()
    402   region.vir_end = (natural_t) entry->vme_end;  in vm32_region_info_64()
|
vm_fault.c
    6067  if (hdelta > (entry->vme_end - laddr)) {  in vm_fault_internal()
    6068  hdelta = entry->vme_end - laddr;  in vm_fault_internal()
    6096  (entry->vme_end - entry->vme_start == object->vo_size) &&  in vm_fault_internal()
    6270  vm_map_offset_t end_addr = entry->vme_end;  in vm_fault_wire()
    6319  tmp_entry.vme_end = va;  in vm_fault_wire()
    6343  vm_map_offset_t end_addr = entry->vme_end;  in vm_fault_unwire()
    6369  fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);  in vm_fault_unwire()
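The vm_fault_unwire() hit at line 6369 shows how an entry's address span is converted into an offset window within its backing VM object: the window runs from VME_OFFSET(entry) up to that offset plus (vme_end - vme_start). A self-contained sketch of the same arithmetic, with the hypothetical struct entry_sketch and entry_object_window standing in for the kernel types:

```c
#include <assert.h>
#include <stdint.h>

typedef uint64_t vm_map_offset_t;
typedef uint64_t vm_object_offset_t;

/* Hypothetical, trimmed-down entry: an address range plus its offset
 * into the backing VM object (what VME_OFFSET() returns in the kernel). */
struct entry_sketch {
    vm_map_offset_t    vme_start;
    vm_map_offset_t    vme_end;
    vm_object_offset_t vme_offset;
};

/* Compute the [lo, hi) object-offset window covered by the entry. */
static void
entry_object_window(const struct entry_sketch *e,
    vm_object_offset_t *lo, vm_object_offset_t *hi)
{
    *lo = e->vme_offset;
    *hi = (e->vme_end - e->vme_start) + e->vme_offset;
}

int
main(void)
{
    struct entry_sketch e = { 0x10000, 0x14000, 0x2000 };
    vm_object_offset_t lo, hi;

    entry_object_window(&e, &lo, &hi);
    assert(lo == 0x2000 && hi == 0x6000);
    return 0;
}
```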
|
vm_pageout.c
    6486  if (entry->vme_end - original_offset < adjusted_size) {  in vm_map_create_upl()
    6487  adjusted_size = entry->vme_end - original_offset;  in vm_map_create_upl()
    6542  vm_object_round_page((entry->vme_end - entry->vme_start))));  in vm_map_create_upl()
    6675  if ((entry->vme_end - offset) < *upl_size) {  in vm_map_create_upl()
    6676  *upl_size = (upl_size_t) (entry->vme_end - offset);  in vm_map_create_upl()
    6677  assert(*upl_size == entry->vme_end - offset);  in vm_map_create_upl()
    6686  entry->vme_end - entry->vme_start,  in vm_map_create_upl()
    6738  (entry->vme_end - entry->vme_start) / PAGE_SIZE;  in vm_map_create_upl()
    6868  (uint64_t) entry->vme_end);  in vm_map_create_upl()
    6872  uint64_t, (uint64_t)entry->vme_end);  in vm_map_create_upl()
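Several of the vm_map_create_upl() hits (6486, 6487, 6675, 6676, 6677) clamp a caller-supplied size so the resulting UPL never extends past the map entry containing the starting offset. The clamp is just the comparison below; clamp_to_entry_end is a hypothetical name and the types are simplified.

```c
#include <assert.h>
#include <stdint.h>

typedef uint64_t vm_map_offset_t;
typedef uint32_t upl_size_t;

/* Trim a requested size so [offset, offset + size) stays within an
 * entry that ends at vme_end. */
static upl_size_t
clamp_to_entry_end(vm_map_offset_t offset, vm_map_offset_t vme_end,
    upl_size_t requested)
{
    if ((vme_end - offset) < requested) {
        return (upl_size_t)(vme_end - offset);
    }
    return requested;
}

int
main(void)
{
    /* Entry ends at 0x6000; a 0x4000-byte request starting at 0x5000
     * gets trimmed to the 0x1000 bytes left in the entry. */
    assert(clamp_to_entry_end(0x5000, 0x6000, 0x4000) == 0x1000);
    assert(clamp_to_entry_end(0x1000, 0x6000, 0x2000) == 0x2000);
    return 0;
}
```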
|
vm_shared_region.c
    1153  if (tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) {  in vm_shared_region_auth_remap()
    2401  assert(tmp_entry->vme_end - tmp_entry->vme_start == size);  in vm_shared_region_slide_mapping()
    2429  (tmp_entry->vme_end - tmp_entry->vme_start),  in vm_shared_region_slide_mapping()
|
vm_map.h
    273   #define vme_end links.end   macro
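Line 273 is where vme_end is defined: it is not a struct field of its own but an accessor macro over the map entry's embedded links structure, alongside vme_prev, vme_next, and vme_start. A simplified sketch of that arrangement follows; field order and the many omitted members differ from the real vm_map.h.

```c
#include <stdint.h>

typedef uint64_t vm_map_offset_t;

struct vm_map_links {
    struct vm_map_entry *prev;   /* previous entry, in address order     */
    struct vm_map_entry *next;   /* next entry, in address order         */
    vm_map_offset_t      start;  /* start address of the mapped range    */
    vm_map_offset_t      end;    /* end address (exclusive) of the range */
};

struct vm_map_entry {
    struct vm_map_links links;
    /* ... many more fields in the real structure ... */
};

/* The accessor macros let the rest of the VM code write entry->vme_end
 * instead of entry->links.end. */
#define vme_prev  links.prev
#define vme_next  links.next
#define vme_start links.start
#define vme_end   links.end

int
main(void)
{
    struct vm_map_entry e;
    e.vme_start = 0x1000;        /* same storage as e.links.start */
    e.vme_end   = 0x2000;        /* same storage as e.links.end   */
    return (e.links.end - e.links.start == 0x1000) ? 0 : 1;
}
```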
|
vm_user.c
    1776  map->highest_entry_end = map->first_free->vme_end;  in vm_toggle_entry_reuse()
    3122  map_size = copy_entry->vme_end - copy_entry->vme_start;  in mach_make_memory_entry_internal()
|
vm_resident.c
    9781  *size = (entry->vme_end - addr);  in vm_kern_allocation_info()
|
/xnu-8019.80.24/osfmk/kern/
gzalloc.c
    597   "vme: %p, start: %llu end: %llu", gzvme, gzvme->vme_start, gzvme->vme_end);  in gzalloc_element_size()
    605   gzh = (gzhdr_t *)(gzvme->vme_end - GZHEADER_SIZE);  in gzalloc_element_size()
    612   while (p < (uint32_t *) gzvme->vme_end) {  in gzalloc_element_size()
    619   if (p >= (uint32_t *) gzvme->vme_end) {  in gzalloc_element_size()
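The gzalloc_element_size() hits use vme_end as the anchor for the guard allocator's metadata: a gzhdr_t sits at vme_end - GZHEADER_SIZE, and the fill-pattern scan walks pointers up to vme_end. The sketch below reproduces only the end-anchored header idea against an ordinary heap buffer; guard_hdr_t, GUARD_MAGIC, and the sizes are invented for illustration and do not match the real gzhdr_t layout.

```c
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical guard header; the real gzhdr_t has different fields. */
typedef struct {
    uint32_t magic;
    uint32_t size;
} guard_hdr_t;

#define GUARD_HDR_SIZE  sizeof(guard_hdr_t)
#define GUARD_MAGIC     0x1234abcdU

int
main(void)
{
    const size_t region_size = 4096;
    uint8_t *region_start = malloc(region_size);
    uint8_t *region_end   = region_start + region_size;   /* plays the role of vme_end */

    /* Stamp a header at the very end of the region. */
    guard_hdr_t hdr = { GUARD_MAGIC, 128 };
    memcpy(region_end - GUARD_HDR_SIZE, &hdr, GUARD_HDR_SIZE);

    /* Later, recover it the same way the hits do: end minus header size. */
    guard_hdr_t found;
    memcpy(&found, region_end - GUARD_HDR_SIZE, GUARD_HDR_SIZE);
    assert(found.magic == GUARD_MAGIC && found.size == 128);

    free(region_start);
    return 0;
}
```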
|
bsd_kern.c
    861   entry->vme_end -  in get_vmsubmap_entries()
    892   entry->vme_end -  in get_vmmap_entries()
|
kalloc.c
    1486  (void *)vm_entry->vme_end, map);  in vm_map_lookup_kalloc_entry_locked()
    1492  return vm_entry->vme_end - vm_entry->vme_start;  in vm_map_lookup_kalloc_entry_locked()
|
zalloc.c
    4399  entry->vme_end = zone_pva_to_addr(r->zpr_min);  in zone_submap_alloc_sequestered_va()
|