/xnu-10063.141.1/osfmk/vm/
vm_map_store_rb.c
     53  if (vme_c->vme_start >= vme_p->vme_end) {   in rb_node_compare()
     70  if (address < cur->vme_end) {   in vm_map_store_lookup_entry_rb()
     97  (uintptr_t)entry->vme_end,   in vm_map_store_entry_link_rb()
     99  (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);   in vm_map_store_entry_link_rb()
    141  hole_entry->vme_end = hole_entry->vme_next->vme_end;   in vm_map_combine_hole()
    147  assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);   in vm_map_combine_hole()
    157  assert(hole_entry->vme_start < hole_entry->vme_end);   in vm_map_combine_hole()
    158  assert(last_hole_entry->vme_start < last_hole_entry->vme_end);   in vm_map_combine_hole()
    233  if (map_entry->vme_end != map_entry->vme_next->vme_start) {   in check_map_sanity()
    238  if (hole_entry->vme_start != map_entry->vme_end) {   in check_map_sanity()
    [all …]
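
A minimal sketch (simplified stand-in types, not the xnu source) of the two patterns above: a red-black comparator that orders non-overlapping entries by their [vme_start, vme_end) ranges, and a lookup that uses "address < vme_end" to decide whether to descend left or stop.

    #include <stddef.h>
    #include <stdint.h>

    struct entry {
    	uint64_t vme_start;        /* inclusive start of the mapping */
    	uint64_t vme_end;          /* exclusive end of the mapping */
    	struct entry *left, *right;
    };

    /* Entries never overlap, so boundary comparisons give a total order. */
    static int
    entry_compare(const struct entry *c, const struct entry *p)
    {
    	if (c->vme_start >= p->vme_end) {
    		return 1;          /* c lies entirely above p */
    	}
    	if (c->vme_end <= p->vme_start) {
    		return -1;         /* c lies entirely below p */
    	}
    	return 0;                  /* overlapping: same slot in the tree */
    }

    /* Find the entry containing address, or NULL if it falls in a hole. */
    static struct entry *
    entry_lookup(struct entry *root, uint64_t address)
    {
    	for (struct entry *cur = root; cur != NULL;) {
    		if (address < cur->vme_end) {
    			if (address >= cur->vme_start) {
    				return cur;   /* inside this entry */
    			}
    			cur = cur->left;      /* address below this entry */
    		} else {
    			cur = cur->right;     /* address at/above vme_end */
    		}
    	}
    	return NULL;
    }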
|
vm_map_store.c
    108  if (__improbable(entry->vme_end <= entry->vme_start)) {   in _vm_map_store_entry_link()
    109  …p start 0x%llx end 0x%llx\n", mapHdr, entry, (uint64_t)entry->vme_start, (uint64_t)entry->vme_end);   in _vm_map_store_entry_link()
    112  assert(entry->vme_start < entry->vme_end);   in _vm_map_store_entry_link()
    118  vm_address_t, entry->vme_end);   in _vm_map_store_entry_link()
    130  entry->vme_end_original = entry->vme_end;   in _vm_map_store_entry_link()
    177  map->highest_entry_end < entry->vme_end) {   in vm_map_store_entry_link()
    178  map->highest_entry_end = entry->vme_end;   in vm_map_store_entry_link()
    207  vm_address_t, entry->vme_end);   in _vm_map_store_entry_unlink()
    218  (uint64_t)entry->vme_start, (uint64_t)entry->vme_end,   in _vm_map_store_entry_unlink()
    297  entry, entry->vme_start, entry->vme_end, map, start);   in __vm_map_store_find_space_holelist_corruption()
    [all …]
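
A hedged sketch of the link-time validation above: before an entry enters the store, a degenerate entry whose end is not strictly after its start is rejected. The struct and the abort are simplified stand-ins for the kernel's types and panic.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry_sketch {
    	uint64_t vme_start;
    	uint64_t vme_end;
    };

    static void
    entry_link_check(const struct entry_sketch *e)
    {
    	/* xnu wraps this test in __improbable(), a branch-prediction hint */
    	if (e->vme_end <= e->vme_start) {
    		/* the kernel panics with the map header, entry, start and end */
    		fprintf(stderr, "corrupt entry %p start 0x%llx end 0x%llx\n",
    		    (const void *)e,
    		    (unsigned long long)e->vme_start,
    		    (unsigned long long)e->vme_end);
    		abort();
    	}
    }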
|
vm_map.c
    473  map, new, new->vme_start, new->vme_end);   in vm_map_entry_copy_csm_assoc()
    496  uint64_t, new->vme_end,   in vm_map_entry_copy_code_signing()
    930  map_addr = tmp_entry.vme_end) {   in vm_map_apple_protected()
    977  crypto_end = tmp_entry.vme_end - tmp_entry.vme_start;   in vm_map_apple_protected()
    986  if (tmp_entry.vme_end > end) {   in vm_map_apple_protected()
    987  if (tmp_entry.vme_end != end_aligned) {   in vm_map_apple_protected()
    992  crypto_end -= (tmp_entry.vme_end - end);   in vm_map_apple_protected()
   1060  (tmp_entry.vme_end -   in vm_map_apple_protected()
   1086  (uint64_t) (map_addr + (tmp_entry.vme_end -   in vm_map_apple_protected()
   1107  crypto_backing_offset += (tmp_entry.vme_end -   in vm_map_apple_protected()
    [all …]
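
A sketch of the clamping logic visible at lines 977-992: the crypto range initially covers the whole entry and is trimmed when the entry extends past the caller's requested end. Names are simplified; this is not the xnu code.

    #include <stdint.h>

    static uint64_t
    crypto_len_for_entry(uint64_t vme_start, uint64_t vme_end, uint64_t end)
    {
    	uint64_t crypto_end = vme_end - vme_start;  /* whole entry, as an offset */

    	if (vme_end > end) {
    		crypto_end -= (vme_end - end);      /* drop the overhang past "end" */
    	}
    	return crypto_end;
    }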
|
vm_map_store_ll.c
     40  vm_map_trunc_page(entry->vme_end, map_page_mask) ||   in first_free_is_valid_ll()
     73  assert(VM_MAP_PAGE_ALIGNED(entry->vme_end,   in vm_map_store_entry_link_ll()
    121  vm_map_trunc_page(new_first_free->vme_end, map_page_mask) ||   in update_first_free_ll()
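
A sketch of the alignment invariant asserted above: an entry boundary must survive truncation to the map's page mask unchanged. The mask convention (low bits set, e.g. 0xfff for 4K pages) follows xnu's vm_map_trunc_page(); the helper names are this sketch's own.

    #include <stdbool.h>
    #include <stdint.h>

    static inline uint64_t
    trunc_to_page(uint64_t addr, uint64_t page_mask)
    {
    	return addr & ~page_mask;
    }

    static inline bool
    entry_end_aligned(uint64_t vme_end, uint64_t page_mask)
    {
    	return trunc_to_page(vme_end, page_mask) == vme_end;
    }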
|
bsd_vm.c
    967  address == tmp_entry->vme_end) {   in fill_procregioninfo()
   1056  vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, …   in fill_procregioninfo()
   1080  pinfo->pri_size = (uint64_t)(entry->vme_end - start);   in fill_procregioninfo()
   1158  pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);   in fill_procregioninfo_onlymappedvnodes()
   1213  *len = entry->vme_end - entry->vme_start;   in find_region_details()
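
A sketch of the two sizing conventions above: fill_procregioninfo() sizes the region from the (possibly mid-entry) query address "start", while the only-mapped-vnodes variant reports the full entry. Simplified signatures.

    #include <stdint.h>

    static uint64_t
    region_size_from_start(uint64_t start, uint64_t vme_end)
    {
    	return vme_end - start;          /* tail of the entry, line 1080 style */
    }

    static uint64_t
    region_size_whole(uint64_t vme_start, uint64_t vme_end)
    {
    	return vme_end - vme_start;      /* full entry, line 1158 style */
    }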
|
vm_debug.c
    179  region.vir_end = (natural_t) entry->vme_end;   in vm32_region_info()
    390  region.vir_end = (natural_t) entry->vme_end;   in vm32_region_info_64()
|
vm_kern.c
    242  return entry->vme_end - entry->vme_start -   in __kmem_entry_orig_size()
    505  } else if ((flags & KMF_GUESS_SIZE) == 0 && addr + size != entry->vme_end) {   in __kmem_entry_validate_panic()
    541  if ((flags & KMEM_GUESS_SIZE) == 0 && addr + size != entry->vme_end) {   in __kmem_entry_validate_guard()
   3373  prev_entry->vme_end <= slot->min_address)) &&   in kmem_free_space()
   3876  used += (entry->vme_end - entry->vme_start);   in kmem_get_gobj_stats()
   4319  assert3u(addr + ptoa(10), <=, e->vme_end);   in kmem_alloc_basic_test()
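
A sketch of the guard validation at lines 505/541: unless the caller let kmem guess the allocation size, the range being freed must end exactly at the entry's vme_end. The flag value and names are stand-ins for KMEM_GUESS_SIZE and friends.

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_GUESS_SIZE 0x1u

    static bool
    kmem_range_ends_at_entry(uint32_t flags, uint64_t addr, uint64_t size,
        uint64_t vme_end)
    {
    	if ((flags & SKETCH_GUESS_SIZE) == 0 && addr + size != vme_end) {
    		return false;   /* size mismatch: the kernel panics here */
    	}
    	return true;
    }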
|
vm_pageout.c
   6708  if (entry->vme_end - original_offset < adjusted_size) {   in vm_map_create_upl()
   6709  adjusted_size = entry->vme_end - original_offset;   in vm_map_create_upl()
   6765  vm_object_round_page((entry->vme_end - entry->vme_start))),   in vm_map_create_upl()
   6899  if ((entry->vme_end - offset) < *upl_size) {   in vm_map_create_upl()
   6900  *upl_size = (upl_size_t) (entry->vme_end - offset);   in vm_map_create_upl()
   6901  assert(*upl_size == entry->vme_end - offset);   in vm_map_create_upl()
   6910  entry->vme_end - entry->vme_start,   in vm_map_create_upl()
   6962  (entry->vme_end - entry->vme_start) / PAGE_SIZE;   in vm_map_create_upl()
   7092  (uint64_t) entry->vme_end);   in vm_map_create_upl()
   7096  uint64_t, (uint64_t)entry->vme_end);   in vm_map_create_upl()
    [all …]
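
A sketch of the size clamp at lines 6899-6901: the requested UPL size is reduced so the UPL never spans past the containing map entry, and the narrowing cast is then sanity-checked. upl_size_t is typedef'd locally for the sketch.

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t upl_size_t;

    static upl_size_t
    clamp_upl_size(uint64_t offset, uint64_t vme_end, upl_size_t upl_size)
    {
    	if (vme_end - offset < upl_size) {
    		upl_size = (upl_size_t)(vme_end - offset);
    		assert(upl_size == vme_end - offset);  /* cast lost nothing */
    	}
    	return upl_size;
    }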
|
vm_fault.c
   6369  if (hdelta > (entry->vme_end - laddr)) {   in vm_fault_internal()
   6370  hdelta = entry->vme_end - laddr;   in vm_fault_internal()
   6399  (entry->vme_end - entry->vme_start == object->vo_size) &&   in vm_fault_internal()
   6581  vm_map_offset_t end_addr = entry->vme_end;   in vm_fault_wire()
   6630  tmp_entry.vme_end = va;   in vm_fault_wire()
   6632  pmap, pmap_addr, tmp_entry.vme_end);   in vm_fault_wire()
   6680  fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);   in vm_fault_unwire()
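
A sketch of two patterns above: vm_fault_internal() clamps its clustering hint (hdelta) to what remains of the entry past laddr, and the unwire path derives the fault window's high offset from the entry length plus the entry's offset into its backing object. Simplified signatures.

    #include <stdint.h>

    static uint64_t
    clamp_cluster_hint(uint64_t hdelta, uint64_t laddr, uint64_t vme_end)
    {
    	if (hdelta > vme_end - laddr) {
    		hdelta = vme_end - laddr;   /* never prefetch past the mapping */
    	}
    	return hdelta;
    }

    static uint64_t
    fault_hi_offset(uint64_t vme_start, uint64_t vme_end, uint64_t vme_offset)
    {
    	return (vme_end - vme_start) + vme_offset;   /* line 6680 pattern */
    }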
|
vm_shared_region.c
   1238  if (tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) {   in vm_shared_region_auth_remap()
   2708  assert(tmp_entry->vme_end - tmp_entry->vme_start == size);   in vm_shared_region_slide_mapping()
   2739  (tmp_entry->vme_end - tmp_entry->vme_start),   in vm_shared_region_slide_mapping()
|
vm_user.c
   1756  map->highest_entry_end = map->first_free->vme_end;   in vm_toggle_entry_reuse()
   3131  map_size = copy_entry->vme_end - copy_entry->vme_start;   in mach_make_memory_entry_internal()
   3842  entry->vme_end - entry->vme_start != object->vo_size) {   in mach_memory_entry_ownership()
|
vm_map.h
    237  #define vme_end links.end   (macro)
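
A sketch of the accessor pattern behind this definition: vme_end is a macro over an embedded links structure, so code can write entry->vme_end while the field physically lives at entry->links.end. The structs below are stripped-down stand-ins for xnu's vm_map_entry.

    #include <stdint.h>

    struct links_sketch {
    	uint64_t start;            /* links.start: start of the VA range */
    	uint64_t end;              /* links.end: exclusive end of the range */
    };

    struct entry_sketch {
    	struct links_sketch links;
    };

    #define vme_start links.start
    #define vme_end   links.end

    static uint64_t
    entry_size(const struct entry_sketch *e)
    {
    	/* expands to e->links.end - e->links.start */
    	return e->vme_end - e->vme_start;
    }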
|
vm_resident.c
  10064  for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {   in vm_page_diagnose()
  10131  *size = (entry->vme_end - addr);   in vm_kern_allocation_info()
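
A sketch of the per-page walk at line 10064 (the same loop shape appears in kdp_common.c below): visit the entry's range one page at a time. The page size is a parameter here; xnu takes it from the map.

    #include <stdint.h>

    static void
    for_each_page(uint64_t vme_start, uint64_t vme_end, uint64_t page_size,
        void (*visit)(uint64_t va))
    {
    	for (uint64_t offset = vme_start; offset < vme_end; offset += page_size) {
    		visit(offset);     /* one callback per page in the entry */
    	}
    }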
|
/xnu-10063.141.1/osfmk/kdp/
kdp_common.c
    128  for (vcur = entry->vme_start; ret == KERN_SUCCESS && vcur < entry->vme_end; vcur += task_page_size…   in kdp_traverse_mappings()
|
/xnu-10063.141.1/osfmk/kern/
bsd_kern.c
    877  entry->vme_end -   in get_vmsubmap_entries()
    908  entry->vme_end -   in get_vmmap_entries()
|
zalloc.c
   4132  if (first->vme_end + size > last->vme_start) {   in zone_submap_alloc_sequestered_va()
   4142  addr = first->vme_end;   in zone_submap_alloc_sequestered_va()
   4143  first->vme_end += size;   in zone_submap_alloc_sequestered_va()
  10184  reloc_base = first->vme_end;   in zone_metadata_init()
  10185  first->vme_end += early_sz;   in zone_metadata_init()
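
A sketch of the "grow the first entry" pattern at lines 4132-4143: new VA is carved out by bumping first->vme_end, after checking the extension cannot collide with the next entry. Stand-in types; in xnu this runs with the submap held.

    #include <stdbool.h>
    #include <stdint.h>

    struct range_sketch {
    	uint64_t vme_start;
    	uint64_t vme_end;
    };

    static bool
    alloc_by_extending(struct range_sketch *first,
        const struct range_sketch *last, uint64_t size, uint64_t *addr)
    {
    	if (first->vme_end + size > last->vme_start) {
    		return false;            /* would run into the next entry */
    	}
    	*addr = first->vme_end;          /* new block starts at the old end */
    	first->vme_end += size;          /* extend the entry over it */
    	return true;
    }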
|