Lines Matching refs:assertf

714 		assertf(!new->use_pmap, "old %p new %p\n", old, new);  in vm_map_entry_copy()
1217 assertf(kr == KERN_SUCCESS, in vm_map_apple_protected()
1219 assertf(map_addr == tmp_entry.vme_start, in vm_map_apple_protected()
2059 assertf(!pgz_owned(address), in vm_map_lookup_entry()
2861 assertf(vmk_flags.__vmkf_unused2 == 0, "vmk_flags unused2=0x%llx\n", vmk_flags.__vmkf_unused2); in vm_map_enter()
3095 assertf(VM_MAP_PAGE_ALIGNED(*address, FOURK_PAGE_MASK), "0x%llx", (uint64_t)*address); in vm_map_enter()
3096 assertf(VM_MAP_PAGE_ALIGNED(size, FOURK_PAGE_MASK), "0x%llx", (uint64_t)size); in vm_map_enter()
3098 assertf(page_aligned(*address), "0x%llx", (uint64_t)*address); in vm_map_enter()
3099 assertf(page_aligned(size), "0x%llx", (uint64_t)size); in vm_map_enter()
4058 assertf(vmk_flags.__vmkf_unused2 == 0, "vmk_flags unused2=0x%llx\n", vmk_flags.__vmkf_unused2); in vm_map_enter_mem_object()
4820 assertf(FALSE, "kernel_prefault && !UPL_VALID_PAGE"); in vm_map_enter_mem_object()
6963 assertf(vm_object_round_page(VME_OFFSET(entry) + size) - vm_object_trunc_page(VME_OFFSET(entry)) =… in vm_map_wire_nested()
6971 assertf(os_ref_get_count_raw(&object->ref_count) == 1, in vm_map_wire_nested()
6974 assertf(!entry->needs_copy, in vm_map_wire_nested()
8747 assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map), in vm_map_delete()
10373 assertf(copy->size == copy_size, in vm_map_copy_overwrite()
12488 assertf(new_entry->use_pmap, "src_map %p new_entry %p\n", src_map, new_entry); in vm_map_copyin_internal()
13411 assertf(pre_nested_start <= pre_nested_end, in vm_map_fork_unnest()
13414 assertf(start <= end, in vm_map_fork_unnest()
13446 assertf(kr == KERN_SUCCESS, in vm_map_fork_unnest()
14132 assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map), in vm_map_lookup_and_lock_object()
14200 assertf(VM_MAP_PAGE_ALIGNED(local_vaddr, fault_page_mask), in vm_map_lookup_and_lock_object()
14323 assertf(VM_MAP_PAGE_ALIGNED(VME_OFFSET(submap_entry), VM_MAP_PAGE_MASK(map)), in vm_map_lookup_and_lock_object()
17405 assertf(!os_add_overflow(end - start, offset, &end_offset), "size 0x%llx, offset 0x%llx caused ove… in vm_map_entry_insert()
18018 assertf(!src_entry->use_pmap, in vm_map_remap_extract()
18032 assertf(src_entry->use_pmap, in vm_map_remap_extract()
18325 assertf(new_entry->use_pmap, "map %p new_entry %p\n", map, new_entry); in vm_map_remap_extract()
18378 assertf(new_entry->use_pmap, "map %p new_entry %p\n", map, new_entry); in vm_map_remap_extract()
18938 assertf(target_size == src_copy_map_size - *trimmed_start_p - trimmed_end, in vm_map_copy_adjust_to_target()
20142 assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); in vm_map_footprint_query_page_info()
20166 assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); in vm_map_footprint_query_page_info()
20195 assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); in vm_map_footprint_query_page_info()
20225 assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); in vm_map_footprint_query_page_info()
20245 assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); in vm_map_footprint_query_page_info()
20250 assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); in vm_map_footprint_query_page_info()
20262 assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); in vm_map_footprint_query_page_info()
20540 assertf(VM_MAP_PAGE_SHIFT(sub_map) >= VM_MAP_PAGE_SHIFT(map), in vm_map_page_range_info_internal()
23656 assertf(kmr.kmr_return == KERN_SUCCESS, in vm_map_corpse_footprint_collect_done()