Lines Matching refs:ptoa

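Every hit below is a use of ptoa, the Mach "pages to address" conversion: it turns a page count or page index into a byte size or byte offset by shifting left by PAGE_SHIFT (atop is its inverse). The function names indicate these lines come from XNU's zone allocator (zalloc.c). A minimal sketch of the conversion, assuming 4 KiB pages; the kernel's real macro comes from the machine-dependent vm_param.h headers:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed 4 KiB pages; XNU gets PAGE_SHIFT from vm_param.h. */
    #define PAGE_SHIFT 12
    #define ptoa(x) ((uintptr_t)(x) << PAGE_SHIFT)   /* pages -> bytes */
    #define atop(x) ((uintptr_t)(x) >> PAGE_SHIFT)   /* bytes -> pages */

    int main(void) {
        /* A page index of 3 becomes byte offset 3 * 4096 = 0x3000, as in
         * the "offs += ptoa(meta->zm_page_index)" hits below. */
        assert(ptoa(3) == 0x3000);
        assert(atop(ptoa(3)) == 3);
        return 0;
    }
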
970 return ptoa((int32_t)(meta - zone_info.zi_meta_base)); in zone_meta_to_addr()
1171 page -= ptoa(meta->zm_page_index); in zone_invalid_element_panic()
1208 offs += ptoa(meta->zm_page_index); in zone_element_resolve()
1271 page -= ptoa(meta->zm_page_index); in zone_element_bounds_check_panic()
1315 offs += ptoa(meta->zm_page_index); in zone_element_bounds_check()
1488 offs += ptoa(meta->zm_page_index); in zone_id_require_aligned()
2535 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) / in zone_alloc_pages_for_nelems()
3488 zleak_max_zonemap_size = ptoa(zone_pages_wired_max); in zleak_init()
3872 vm_memtag_set_tag(tagged_address + ptoa(index), elem_size); in zone_tag_element()
3950 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size; in zcram_and_lock()
3983 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size; in zcram_and_lock()
3984 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size; in zcram_and_lock()
3996 kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID); in zcram_and_lock()
3999 kasan_zmem_add(addr + ptoa(i), PAGE_SIZE, in zcram_and_lock()
4005 kasan_zmem_add(addr, ptoa(pg_end), in zcram_and_lock()
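
The zcram_and_lock() hits above turn a page range into an element-index range: ptoa() gives the byte offset of a page boundary within the chunk, oob_offs subtracts whatever offset the elements were shifted by (used for out-of-bounds detection), and dividing by the element size yields an element index. A worked example with assumed numbers (4 KiB pages, 64-byte elements, zero oob_offs), reusing the ptoa sketch above:

    /* Assumed values, for illustration only. */
    uint32_t pg_start = 1, pg_end = 4;        /* page range within the chunk */
    uint32_t oob_offs = 0, elem_size = 64;

    uint32_t free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;  /*  4096 / 64 =  64 */
    uint32_t free_end   = (uint32_t)(ptoa(pg_end)   - oob_offs) / elem_size;  /* 16384 / 64 = 256 */
    /* Elements [64, 256) are the ones backed by pages [1, 4). */
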
4070 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) { in zcram()
4084 assert3u(size % ptoa(zone->z_chunk_pages), ==, 0); in zone_cram_early()
4122 vm_size_t size = ptoa(pages); in zone_submap_alloc_sequestered_va()
4173 kernel_memory_populate(addr, ptoa(pages), in zone_fill_initially()
4177 kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages), in zone_fill_initially()
4181 zone_meta_populate(addr, ptoa(pages)); in zone_fill_initially()
4399 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_SPARSE; in zone_allocate_va_locked()
4400 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_SPARSE; in zone_allocate_va_locked()
4402 } else if (ptoa(chunk_pages) >= ZONE_GUARD_DENSE) { in zone_allocate_va_locked()
4417 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE; in zone_allocate_va_locked()
4418 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE; in zone_allocate_va_locked()
4474 ptoa(pages + guards), kmaflags, VM_KERN_MEMORY_ZONE); in zone_allocate_va_locked()
4491 zone_meta_populate(addr, ptoa(pages + guards)); in zone_allocate_va_locked()
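
In zone_allocate_va_locked(), the guard budget above is computed from bytes rather than pages: ptoa(pages) divided by a byte interval (ZONE_GUARD_SPARSE, or ZONE_GUARD_DENSE for chunks at least that large) gives the number of guard pages to interleave into the VA range, with the remainder driving rounding; the allocation then reserves ptoa(pages + guards) bytes. Illustrative arithmetic with hypothetical interval values (the real constants live in zalloc and are not shown in these hits):

    /* Hypothetical intervals: one guard page per 64 KiB or 256 KiB of VA. */
    #define ZONE_GUARD_DENSE  (64u  << 10)
    #define ZONE_GUARD_SPARSE (256u << 10)

    uint32_t pages  = 32;                                        /* 128 KiB of VA      */
    uint32_t guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE;  /* 131072 / 65536 = 2 */
    uint32_t rest   = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE;  /* 0 bytes left over  */
    /* kmem then receives ptoa(pages + guards) == 136 KiB of VA to carve up. */
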
4848 addr -= ptoa(cur_pages); in zone_expand_locked()
4872 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages)); in zone_expand_locked()
4943 addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list, in zone_expand_locked()
5202 return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1); in pgz_addr()
5569 return ptoa(2 * pgz_slots + 1); in pgz_get_size()
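
The two pgz hits imply how the probabilistic guard zone (PGZ) lays out its VA range: guard and allocation pages alternate, with a guard at page 0, slot k's backing page at page index 2k + 1, and a trailing guard closing the range, so pgz_slots slots cost ptoa(2 * pgz_slots + 1) bytes. A small sketch of that indexing, again reusing the ptoa macro from the first sketch (min_address is a made-up base):

    /* Layout: guard, slot 0, guard, slot 1, ..., slot N-1, guard. */
    uintptr_t min_address = 0x10000000;               /* hypothetical range base */

    uintptr_t pgz_slot_addr(uint32_t slot) {
        return min_address + ptoa(2 * slot + 1);      /* odd page indices = data */
    }
    /* pgz_slot_addr(0) == min_address + 0x1000, the page after the first guard;
     * N slots occupy 2N + 1 pages: N data pages plus N + 1 guards. */
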
5769 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK; in zfree_drop()
5996 offs += ptoa(meta->zm_page_index); in __zcache_mark_invalid()
6265 vm_memtag_bzero((char *)addr + ptoa(i), esize); in zfree_percpu()
6460 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK; in zalloc_import()
6516 offs += ptoa(meta->zm_page_index); in __zcache_mark_valid()
7471 size_to_free = ptoa(z->z_chunk_pages); in zone_reclaim_chunk()
7481 size_to_free = ptoa(page_count); in zone_reclaim_chunk()
7539 kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE, in zone_reclaim_chunk()
7558 ptoa(z->z_chunk_pages + oob_guard)); in zone_reclaim_chunk()
7866 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z); in zone_reclaim()
8291 (uintptr_t)ptoa(inuse_ptepages_count)); in panic_display_zprint()
9861 ptoa(z->z_chunk_pages)); in zdestroy()
10238 vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z); in zone_metadata_init()
10362 zone_bits_size = round_page(ptoa(zone_pages_wired_max) / in zone_set_map_sizes()