| /xnu-8019.80.24/osfmk/arm64/ |
| H A D | hibernate_restore.c |
    192  uint64_t result = ptoa_64(ppnum);  in allocate_page()
    308  uint64_t header_phys = ptoa_64(headerPpnum);  in pal_hib_resume_tramp()
    311  uint64_t hib_text_start = ptoa_64(header->restore1CodePhysPage);  in pal_hib_resume_tramp()
    326  phys_start = ptoa_64(bank_bitmap->first_page);  in pal_hib_resume_tramp()
    327  phys_end = ptoa_64(bank_bitmap->last_page) + PAGE_SIZE;  in pal_hib_resume_tramp()
    358  uint64_t size = ptoa_64(seg_info->segments[i].pageCount);  in pal_hib_resume_tramp()
    360  uint64_t seg_start = ptoa_64(seg_info->segments[i].physPage);  in pal_hib_resume_tramp()
    394  uint64_t bank_start = ptoa_64(bank_bitmap->first_page);  in pal_hib_resume_tramp()
    395  uint64_t bank_end = ptoa_64(bank_bitmap->last_page) + PAGE_SIZE;  in pal_hib_resume_tramp()
    418  image_start = ptoa_64(header->handoffPages);  in pal_hib_resume_tramp()
    [all …]
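The recurring shape in pal_hib_resume_tramp() is turning an inclusive page-number range into a half-open byte range: ptoa_64(first_page) for the start, ptoa_64(last_page) + PAGE_SIZE for the exclusive end. A minimal user-space sketch of that arithmetic; the bank_bitmap struct below is a hypothetical stand-in for the real hibernation bitmap header, and PAGE_SHIFT 14 (16 KB arm64 kernel pages) is assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14                      /* assumed 16 KB pages */
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    /* Hypothetical stand-in for the hibernation bank bitmap header. */
    struct bank_bitmap {
        uint32_t first_page;   /* first physical page of the bank (inclusive) */
        uint32_t last_page;    /* last physical page of the bank (inclusive)  */
    };

    int main(void)
    {
        struct bank_bitmap bank = { .first_page = 0x1000, .last_page = 0x1fff };

        /* As at lines 326-327: the + PAGE_SIZE makes the end exclusive,
         * so phys_end - phys_start is the bank size in bytes. */
        uint64_t phys_start = ptoa_64(bank.first_page);
        uint64_t phys_end   = ptoa_64(bank.last_page) + PAGE_SIZE;

        printf("bank spans [%#llx, %#llx), %llu bytes\n",
               (unsigned long long)phys_start, (unsigned long long)phys_end,
               (unsigned long long)(phys_end - phys_start));
        return 0;
    }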
|
| H A D | hibernate_arm64.c |
    138  ptoa_64(bitmap->first_page), bitmap->first_page,  in hibernate_page_list_allocate()
    139  ptoa_64(bitmap->last_page), bitmap->last_page);  in hibernate_page_list_allocate()
|
| /xnu-8019.80.24/iokit/Kernel/ |
| H A D | IOMapper.cpp |
    242  NULL, 0, ptoa_64(pages),  in IOMapperIOVMAlloc()
    263  IOMapper::gSystem->iovmUnmapMemory(NULL, NULL, ptoa_64(addr), ptoa_64(pages));  in IOMapperIOVMFree()
    277  ptoa_64(addr), ptoa_64(offset), ptoa_64(page), ptoa_64(1));  in IOMapperInsertPage()
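Lines 263 and 277 show that these legacy IOMapper entry points keep both the IOVM address and the length in page units, converting each to bytes only at the call boundary. A hedged sketch of that convention; iovm_unmap below is a hypothetical stand-in for iovmUnmapMemory, and PAGE_SHIFT 14 is assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    /* Hypothetical stand-in for iovmUnmapMemory(): takes byte units. */
    static void iovm_unmap(uint64_t iovm_addr, uint64_t length)
    {
        printf("unmap [%#llx, %#llx)\n", (unsigned long long)iovm_addr,
               (unsigned long long)(iovm_addr + length));
    }

    int main(void)
    {
        uint32_t addr  = 0x80;   /* IOVM address, tracked in pages */
        uint32_t pages = 16;     /* allocation length, in pages    */

        /* Both arguments cross the boundary in bytes, as at line 263. */
        iovm_unmap(ptoa_64(addr), ptoa_64(pages));
        return 0;
    }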
|
| H A D | IOHibernateRestoreKernel.c |
    460  scratch->curPage = (uint8_t *)pal_hib_map(SCRATCH_AREA, ptoa_64(scratch->headPage));  in hibernate_scratch_init()
    470  result.curPage = (uint8_t *)pal_hib_map(SCRATCH_AREA, ptoa_64(result.headPage));  in hibernate_scratch_start_read()
    491  scratch->curPage = (uint8_t *)pal_hib_map(SCRATCH_AREA, ptoa_64(*nextPage));  in hibernate_scratch_io()
    528  uint64_t dst = ptoa_64(ppnum);  in store_one_page()
    615  headerPhys = ptoa_64(p1);  in hibernate_kernel_entrypoint()
    650  debug_code(kIOHibernateRestoreCodeImageEnd, ptoa_64(lastImagePage));  in hibernate_kernel_entrypoint()
    652  debug_code(kIOHibernateRestoreCodeMapEnd, ptoa_64(lastMapPage));  in hibernate_kernel_entrypoint()
    655  debug_code(kIOHibernateRestoreCodeHandoffPages, ptoa_64(handoffPages));  in hibernate_kernel_entrypoint()
    670  wkdmScratch = (uint8_t *)pal_hib_map(WKDM_AREA, ptoa_64(hibernate_page_list_grab(map, &nextFree)));  in hibernate_kernel_entrypoint()
    700  debug_code(kIOHibernateRestoreCodePageIndexEnd, ptoa_64(lastPageIndexPage));  in hibernate_kernel_entrypoint()
    [all …]
|
| H A D | IOHibernateIO.cpp |
    501  ptoa_64(gIOHibernateHandoffPageCount), page_size);  in IOHibernateSystemSleep()
    559  setFileSize = ((ptoa_64((106 * pageCount) / 100) * gIOHibernateCompression) >> 8)  in IOHibernateSystemSleep()
    1928  uint64_t physAddr = ptoa_64(page);  in hibernate_write_image()
    1929  uint64_t size = ptoa_64(count);  in hibernate_write_image()
    2047  err = IOMemoryDescriptorWriteFromPhysical(vars->srcBuffer, 0, ptoa_64(page), page_size);  in hibernate_write_image()
    2616  err = IOMemoryDescriptorReadToPhysical(vars->srcBuffer, decoOffset, ptoa_64(ppnum), page_size);  in hibernate_machine_init()
    2742  element = (element & page_mask) | ptoa_64(pmap_find_phys(kernel_pmap, element));  in IOHibernateSystemRestart()
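Line 559 packs the whole hibernation-file size estimate into one expression: take 106 % of the image page count (a little slack), convert to bytes, then scale by gIOHibernateCompression, which the >> 8 suggests is a fixed-point ratio with 8 fractional bits (128/256 would mean an expected 50 % compression). A worked sketch under those assumptions, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14                  /* assumed 16 KB pages */
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        uint64_t pageCount   = 250000;     /* illustrative image page count */
        uint64_t compression = 128;        /* assumed 8.8 fixed point: 50 % */

        /* 106 % of the pages, in bytes, scaled by expected compression. */
        uint64_t setFileSize =
            (ptoa_64((106 * pageCount) / 100) * compression) >> 8;

        printf("estimated hibernation file: %llu MB\n",
               (unsigned long long)(setFileSize >> 20));   /* ~2070 MB */
        return 0;
    }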
|
| H A D | IOMemoryDescriptor.cpp |
    733  vm_size_t size = ptoa_64(_pages);  in memoryReferenceCreate()
    3271  address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;  in dmaCommandOperation()
    3289  address = ptoa_64(pageList->phys_addr) + offset;  in dmaCommandOperation()
    3303  address = ptoa_64(pageAddr) + offset;  in dmaCommandOperation()
    4038  *upl_size = (upl_size_t) ptoa_64(pageCount);  in io_get_kernel_static_upl()
    4044  phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));  in io_get_kernel_static_upl()
    4338  ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));  in wireVirtual()
    4606  if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {  in dmaMap()
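The dmaCommandOperation() hits (3271, 3289, 3303) all rebuild a full 64-bit physical address from a page frame plus an intra-page byte offset. A minimal sketch of that recombination, with made-up values and PAGE_SHIFT 14 assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        uint32_t phys_page = 0x4242;   /* page frame from a page-list entry */
        uint64_t offset    = 0x0abc;   /* byte offset within that page      */

        /* As at line 3289: page-to-bytes conversion, then the offset. */
        uint64_t address = ptoa_64(phys_page) + offset;
        printf("phys address: %#llx\n", (unsigned long long)address);
        return 0;
    }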
|
| H A D | IODMACommand.cpp |
    574  remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));  in segmentOp()
    1500  curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))  in genIOVMSegments()
|
| H A D | IOLib.cpp | 1304 ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode ); in IOSetProcessorCacheMode()
|
| /xnu-8019.80.24/osfmk/vm/ |
| H A D | vm_purgeable.c |
    1550  ptoa_64(resident_page_count - wired_page_count));  in vm_purgeable_accounting()
    1554  ptoa_64(compressed_page_count));  in vm_purgeable_accounting()
    1559  ptoa_64(resident_page_count - wired_page_count));  in vm_purgeable_accounting()
    1563  ptoa_64(compressed_page_count));  in vm_purgeable_accounting()
    1568  ptoa_64(resident_page_count  in vm_purgeable_accounting()
    1576  ptoa_64(resident_page_count - wired_page_count));  in vm_purgeable_accounting()
    1580  ptoa_64(compressed_page_count));  in vm_purgeable_accounting()
    1585  ptoa_64(resident_page_count  in vm_purgeable_accounting()
    1593  ptoa_64(resident_page_count - wired_page_count));  in vm_purgeable_accounting()
    1597  ptoa_64(compressed_page_count));  in vm_purgeable_accounting()
    [all …]
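Every ledger argument here does its subtraction in page units and converts once. Because ptoa_64 is a pure shift, ptoa_64(a - b) equals ptoa_64(a) - ptoa_64(b), so subtracting on the counts simply saves a conversion. A small sketch with illustrative counts:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        uint32_t resident_page_count = 1000;
        uint32_t wired_page_count    = 64;

        /* Subtract in pages, convert once; identical to converting each
         * count and subtracting the byte values. */
        uint64_t ledger_delta =
            ptoa_64(resident_page_count - wired_page_count);

        printf("ledger credit: %llu bytes\n",
               (unsigned long long)ledger_delta);   /* 936 pages */
        return 0;
    }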
|
| H A D | vm_object.h |
    490  vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \
    513  vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
    516  vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
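Line 490 passes -ptoa_64(...) even though the macro yields an unsigned uint64_t: the negation wraps, and the signed byte-delta parameter of vm_tag_update_size reads the wrapped value back as a negative count. A sketch of that idiom; tag_update below is a hypothetical stand-in for vm_tag_update_size:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    /* Hypothetical stand-in: accumulates a signed byte delta per tag. */
    static int64_t tag_size;
    static void tag_update(int64_t delta) { tag_size += delta; }

    int main(void)
    {
        tag_update(ptoa_64(10));   /* wire 10 pages */

        /* -ptoa_64(4) wraps in uint64_t; converting it to int64_t
         * recovers the intended negative delta, as in the line-490 hit. */
        tag_update(-ptoa_64(4));   /* unwire 4 pages */

        printf("tagged bytes: %lld\n", (long long)tag_size);  /* 6 pages */
        return 0;
    }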
|
| H A D | vm_object.c |
    2748  if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) {  in vm_object_pmap_protect_options()
    2770  if (ptoa_64(object->resident_page_count / 4) < size_in_object) {  in vm_object_pmap_protect_options()
    8365  ptoa_64(resident_count));  in vm_object_ownership_change()
    8372  ptoa_64(resident_count));  in vm_object_ownership_change()
    8379  ptoa_64(wired_count));  in vm_object_ownership_change()
    8383  ptoa_64(wired_count));  in vm_object_ownership_change()
    8393  ptoa_64(compressed_count));  in vm_object_ownership_change()
    8399  ptoa_64(compressed_count));  in vm_object_ownership_change()
    8513  ptoa_64(resident_count));  in vm_object_ownership_change()
    8520  ptoa_64(resident_count));  in vm_object_ownership_change()
    [all …]
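Lines 2748 and 2770 hide a small subtlety: ptoa_64(count / 4) rounds the count down to whole pages before converting, whereas ptoa_64(count) / 4 keeps the sub-page remainder. The heuristic only needs a rough quarter, so either form works; a two-line demonstration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        uint64_t n = 7;   /* resident_page_count, illustrative */

        /* Divide-then-convert truncates to a page multiple;
         * convert-then-divide does not. */
        printf("ptoa_64(n / 4) = %llu\n",
               (unsigned long long)ptoa_64(n / 4));     /* 16384 */
        printf("ptoa_64(n) / 4 = %llu\n",
               (unsigned long long)(ptoa_64(n) / 4));   /* 28672 */
        return 0;
    }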
|
| H A D | vm_resident.c |
    6689  mach_vm_size_t map_size = ptoa_64(page_count);  in vm_page_alloc_list()
    9318  info[object->wire_tag].size += ptoa_64(object->wired_page_count);  in vm_page_count_object()
    9571  wired_size = ptoa_64(vm_page_wire_count);  in vm_page_diagnose()
    9572  wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);  in vm_page_diagnose()
    9574  wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);  in vm_page_diagnose()
    9575  …wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttle…  in vm_page_diagnose()
    9577  wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);  in vm_page_diagnose()
    9592  SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);  in vm_page_diagnose()
    9596  SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);  in vm_page_diagnose()
    9597  SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);  in vm_page_diagnose()
    [all …]
|
| H A D | vm_kern.c | 866 vm_tag_update_size(tag, -ptoa_64(pages_unwired)); in kernel_memory_depopulate()
|
| H A D | vm_compressor.c | 732 compressor_pool_size = ptoa_64(vm_compression_limit); in vm_compressor_init()
|
| H A D | vm_fault.c | 6508 vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages)); in vm_fault_unwire()
|
| H A D | vm_map.c |
    6322  size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {  in add_wire_counts()
    6323  if (size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {  in add_wire_counts()
    6326  …bal wire limit. %llu bytes wired and requested to wire %llu bytes more", ptoa_64(total_wire_count)…  in add_wire_counts()
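The add_wire_counts() check shows why the 64-bit variant matters here: total_wire_count is a page count, and a 32-bit conversion could wrap before the comparison for large wired footprints. A user-space sketch of the limit check, with an assumed 8 GB global wire limit and illustrative counts:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        uint64_t vm_global_user_wire_limit = 8ULL << 30;  /* assumed 8 GB */
        uint32_t total_wire_count = 500000;               /* pages wired  */
        uint64_t size = 512ULL << 20;                     /* ask: 512 MB  */

        /* Same shape as line 6323: convert pages to bytes in 64 bits
         * before adding, so the sum cannot wrap. */
        if (size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {
            printf("denied: %llu bytes wired, asked for %llu more\n",
                   (unsigned long long)ptoa_64(total_wire_count),
                   (unsigned long long)size);
        } else {
            printf("granted\n");
        }
        return 0;
    }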
|
| /xnu-8019.80.24/osfmk/i386/vmx/ |
| H A D | vmx_shims.c | 53 return ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t)va)); in vmx_paddr()
|
| /xnu-8019.80.24/osfmk/mach/ |
| H A D | vm_param.h |
    100  #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)  macro
    188  #undef ptoa_64
    223  #define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
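This is the definition every other hit in this listing expands to: ptoa_64 ("page to address, 64-bit") widens its argument to uint64_t before shifting by PAGE_SHIFT, so a large page-frame number cannot lose its high bits the way a plain 32-bit shift would. (The #undef at line 188 and the pointer-arithmetic variant at line 223 sit inside preprocessor conditionals that the listing elides.) A minimal user-space sketch, assuming the 16 KB arm64 kernel page size (PAGE_SHIFT 14; x86_64 uses 12):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed page geometry: arm64 xnu uses 16 KB kernel pages. */
    #define PAGE_SHIFT 14

    /* Mirrors osfmk/mach/vm_param.h line 100: widen first, then shift. */
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        uint32_t ppnum = 0x123456;          /* a physical page-frame number */
        uint64_t phys  = ptoa_64(ppnum);    /* byte address of that page    */

        /* Without the widening cast, a 32-bit shift would wrap for any
         * page number at or above 0x40000 (1 << (32 - PAGE_SHIFT)). */
        printf("page %#x -> phys %#llx\n", ppnum, (unsigned long long)phys);
        return 0;
    }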
|
| /xnu-8019.80.24/osfmk/kern/ |
| H A D | zalloc.c |
    4924  *current_size = ptoa_64(phys_pages);  in get_zone_map_size()
    4925  *capacity = ptoa_64(zone_phys_mapped_max_pages);  in get_zone_map_size()
    4968  ptoa_64(os_atomic_load(&zones_phys_page_mapped_count, relaxed)),  in kill_process_in_largest_zone()
    4969  ptoa_64(zone_phys_mapped_max_pages),  in kill_process_in_largest_zone()
    7192  .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),  in get_zone_info()
    7194  .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),  in get_zone_info()
    7196  .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),  in get_zone_info()
    7204  ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));  in get_zone_info()
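The get_zone_info() hits convert wired page counts to bytes only after zone_scale_for_percpu(), which from these call sites appears to scale a per-CPU zone's count by the CPU count. A hedged miniature of that ordering; scale_for_percpu is a hypothetical stand-in:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    /* Hypothetical stand-in: per-CPU zones track pages per CPU, so the
     * count is scaled by the CPU count before the byte conversion. */
    static uint64_t scale_for_percpu(int is_percpu, uint64_t pages,
                                     unsigned ncpu)
    {
        return is_percpu ? pages * ncpu : pages;
    }

    int main(void)
    {
        uint64_t z_wired_cur = 3;   /* pages per CPU, illustrative */
        unsigned ncpu = 8;

        uint64_t mzi_cur_size = ptoa_64(scale_for_percpu(1, z_wired_cur, ncpu));
        printf("zone current size: %llu bytes\n",
               (unsigned long long)mzi_cur_size);   /* 24 pages */
        return 0;
    }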
|
| /xnu-8019.80.24/osfmk/kdp/ |
| H A D | kdp_core.c | 297 uint64_t phys = ptoa_64(ppn); in kernel_pmap_present_mapping()
|
| /xnu-8019.80.24/osfmk/i386/AT386/ |
| H A D | model_dep.c | 1598 uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK); in debug_copyin()
|
| /xnu-8019.80.24/bsd/kern/ |
| H A D | uipc_mbuf.c | 6285 return (uint64_t)(ptoa_64(base_phys) | ((uint64_t)addr & PAGE_MASK));
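Here the page offset is OR-ed in rather than added (compare the + offset form in IOMemoryDescriptor.cpp and the identical OR in debug_copyin() above). Since ptoa_64 leaves bits [0, PAGE_SHIFT) clear and the masked offset fits inside them, the two forms are equivalent; a sketch verifying that, with PAGE_SHIFT 14 assumed:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)
    #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        uint64_t base_phys = 0x1234;        /* page frame of the buffer */
        uint64_t addr      = 0xdeadbeef;    /* address within that page */

        /* OR works because the low PAGE_SHIFT bits of ptoa_64() are 0. */
        uint64_t or_form  = ptoa_64(base_phys) | (addr & PAGE_MASK);
        uint64_t add_form = ptoa_64(base_phys) + (addr & PAGE_MASK);
        assert(or_form == add_form);

        printf("phys: %#llx\n", (unsigned long long)or_form);
        return 0;
    }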
|