/xnu-10063.141.1/osfmk/arm/

preemption_disable.c
    264  struct _preemption_disable_pcpu *pcpu = PERCPU_GET(_preemption_disable_pcpu_data);  in _preemption_disable_snap_start()
    297  struct _preemption_disable_pcpu *pcpu = PERCPU_GET(_preemption_disable_pcpu_data);  in _preemption_disable_snap_end()
    460  struct _preemption_disable_pcpu *pcpu = PERCPU_GET(_preemption_disable_pcpu_data);  in abandon_preemption_disable_measurement()
    476  struct _preemption_disable_pcpu *pcpu = PERCPU_GET(_preemption_disable_pcpu_data);  in _do_disable_preemption_without_measurements()
|
cpu_common.c
    713  return PERCPU_GET(processor);  in current_processor()
|
/xnu-10063.141.1/osfmk/kern/

iotrace.h
     78  nextidxp = PERCPU_GET(iotrace_next);  in iotrace()
     80  cur_iotrace_ring = *PERCPU_GET(iotrace_ring);  in iotrace()
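The two iotrace.h hits above show a pairing that recurs in this list: a per-CPU ring pointer plus a per-CPU write index, both fetched with preemption disabled so they refer to the same CPU. A minimal sketch of that shape, assuming hypothetical names (my_ring, my_next, my_record, MY_RING_ENTRIES) and eliding the boot-time ring allocation; this is not the actual iotrace layout:

    #include <stdint.h>
    #include <mach/mach_time.h>
    #include <kern/percpu.h>

    #define MY_RING_ENTRIES 64              /* illustrative per-CPU ring size */

    struct my_entry {
        uint64_t timestamp;
        uint64_t value;
    };

    /* Hypothetical per-CPU slots: a pointer to this CPU's ring, and its write index. */
    static struct my_entry *PERCPU_DATA(my_ring);
    static uint32_t         PERCPU_DATA(my_next);

    static void
    my_record(uint64_t value)
    {
        disable_preemption();               /* stay on one CPU while touching its slots */
        uint32_t        *nextp = PERCPU_GET(my_next);
        struct my_entry *ring  = *PERCPU_GET(my_ring);  /* the slot holds a pointer, so dereference */

        ring[*nextp].timestamp = mach_absolute_time();
        ring[*nextp].value     = value;
        *nextp = (*nextp + 1) % MY_RING_ENTRIES;
        enable_preemption();
    }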
|
stack.c
    240  cache = PERCPU_GET(stack_cache);  in stack_free_stack()
    275  cache = PERCPU_GET(stack_cache);  in stack_alloc_try()
|
debug.c
     212  return PERCPU_GET(debugger_state);  in current_debugger_state()
    1426  pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
    2214  if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
    2217  *PERCPU_GET(hv_entry_detected) = true;
    2230  if (*PERCPU_GET(hv_entry_detected)) {
|
percpu.h
    108  #define PERCPU_GET(name) \  (macro definition)
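The percpu.h hit is the definition site: PERCPU_GET(name) evaluates to a pointer to the calling CPU's instance of a variable declared with PERCPU_DATA, which is why the hits in this list read either *PERCPU_GET(...) for scalar or pointer slots, or PERCPU_GET(...)->field for struct slots. A minimal usage sketch with an invented counter (my_event_count and my_event_note are hypothetical; PERCPU_DATA, PERCPU_GET, and the preemption calls are the real osfmk/kern/percpu.h API):

    #include <stdint.h>
    #include <kern/percpu.h>

    /* One instance of this counter exists per CPU. */
    static uint64_t PERCPU_DATA(my_event_count);

    static void
    my_event_note(void)
    {
        /*
         * Disabling preemption pins the thread to the current CPU,
         * so the pointer PERCPU_GET() returns keeps naming this
         * CPU's copy for the duration of the update.
         */
        disable_preemption();
        (*PERCPU_GET(my_event_count))++;
        enable_preemption();
    }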
|
smr.c
     702  smrw = PERCPU_GET(smr_worker);  in smr_enter_sleepable()
     742  __smr_wake_oncore_sleepers(PERCPU_GET(smr_worker));  in smr_ack_ipi()
     748  struct smr_worker *smrw = PERCPU_GET(smr_worker);  in smr_mark_active_trackers_stalled()
     871  smrw = PERCPU_GET(smr_worker);  in smr_leave_sleepable()
     876  __smr_wake_oncore_sleepers(PERCPU_GET(smr_worker));  in smr_leave_sleepable()
    1460  smrw = PERCPU_GET(smr_worker);  in smr_call()
    1568  __smr_cpu_lazy_up_if_needed(PERCPU_GET(smr_worker));  in smr_barrier()
    2172  struct smr_worker *smrw = PERCPU_GET(smr_worker);  in smr_cpu_tick()
|
locks_internal.h
    257  return PERCPU_GET(lck_mcs);  in lck_mcs_get_current()
|
telemetry.c
    1402  bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);  in telemetry_stash_ca_event()
    1411  uintptr_t *cache_address = PERCPU_GET(brk_telemetry_cache_address);  in telemetry_stash_ca_event()
    1499  bool *in_handler = PERCPU_GET(brk_telemetry_in_handler);  in telemetry_flush_ca_events()
|
exclaves.c
    1548  XrtHosted_Buffer_t *request_buf = *PERCPU_GET(exclaves_request);  in exclaves_scheduler_send()
    1557  XrtHosted_Buffer_t *response_buf = *PERCPU_GET(exclaves_response);  in exclaves_scheduler_send()
    2152  *PERCPU_GET(exclaves_request) =  in exclaves_scheduler_boot()
    2154  *PERCPU_GET(exclaves_response) =  in exclaves_scheduler_boot()
|
sched_prim.h
    483  PERCPU_GET(sched_stats)->field++; \
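The sched_prim.h hit is a macro body rather than a function: because PERCPU_GET returns a struct pointer, per-CPU statistics can be bumped through ordinary member access, with the field name pasted in by the caller. A hedged sketch of that pattern with an invented stats struct (struct my_stats and MY_STATS_INC are illustrative, not the real SCHED_STATS machinery):

    #include <stdint.h>
    #include <kern/percpu.h>

    struct my_stats {
        uint32_t wakeups;
        uint32_t preemptions;
    };

    static struct my_stats PERCPU_DATA(my_stats);

    /* The caller supplies the field name, as in the hit above. */
    #define MY_STATS_INC(field)                 \
        do {                                    \
            disable_preemption();               \
            PERCPU_GET(my_stats)->field++;      \
            enable_preemption();                \
        } while (0)

A call site would read MY_STATS_INC(wakeups);, matching the field-pasting style of the original macro.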
|
locks.c
    273  PERCPU_GET(lck_spinlock_to_info)->owner_thread_orig = owner & ~0x7ul;  in lck_spinlock_timeout_set_orig_owner()
    284  PERCPU_GET(lck_spinlock_to_info)->owner_thread_orig =  in lck_spinlock_timeout_set_orig_ctid()
    294  lck_spinlock_to_info_t lsti = PERCPU_GET(lck_spinlock_to_info);  in lck_spinlock_timeout_hit()
|
/xnu-10063.141.1/osfmk/arm64/sptm/pmap/

pmap_misc.c
    46  struct _preemption_disable_pcpu *pcpu = PERCPU_GET(_preemption_disable_pcpu_data);  in pmap_abandon_measurement()
|
pmap_data.h
    2067  pmap_retype_epoch_t *retype_epoch = &PERCPU_GET(pmap_sptm_percpu)->retype_epoch;  in pmap_retype_epoch_enter()
    2093  pmap_retype_epoch_t *retype_epoch = &PERCPU_GET(pmap_sptm_percpu)->retype_epoch;  in pmap_retype_epoch_exit()
    2129  pmap_retype_epoch_t *retype_epoch = &PERCPU_GET(pmap_sptm_percpu)->retype_epoch;  in pmap_retype_epoch_prepare_drain()
    2176  pmap_retype_epoch_t *retype_epoch = &PERCPU_GET(pmap_sptm_percpu)->retype_epoch;  in pmap_retype_epoch_drain()
|
pmap.c
    3287  sptm_pte_t *prev_ptes = PERCPU_GET(pmap_sptm_percpu)->sptm_prev_ptes;
    3718  pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
    3854  PERCPU_GET(pmap_sptm_percpu)->sptm_ops_pa, flush_range->pending_disjoint_entries);
    3955  sptm_disjoint_op_t *sptm_ops = PERCPU_GET(pmap_sptm_percpu)->sptm_ops;
    4016  PERCPU_GET(pmap_sptm_percpu)->sptm_templates_pa,
    4061  PERCPU_GET(pmap_sptm_percpu)->sptm_templates[flush_range->pending_region_entries++] = template;
    4225  sptm_pcpu = PERCPU_GET(pmap_sptm_percpu); \
    4810  sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
    4944  sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
    5330  *old_pte = prev_pte = PERCPU_GET(pmap_sptm_percpu)->sptm_prev_ptes[0];
    [all …]
|
/xnu-10063.141.1/osfmk/arm/pmap/

pmap_misc.c
    46  struct _preemption_disable_pcpu *pcpu = PERCPU_GET(_preemption_disable_pcpu_data);  in pmap_abandon_measurement()
|
/xnu-10063.141.1/osfmk/arm64/

lock_ticket_pv.c
    108  lck_tktlock_pv_info_t ltpi = PERCPU_GET(lck_tktlock_pv_info);  in hw_lck_ticket_lock_wait_pv()
|
/xnu-10063.141.1/osfmk/i386/

lock_ticket_pv.c
    129  lck_tktlock_pv_info_t ltpi = PERCPU_GET(lck_tktlock_pv_info);  in hw_lck_ticket_lock_wait_pv()
|
cpu_data.h
    565  nextidxp = PERCPU_GET(traptrace_next);  in traptrace_start()
    571  *PERCPU_GET(traptrace_ring), sizeof(traptrace_entry_t) * traptrace_entries_per_cpu);  in traptrace_start()
|
/xnu-10063.141.1/bsd/vfs/

vfs_io_compression_stats.c
    276  lz4_encode_scratch_t *scratch_buf = *PERCPU_GET(per_cpu_scratch_buf);  in iocs_compress_block()
    277  uint8_t *dest_buf = *PERCPU_GET(per_cpu_compression_buf);  in iocs_compress_block()
|
/xnu-10063.141.1/libkern/os/

log_queue.c
    535  log_queue_t lq = PERCPU_GET(oslog_queue);  in log_queue_dispatch()
    587  log_queue_t lq = PERCPU_GET(oslog_queue);  in log_queue_add()
|
/xnu-10063.141.1/osfmk/prng/

entropy.c
    223  entropy_cpu_data_t *e = PERCPU_GET(entropy_cpu_data);  in entropy_collect()
|
/xnu-10063.141.1/san/memory/

kasan-classic.c
    637  kasan_quarantine_t q = PERCPU_GET(kasan_quarantine);  in kasan_quarantine()
|
/xnu-10063.141.1/tools/lldbmacros/core/

kernelcore.py
    343  def PERCPU_GET(self, name, cpu):  member in KernelTarget
|
/xnu-10063.141.1/tools/lldbmacros/

xnu.py
    1317  getattr(kern.PERCPU_GET(ring, 0)[0], field_arg)
    1343  ring_slice = [(x, y, kern.PERCPU_GET(ring, x)[y]) for y in range(entries_per_cpu)]
|