
Searched refs: os_atomic_load (Results 1 – 25 of 165), sorted by relevance

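Almost every hit in this listing has the same shape: os_atomic_load(&location, ordering), where the second argument is one of xnu's bare ordering tokens (relaxed, acquire, dependency, seq_cst, compiler_acq_rel). As a rough mental model, and this mapping is an assumption made here rather than something the listing states, a relaxed or acquire os_atomic_load reads like the corresponding C11 atomic_load_explicit. The following minimal, self-contained sketch mirrors the snapshot-style usage seen in files such as exclaves_memory.c and sched_average.c; the struct, field and function names are hypothetical.

```c
/*
 * Sketch only: C11 atomics standing in for xnu's os_atomic_load macro.
 * The counters and report_accounting() are hypothetical, loosely mirroring
 * the exclaves_allocation_statistics reads listed below.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct alloc_stats {
	_Atomic uint64_t pages_alloced;
	_Atomic uint64_t pages_freed;
};

static struct alloc_stats stats;

/* Report a snapshot of the counters.  Each counter is read independently
 * and no ordering against other memory is required, which is why the
 * reporting paths in the results pass the "relaxed" token. */
static void
report_accounting(void)
{
	uint64_t alloced = atomic_load_explicit(&stats.pages_alloced, memory_order_relaxed);
	uint64_t freed = atomic_load_explicit(&stats.pages_freed, memory_order_relaxed);

	printf("alloced=%llu freed=%llu\n",
	    (unsigned long long)alloced, (unsigned long long)freed);
}

int
main(void)
{
	atomic_fetch_add_explicit(&stats.pages_alloced, 3, memory_order_relaxed);
	atomic_fetch_add_explicit(&stats.pages_freed, 1, memory_order_relaxed);
	report_accounting();
	return 0;
}
```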

/xnu-12377.61.12/osfmk/kern/
exclaves_memory.c
94 e->pages_alloced = os_atomic_load(&exclaves_allocation_statistics.pages_alloced, relaxed); in exclaves_memory_report_accounting()
95 e->pages_freed = os_atomic_load(&exclaves_allocation_statistics.pages_freed, relaxed); in exclaves_memory_report_accounting()
96 e->time_allocating = os_atomic_load(&exclaves_allocation_statistics.time_allocating, relaxed); in exclaves_memory_report_accounting()
97 e->max_alloc_latency = os_atomic_load(&exclaves_allocation_statistics.max_alloc_latency, relaxed); in exclaves_memory_report_accounting()
98 …e->alloc_latency_highbit0 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit… in exclaves_memory_report_accounting()
99 …e->alloc_latency_highbit1 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit… in exclaves_memory_report_accounting()
100 …e->alloc_latency_highbit2 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit… in exclaves_memory_report_accounting()
101 …e->alloc_latency_highbit3 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit… in exclaves_memory_report_accounting()
102 …e->alloc_latency_highbit4 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit… in exclaves_memory_report_accounting()
103 …e->alloc_latency_highbit5 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit… in exclaves_memory_report_accounting()
[all …]
exclaves_boot.c
160 while (os_atomic_load(&exclaves_boot_status, relaxed) < status) { in exclaves_boot_status_wait()
176 assert3u(status, >, os_atomic_load(&exclaves_boot_status, relaxed)); in exclaves_boot_status_set()
206 os_atomic_load(&exclaves_boot_status, relaxed); in exclaves_boot_exclavecore()
258 os_atomic_load(&exclaves_boot_status, relaxed); in exclaves_boot_exclavekit()
353 os_atomic_load(&exclaves_boot_status, acquire); in exclaves_boot_wait()
424 exclaves_boot_status_t boot_status = os_atomic_load(&exclaves_boot_status, relaxed); in exclaves_get_boot_status_string()
449 os_atomic_load(&exclaves_boot_status, relaxed); in exclaves_get_boot_stage()
476 status = os_atomic_load(&exclaves_boot_status, relaxed); in exclaves_boot_supported()
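The exclaves_boot.c hits show the other recurring use: a status word that setters advance and waiters poll, with relaxed loads inside the wait loop (exclaves_boot_status_wait) and an acquire load on the wait path in exclaves_boot_wait. The sketch below is a hedged C11 analogue of that pairing; the enum values, function names and the release store on the setter side are assumptions made for illustration, not xnu code.

```c
/*
 * Sketch of a status-word wait: relaxed polling, then one acquire load so
 * that whatever the setter published before advancing the status is visible.
 * Names are hypothetical; only the load-ordering pattern follows the hits.
 */
#include <stdatomic.h>

typedef enum { BOOT_NONE = 0, BOOT_CORE = 1, BOOT_KIT = 2 } boot_status_t;

static _Atomic boot_status_t boot_status = BOOT_NONE;

/* Setter: the release store pairs with the waiter's acquire load below. */
static void
boot_status_set(boot_status_t status)
{
	atomic_store_explicit(&boot_status, status, memory_order_release);
}

/* Waiter: cheap relaxed probes while below the target status, then an
 * acquire load before returning to the caller.  (The kernel blocks or
 * yields instead of spinning; spinning keeps the sketch short.) */
static void
boot_status_wait(boot_status_t status)
{
	while (atomic_load_explicit(&boot_status, memory_order_relaxed) < status) {
		/* spin */
	}
	(void)atomic_load_explicit(&boot_status, memory_order_acquire);
}
```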
sched_average.c
190 load_now[TH_BUCKET_RUN] = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed); in compute_sched_load()
191 load_now[TH_BUCKET_FIXPRI] = os_atomic_load(&sched_run_buckets[TH_BUCKET_FIXPRI], relaxed); in compute_sched_load()
192 load_now[TH_BUCKET_SHARE_FG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_FG], relaxed); in compute_sched_load()
193 load_now[TH_BUCKET_SHARE_DF] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_DF], relaxed); in compute_sched_load()
194 load_now[TH_BUCKET_SHARE_UT] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_UT], relaxed); in compute_sched_load()
195 load_now[TH_BUCKET_SHARE_BG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_BG], relaxed); in compute_sched_load()
288 uint32_t nthreads = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) - 1; in compute_averages()
lock_ptr.c
62 hw_lck_ptr_t tmp = os_atomic_load(lck, relaxed); in __hw_lck_ptr_invalid_panic()
113 hw_lck_ptr_t tmp = os_atomic_load(lck, relaxed); in hw_lck_ptr_destroy()
134 return os_atomic_load(lck, relaxed).lck_ptr_locked; in hw_lck_ptr_held()
147 tmp = os_atomic_load(lck, relaxed); in hw_lck_ptr_timeout_panic()
271 tmp = os_atomic_load(lck, relaxed); in hw_lck_ptr_lock_fastpath()
413 hw_lck_ptr_t tmp = os_atomic_load(lck, acquire); in hw_lck_ptr_wait_for_value()
socd_client.c
62 bool already_initialized = os_atomic_load(&socd_client_trace_available, relaxed); in socd_client_init()
95 long available = os_atomic_load(&socd_client_trace_available, relaxed); in socd_client_set_primary_kernelcache_uuid()
126 available = os_atomic_load(&socd_client_trace_available, dependency); in socd_client_trace()
lock_mtx.c
708 deadline = ml_get_timebase() + os_atomic_load(&MutexSpin, relaxed) * processor_avail_count; in lck_mtx_lock_adaptive_spin()
741 … prev == 0 || (os_atomic_load(astp, relaxed) & AST_URGENT) || (ml_get_timebase() > deadline))) { in lck_mtx_lock_adaptive_spin()
762 deadline = ml_get_timebase() + os_atomic_load(&MutexSpin, relaxed); in lck_mtx_lock_adaptive_spin()
794 (os_atomic_load(astp, relaxed) & AST_URGENT) || in lck_mtx_lock_adaptive_spin()
849 state = os_atomic_load(&lock->lck_mtx, relaxed); in lck_mtx_lock_contended()
925 state = os_atomic_load(&lock->lck_mtx, relaxed); in lck_mtx_lock_contended()
1231 data = os_atomic_load(&lock->lck_mtx.data, compiler_acq_rel); in lck_mtx_unlock_contended()
1311 lck_mtx_state_t state = os_atomic_load(&lock->lck_mtx, relaxed); in lck_mtx_assert()
1331 lck_mtx_state_t state = os_atomic_load(&lock->lck_mtx, relaxed); in lck_mtx_assert_owned_spin()
1352 lck_mtx_state_t state = os_atomic_load(&lock->lck_mtx, relaxed); in lck_mtx_convert_spin()
[all …]
mpsc_ring.c
257 union mpsc_ring_head_tail head_tail = os_atomic_load( in mpsc_ring_write()
311 union mpsc_ring_head_tail head_tail = os_atomic_load(&buf->mr_head_tail, acquire); in mpsc_ring_read_start()
316 uint32_t hold = os_atomic_load(&buf->mr_writer_holds[i], relaxed); in mpsc_ring_read_start()
smr.c
651 s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in __smr_enter()
722 if (__improbable(os_atomic_load(&smrw->sect_waiter, compiler_acq_rel))) { in __smr_wake_oncore_sleepers()
1032 return os_atomic_load(&pcpu->c_rd_seq, relaxed); in __smr_wait_for_oncore()
1104 smr_seq_t seq = os_atomic_load(&pcpu->c_rd_seq, relaxed); in __smr_scan()
1150 smr_seq_t seq = os_atomic_load(&pcpu->stall_rd_seq, relaxed); in __smr_scan()
1188 clk.s_rd_seq = os_atomic_load(&smr->smr_clock.s_rd_seq, acquire); in __smr_poll()
1197 clk.s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in __smr_poll()
1222 return SMR_SEQ_INC + os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in smr_deferred_advance()
1270 clk.s_rd_seq = os_atomic_load(&smr->smr_clock.s_rd_seq, relaxed); in smr_synchronize()
1272 clk.s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in smr_synchronize()
[all …]
mpsc_queue.c
48 mpsc_queue_chain_t head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_restore_batch()
54 head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_restore_batch()
72 tail = os_atomic_load(&q->mpqh_tail, relaxed); in mpsc_queue_dequeue_batch()
78 head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_dequeue_batch()
111 elm = os_atomic_load(&cur->mpqc_next, relaxed); in mpsc_queue_batch_next()
locks_internal.h
97 __auto_type __v = os_atomic_load(p, relaxed); \
104 (os_atomic_load(p, relaxed) == (e))
139 #define lock_load_exclusive(p, m) os_atomic_load(p, relaxed)
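locks_internal.h wraps the same relaxed load in small helper macros (for example lock_load_exclusive at line 139 and the load-and-compare expression at line 104). Below is a hypothetical sketch of that macro style, using C11 atomics instead of xnu's headers; the macro names and the usage function are made up here.

```c
/*
 * Sketch only: helper macros around a relaxed atomic load, in the spirit of
 * the locks_internal.h hits above.  Real lock code would insert a pause or
 * wait-for-event hint in the spin loop.
 */
#include <stdatomic.h>
#include <stdint.h>

/* True once *(p) currently holds (e); each probe is a relaxed load. */
#define lock_value_is(p, e) \
	(atomic_load_explicit((p), memory_order_relaxed) == (e))

/* Spin until the location reaches the expected value. */
#define lock_wait_for_value(p, e)              \
	do {                                   \
		while (!lock_value_is(p, e)) { \
			/* cpu pause hint here */  \
		}                              \
	} while (0)

static _Atomic uint32_t lock_word;

/* Example use: wait until another context stores 1 into lock_word. */
static void
wait_until_released(void)
{
	lock_wait_for_value(&lock_word, 1u);
}
```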
ast.h
194 #define thread_ast_peek(act, reason) (os_atomic_load(&(act)->ast, relaxed) & (reason))
195 #define thread_ast_get(act) os_atomic_load(&(act)->ast, relaxed)
exclaves_inspection.c
410 assert(!os_atomic_load(&exclaves_inspection_initialized, relaxed)); in exclaves_inspection_init()
492 return os_atomic_load(&exclaves_inspection_initialized, acquire); in exclaves_inspection_is_initialized()
507 …assert((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_NO… in exclaves_inspection_check_ast()
514 …while ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_ST… in exclaves_inspection_check_ast()
521 …if ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_KPERF… in exclaves_inspection_check_ast()
remote_time.c
66 if (!os_atomic_load(&bt_init_flag, acquire)) { in mach_bridge_timer_maintenance()
89 assert(os_atomic_load(&bt_init_flag, relaxed)); in mach_bridge_timer_enable()
182 if (os_atomic_load(&bt_init_flag, acquire)) { in bt_params_get_latest()
505 if (!os_atomic_load(&bt_init_flag, acquire)) { in mach_bridge_remote_time()
sched_rt.c
235 return os_atomic_load(&pset_array[src_pset]->sched_rt_edges[dst_pset], relaxed); in sched_rt_config_get()
568 uint64_t nset_deadline = os_atomic_load(&nset->stealable_rt_threads_earliest_deadline, relaxed); in sched_rt_steal_thread()
581 if (os_atomic_load(&pset->stealable_rt_threads_earliest_deadline, relaxed) <= target_deadline) { in sched_rt_steal_thread()
1252 assert(os_atomic_load(&rt_run_queue->count, relaxed) == count); in check_rt_runq_consistency()
1253 assert(os_atomic_load(&rt_run_queue->constraint, relaxed) == constraint); in check_rt_runq_consistency()
1254 assert(os_atomic_load(&rt_run_queue->ed_index, relaxed) == ed_index); in check_rt_runq_consistency()
1311 SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed)); in rt_runq_enqueue()
1347 return os_atomic_load(&pset->rt_runq.count, relaxed); in rt_runq_count()
1370 return os_atomic_load(&pset->rt_runq.constraint, relaxed) <= rt_constraint_threshold; in rt_runq_is_low_latency()
1383 int ed_index = os_atomic_load(&rt_run_queue->ed_index, relaxed); in rt_runq_dequeue()
[all …]
/xnu-12377.61.12/san/memory/
ubsan_log.c
79 if (n == os_atomic_load(&ubsan_log_tail, acquire)) { in ubsan_log_append()
119 head = os_atomic_load(&ubsan_log_head, relaxed);
121 tail = os_atomic_load(&ubsan_log_tail, relaxed);
157 head = os_atomic_load(&ubsan_log_head, relaxed);
159 tail = os_atomic_load(&ubsan_log_tail, relaxed);
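ubsan_log.c applies os_atomic_load to a pair of head/tail ring indices: the append path checks the tail with an acquire load before claiming a slot (line 79), while other paths in the file take relaxed snapshots of both indices. The single-producer ring below is only a rough analogue of that shape; the buffer size, element type and function name are hypothetical.

```c
/*
 * Sketch of a bounded single-producer log ring.  The acquire load of the
 * tail pairs with a release store by the consumer, so a slot is only reused
 * once the consumer is done with it.  Not xnu code; illustration only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOG_SIZE 64u	/* hypothetical, power of two */

static uint64_t log_buf[LOG_SIZE];
static _Atomic uint32_t log_head;	/* next slot the producer writes */
static _Atomic uint32_t log_tail;	/* next slot the consumer reads  */

/* Append one entry, dropping it if the ring is full (compare the acquire
 * load of the tail in ubsan_log_append above). */
static bool
log_append(uint64_t entry)
{
	uint32_t head = atomic_load_explicit(&log_head, memory_order_relaxed);
	uint32_t next = (head + 1) % LOG_SIZE;

	if (next == atomic_load_explicit(&log_tail, memory_order_acquire)) {
		return false;	/* full */
	}
	log_buf[head] = entry;
	atomic_store_explicit(&log_head, next, memory_order_release);
	return true;
}
```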
/xnu-12377.61.12/osfmk/arm64/
lock_ticket_pv.c
51 const cpumap_t wmask = os_atomic_load(&ticket_waitmask_pv, acquire); in hw_lck_ticket_unlock_kick_pv()
63 const hw_lck_ticket_t *wlck = os_atomic_load(&ltpi->ltpi_lck, in hw_lck_ticket_unlock_kick_pv()
69 const uint8_t wt = os_atomic_load(&ltpi->ltpi_wt, acquire); in hw_lck_ticket_unlock_kick_pv()
123 const uint8_t cticket = os_atomic_load(&lck->cticket, acquire); in hw_lck_ticket_lock_wait_pv()
machine_remote_time.c
50 if (os_atomic_load(&bt_init_flag, relaxed)) { in mach_bridge_init_timestamp()
69 if (!os_atomic_load(&bt_init_flag, acquire)) { in mach_bridge_recv_timestamps()
91 if (!os_atomic_load(&bt_init_flag, acquire)) { in mach_bridge_set_params()
/xnu-12377.61.12/osfmk/i386/
lock_ticket_pv.c
72 const cpumask_t wmask = os_atomic_load(&ticket_waitmask_pv, acquire); in hw_lck_ticket_unlock_kick_pv()
84 const hw_lck_ticket_t *wlck = os_atomic_load(&ltpi->ltpi_lck, in hw_lck_ticket_unlock_kick_pv()
90 const uint8_t wt = os_atomic_load(&ltpi->ltpi_wt, acquire); in hw_lck_ticket_unlock_kick_pv()
144 const uint8_t cticket = os_atomic_load(&lck->cticket, acquire); in hw_lck_ticket_lock_wait_pv()
/xnu-12377.61.12/bsd/kern/
mem_acct.c
72 allocated = os_atomic_load(&macct->ma_allocated, relaxed); in mem_acct_limited()
299 value = os_atomic_load(&acct->ma_peak, relaxed); in sysctl_subsystem_peak()
321 hardlimit = os_atomic_load(&acct->ma_hardlimit, relaxed); in sysctl_subsystem_soft_limit()
347 value = os_atomic_load(&acct->ma_hardlimit, relaxed); in sysctl_subsystem_hard_limit()
369 value = os_atomic_load(&acct->ma_allocated, relaxed); in sysctl_subsystem_allocated()
484 s->peak = os_atomic_load(&a->ma_peak, relaxed); in memacct_copy_stats()
485 s->allocated = os_atomic_load(&a->ma_allocated, relaxed); in memacct_copy_stats()
kern_memorystatus_policy.c
81 os_atomic_load(&memorystatus_compressor_space_shortage, relaxed); in memstat_evaluate_health_conditions()
93 status->msh_phantom_cache_pressure = os_atomic_load(&memorystatus_phantom_cache_pressure, relaxed); in memstat_evaluate_health_conditions()
102 status->msh_pageout_starved = os_atomic_load(&memorystatus_pageout_starved, relaxed); in memstat_evaluate_health_conditions()
110 status->msh_zone_map_is_exhausted = os_atomic_load(&memorystatus_zone_map_is_exhausted, relaxed); in memstat_evaluate_health_conditions()
343 …ble_compressor_segments_over_limit && !vm_swapout_thread_running && !os_atomic_load(&vm_swapout_wa… in memorystatus_pick_action()
355 vm_swapout_thread_running, os_atomic_load(&vm_swapout_wake_pending, relaxed)); in memorystatus_pick_action()
449 uint64_t last_action_ts = os_atomic_load(&last_no_space_action_ts, relaxed); in memorystatus_pick_action()
476 uint64_t last_purge_ts = os_atomic_load(&memstat_last_cache_purge_ts, relaxed); in memorystatus_pick_action()
counter_test.c
121 if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
139 if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
156 if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
171 if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
/xnu-12377.61.12/osfmk/arm/
preemption_disable.c
312 const uint64_t max_duration = os_atomic_load(&pcpu->pdp_max_mach_duration, relaxed); in _preemption_disable_snap_end()
370 const uint64_t threshold = os_atomic_load(&sched_preemption_disable_threshold_mt, relaxed); in _collect_preemption_disable_measurement()
448 durations[cpu++] = os_atomic_load(&pcpu->pdp_max_mach_duration, relaxed); in preemption_disable_get_max_durations()
/xnu-12377.61.12/san/coverage/
kcov_ksancov.c
221 if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) { in trace_pc_guard_pcs()
238 if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) { in trace_pc_guard_pcs_stk()
322 if (os_atomic_load(&dev->hdr->kh_enabled, relaxed) == 0) { in kcov_ksancov_trace_pc()
385 if (os_atomic_load(&dev->cmps_hdr, relaxed) == NULL) { in kcov_ksancov_trace_cmp()
388 if (os_atomic_load(&dev->cmps_hdr->kh_enabled, relaxed) == 0) { in kcov_ksancov_trace_cmp()
405 if (os_atomic_load(&dev->cmps_trace->kt_head, relaxed) >= max_entries) { in kcov_ksancov_trace_cmp()
438 if (os_atomic_load(&dev->cmps_hdr, relaxed) == NULL) { in kcov_ksancov_trace_cmp_func()
441 if (os_atomic_load(&dev->cmps_hdr->kh_enabled, relaxed) == 0) { in kcov_ksancov_trace_cmp_func()
458 if (os_atomic_load(&dev->cmps_trace->kt_head, relaxed) >= max_entries) { in kcov_ksancov_trace_cmp_func()
633 unsigned int modules_count = os_atomic_load(&ksancov_od_modules_count, relaxed); in kcov_ksancov_must_instrument()
[all …]
/xnu-12377.61.12/osfmk/arm64/sptm/pmap/
pmap_data.h
295 assertf(os_atomic_load(&pv_head_table[index], relaxed) & PVH_LOCK_FLAGS, in pvh_assert_locked()
297 &pv_head_table[index], (void*)(os_atomic_load(&pv_head_table[index], relaxed)), index); in pvh_assert_locked()
348 locked_pvh.pvh = os_atomic_load(&pv_head_table[index], relaxed); in pvh_lock()
382 …const locked_pvh_t locked_pvh = {.pvh = os_atomic_load(&pv_head_table[index], relaxed), .pai = ind… in pvh_lock_nopreempt()
406 locked_pvh.pvh = os_atomic_load(&pv_head_table[index], relaxed); in pvh_try_lock()
453 const uintptr_t old_pvh = os_atomic_load(&pv_head_table[index], relaxed); in pvh_lock_enter_sleep_mode()
503 const uintptr_t old_pvh = os_atomic_load(&pv_head_table[index], relaxed); in pvh_unlock()
2115 os_atomic_load(&pmap_pcpu->pmap_epoch.local_seq, relaxed); in pmap_epoch_prepare_drain()
/xnu-12377.61.12/osfmk/tests/
vfp_state_test.c
71 while (os_atomic_load(var, acquire) != num) { in wait_threads()
73 if (os_atomic_load(var, acquire) != num) { in wait_threads()
