| /xnu-8792.41.9/san/memory/ |
| ubsan_log.c |
    79   if (n == os_atomic_load(&ubsan_log_tail, acquire)) {  in ubsan_log_append()
    119  head = os_atomic_load(&ubsan_log_head, relaxed);
    121  tail = os_atomic_load(&ubsan_log_tail, relaxed);
    157  head = os_atomic_load(&ubsan_log_head, relaxed);
    159  tail = os_atomic_load(&ubsan_log_tail, relaxed);
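The ubsan_log.c hits are the head/tail bookkeeping of a small log ring buffer: the append path acquire-loads the tail to decide whether the next slot may be reused, while the dump paths take relaxed snapshots of head and tail. Below is a minimal, stand-alone sketch of that fullness check, assuming os_atomic_load(p, acquire) behaves like C11 atomic_load_explicit(p, memory_order_acquire); the buffer size and variable names are illustrative, not the actual xnu code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define LOG_SLOTS 256                  /* illustrative capacity, not xnu's */

static _Atomic size_t log_head;        /* next slot the writer will fill */
static _Atomic size_t log_tail;        /* next slot the reader will drain */

/*
 * Writer-side fullness check, in the spirit of the ubsan_log.c:79 hit:
 * the acquire load of the tail pairs with the reader's release store,
 * so a slot is never reused before its previous contents were consumed.
 */
static bool
log_buffer_full(void)
{
	size_t next = (atomic_load_explicit(&log_head, memory_order_relaxed) + 1) % LOG_SLOTS;

	return next == atomic_load_explicit(&log_tail, memory_order_acquire);
}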
|
| /xnu-8792.41.9/osfmk/kern/ |
| sched_average.c |
    193  load_now[TH_BUCKET_RUN] = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);  in compute_sched_load()
    194  load_now[TH_BUCKET_FIXPRI] = os_atomic_load(&sched_run_buckets[TH_BUCKET_FIXPRI], relaxed);  in compute_sched_load()
    195  load_now[TH_BUCKET_SHARE_FG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_FG], relaxed);  in compute_sched_load()
    196  load_now[TH_BUCKET_SHARE_DF] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_DF], relaxed);  in compute_sched_load()
    197  load_now[TH_BUCKET_SHARE_UT] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_UT], relaxed);  in compute_sched_load()
    198  load_now[TH_BUCKET_SHARE_BG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_BG], relaxed);  in compute_sched_load()
    291  uint32_t nthreads = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) - 1;  in compute_averages()
|
| locks_internal.h |
    96   __auto_type __v = os_atomic_load(p, relaxed); \
    103  (os_atomic_load(p, relaxed) == (e))
    138  #define lock_load_exclusive(p, m) os_atomic_load(p, relaxed)
|
| cpu_quiesce.c |
    200  assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) &  in cpu_quiescent_counter_join()
    225  assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) &  in cpu_quiescent_counter_ast()
    340  checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed);  in cpu_quiescent_counter_checkin()
    378  checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed);  in cpu_quiescent_counter_assert_ast()
|
| smr.c |
    332  s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed);  in __smr_enter()
    337  old_seq = os_atomic_load(&pcpu->c_rd_seq, relaxed);  in __smr_enter()
    462  smr_seq_t seq = os_atomic_load(&it->c_rd_seq, relaxed);  in __smr_scan()
    511  clk.s_rd_seq = os_atomic_load(&smr->smr_clock.s_rd_seq, acquire);  in __smr_poll()
    520  clk.s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed);  in __smr_poll()
    562  return SMR_SEQ_INC + os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed);  in smr_deferred_advance_nopreempt()
|
| mpsc_queue.c |
    62   mpsc_queue_chain_t head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed);  in mpsc_queue_restore_batch()
    68   head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed);  in mpsc_queue_restore_batch()
    86   tail = os_atomic_load(&q->mpqh_tail, relaxed);  in mpsc_queue_dequeue_batch()
    92   head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed);  in mpsc_queue_dequeue_batch()
    125  elm = os_atomic_load(&cur->mpqc_next, relaxed);  in mpsc_queue_batch_next()
|
| socd_client.c |
    87   long available = os_atomic_load(&socd_client_trace_available, relaxed);  in socd_client_set_primary_kernelcache_uuid()
    108  available = os_atomic_load(&socd_client_trace_available, dependency);  in socd_client_trace()
|
| ast.h |
    190  #define thread_ast_peek(act, reason) (os_atomic_load(&(act)->ast, relaxed) & (reason))
    191  #define thread_ast_get(act) os_atomic_load(&(act)->ast, relaxed)
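ast.h reads the per-thread AST word with relaxed ordering only: thread_ast_peek()/thread_ast_get() are cheap snapshots, and any ordering that matters is supplied by the code that later acts on the bits. A rough stand-alone equivalent, assuming os_atomic_load(p, relaxed) is essentially atomic_load_explicit(p, memory_order_relaxed); the type and bit value here are illustrative, not taken from xnu.

#include <stdatomic.h>
#include <stdint.h>

typedef uint32_t ast_t;
#define AST_URGENT_STUB 0x01u          /* illustrative bit, value not taken from xnu */

struct thread_stub {
	_Atomic ast_t ast;             /* pending asynchronous system trap bits */
};

/* Rough analogue of thread_ast_peek(): a relaxed snapshot masked by the caller. */
static inline ast_t
thread_ast_peek_stub(struct thread_stub *t, ast_t reason)
{
	return atomic_load_explicit(&t->ast, memory_order_relaxed) & reason;
}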
|
| lock_mtx.c |
    751   prev == 0 || (os_atomic_load(astp, relaxed) & AST_URGENT))) {  in lck_mtx_lock_adaptive_spin()
    772   deadline = ml_get_timebase() + os_atomic_load(&MutexSpin, relaxed);  in lck_mtx_lock_adaptive_spin()
    804   (os_atomic_load(astp, relaxed) & AST_URGENT) ||  in lck_mtx_lock_adaptive_spin()
    859   state = os_atomic_load(&lock->lck_mtx, relaxed);  in lck_mtx_lock_contended()
    935   state = os_atomic_load(&lock->lck_mtx, relaxed);  in lck_mtx_lock_contended()
    1241  data = os_atomic_load(&lock->lck_mtx.data, compiler_acq_rel);  in lck_mtx_unlock_contended()
    1321  lck_mtx_state_t state = os_atomic_load(&lock->lck_mtx, relaxed);  in lck_mtx_assert()
    1345  lck_mtx_state_t state = os_atomic_load(&lock->lck_mtx, relaxed);  in lck_mtx_convert_spin()
    1375  lck_mtx_state_t state = os_atomic_load(&lck->lck_mtx, relaxed);  in kdp_lck_mtx_lock_spin_is_acquired()
    1393  lck_mtx_state_t state = os_atomic_load(&mutex->lck_mtx, relaxed);  in kdp_lck_mtx_find_owner()
|
| remote_time.c |
    66   if (!os_atomic_load(&bt_init_flag, acquire)) {  in mach_bridge_timer_maintenance()
    89   assert(os_atomic_load(&bt_init_flag, relaxed));  in mach_bridge_timer_enable()
    182  if (os_atomic_load(&bt_init_flag, acquire)) {  in bt_params_get_latest()
    505  if (!os_atomic_load(&bt_init_flag, acquire)) {  in mach_bridge_remote_time()
|
| test_lock.c |
    45   tmp.lck_value = os_atomic_load(&lck->lck_value, relaxed);  in hw_lck_ticket_test_wait_for_delta()
    115  if (os_atomic_load(&sched_preemption_disable_threshold_mt, relaxed) < sane_us2abs(20 * 1000)) {  in hw_lck_ticket_allow_invalid_test()
|
| restartable.c |
    390  trrs.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed);  in thread_reset_pcs_ack_IPI()
    408  state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed);  in thread_rr_wait_if_needed()
    480  state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed);  in thread_rr_wait_if_needed()
    518  state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed);  in thread_reset_pcs_ast()
|
| smr.h |
    161  os_atomic_load(&(ptr)->__smr_ptr, acquire)
    171  os_atomic_load(&(ptr)->__smr_ptr, acquire); \
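The smr.h macros acquire-load the SMR-protected pointer (__smr_ptr); the acquire pairs with the writer's release store, so a reader that observes the new pointer also observes the pointed-to object fully initialized. A minimal sketch of that publication pattern in portable C11, assuming the usual os_atomic_load-to-atomic_load_explicit mapping; the names below are mine, not xnu's.

#include <stdatomic.h>
#include <stdlib.h>

struct node {
	int value;
};

static struct node *_Atomic shared_node;   /* stands in for an SMR-protected pointer */

/* Writer: initialize fully, then publish with a release store. */
static void
publish_node(int v)
{
	struct node *n = malloc(sizeof(*n));
	if (n == NULL) {
		return;
	}
	n->value = v;
	atomic_store_explicit(&shared_node, n, memory_order_release);
}

/* Reader: the acquire load plays the role of the smr.h __smr_ptr load. */
static int
read_node_value(void)
{
	struct node *n = atomic_load_explicit(&shared_node, memory_order_acquire);
	return n != NULL ? n->value : -1;
}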
|
| zalloc_internal.h |
    565  for (zone_id_t i = 1, num_zones_##i = os_atomic_load(&num_zones, acquire); \
    570  last_zone_##z = &zone_array[os_atomic_load(&num_zones, acquire)]; \
    657  vm_size_t size = ptoa(os_atomic_load(&zone->z_wired_cur, relaxed));  in zone_size_wired()
|
| /xnu-8792.41.9/osfmk/arm64/ |
| lock_ticket_pv.c |
    51   const cpumap_t wmask = os_atomic_load(&ticket_waitmask_pv, acquire);  in hw_lck_ticket_unlock_kick_pv()
    63   const hw_lck_ticket_t *wlck = os_atomic_load(&ltpi->ltpi_lck,  in hw_lck_ticket_unlock_kick_pv()
    69   const uint8_t wt = os_atomic_load(&ltpi->ltpi_wt, acquire);  in hw_lck_ticket_unlock_kick_pv()
    123  const uint8_t cticket = os_atomic_load(&lck->cticket, acquire);  in hw_lck_ticket_lock_wait_pv()
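Both lock_ticket_pv.c variants show the paravirtualized ticket-lock wait path: the waiter keeps acquire-loading the currently served ticket (cticket) until it matches its own, and the unlock path acquire-loads the wait mask and per-CPU wait state to decide whom to kick. A generic, stand-alone sketch of the wait side only, with an illustrative struct layout (not xnu's hw_lck_ticket_t) and a plain spin where the PV code would park in a hypercall:

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock_stub {
	_Atomic uint8_t nticket;   /* next ticket to hand out */
	_Atomic uint8_t cticket;   /* ticket currently being served */
};

static void
ticket_lock_stub_lock(struct ticket_lock_stub *lck)
{
	/* Grab a ticket; the ordering that matters comes from the wait loop below. */
	uint8_t mine = atomic_fetch_add_explicit(&lck->nticket, 1, memory_order_relaxed);

	/*
	 * Spin until it is our turn. The acquire load pairs with the unlocker's
	 * release store to cticket, making the critical section's writes visible.
	 */
	while (atomic_load_explicit(&lck->cticket, memory_order_acquire) != mine) {
		/* spin; a PV build would wait for a kick here instead */
	}
}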
|
| machine_remote_time.c |
    50   if (os_atomic_load(&bt_init_flag, relaxed)) {  in mach_bridge_init_timestamp()
    69   if (!os_atomic_load(&bt_init_flag, acquire)) {  in mach_bridge_recv_timestamps()
    91   if (!os_atomic_load(&bt_init_flag, acquire)) {  in mach_bridge_set_params()
|
| loose_ends.c |
    293  uint32_t const report_phy_read_delay = os_atomic_load(&report_phy_read_delay_to, relaxed);  in ml_phys_read_data()
    294  uint32_t const trace_phy_read_delay = os_atomic_load(&trace_phy_read_delay_to, relaxed);  in ml_phys_read_data()
    469  uint32_t const report_phy_write_delay = os_atomic_load(&report_phy_write_delay_to, relaxed);  in ml_phys_write_data()
    470  uint32_t const trace_phy_write_delay = os_atomic_load(&trace_phy_write_delay_to, relaxed);  in ml_phys_write_data()
|
| /xnu-8792.41.9/osfmk/i386/ |
| lock_ticket_pv.c |
    72   const cpumask_t wmask = os_atomic_load(&ticket_waitmask_pv, acquire);  in hw_lck_ticket_unlock_kick_pv()
    84   const hw_lck_ticket_t *wlck = os_atomic_load(&ltpi->ltpi_lck,  in hw_lck_ticket_unlock_kick_pv()
    90   const uint8_t wt = os_atomic_load(&ltpi->ltpi_wt, acquire);  in hw_lck_ticket_unlock_kick_pv()
    144  const uint8_t cticket = os_atomic_load(&lck->cticket, acquire);  in hw_lck_ticket_lock_wait_pv()
|
| locks_i386_inlines.h |
    39   #define ordered_load(target) os_atomic_load(target, compiler_acq_rel)
|
| /xnu-8792.41.9/bsd/kern/ |
| counter_test.c |
    121  if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
    139  if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
    156  if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
    171  if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
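counter_test.c gates its worker loops on a single seq_cst-loaded "test running" flag. A plain C11 rendering of that polling pattern, with an illustrative flag name and loop body (the real test increments per-CPU counters; this sketch just counts iterations):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool test_running = true;   /* cleared by the coordinating thread */

/* Worker body: keep working until the shared flag is observed clear. */
static unsigned long
worker_loop(void)
{
	unsigned long iterations = 0;

	while (atomic_load_explicit(&test_running, memory_order_seq_cst)) {
		iterations++;
	}
	return iterations;
}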
|
| kern_memorystatus_policy.c |
    85   …status->msh_phantom_cache_pressure = os_atomic_load(&memorystatus_phantom_cache_pressure, acquire);  in memorystatus_health_check()
    98   if (os_atomic_load(&memorystatus_compressor_space_shortage, relaxed)) {  in memorystatus_health_check()
    106  status->msh_zone_map_is_exhausted = os_atomic_load(&memorystatus_zone_map_is_exhausted, relaxed);  in memorystatus_health_check()
    173  …ble_compressor_segments_over_limit && !vm_swapout_thread_running && !os_atomic_load(&vm_swapout_wa…  in memorystatus_pick_action()
    183  …unning=%d, vm_swapout_wake_pending=%d\n", vm_swapout_thread_running, os_atomic_load(&vm_swapout_wa…  in memorystatus_pick_action()
|
| kern_memorystatus_freeze.c |
    306  processes_frozen = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen, relaxed);  in get_thaw_percentage()
    307  processes_thawed = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed, relaxed);  in get_thaw_percentage()
    324  processes_frozen = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen, relaxed);  in get_thaw_percentage_fg()
    325  processes_thawed_fg = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed_fg, relaxed);  in get_thaw_percentage_fg()
    341  …processes_frozen_webcontent = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen_webc…  in get_thaw_percentage_webcontent()
    342  …processes_thawed_webcontent = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed_webc…  in get_thaw_percentage_webcontent()
    359  processes_frozen = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen, relaxed);  in get_thaw_percentage_bg()
    360  processes_thawed = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed, relaxed);  in get_thaw_percentage_bg()
    361  processes_thawed_fg = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed_fg, relaxed);  in get_thaw_percentage_bg()
    377  processes_frozen = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen, relaxed);  in get_thaw_percentage_fg_non_xpc_service()
    [all …]
|
| /xnu-8792.41.9/osfmk/tests/ |
| vfp_state_test.c |
    71   while (os_atomic_load(var, acquire) != num) {  in wait_threads()
    73   if (os_atomic_load(var, acquire) != num) {  in wait_threads()
|
| /xnu-8792.41.9/osfmk/x86_64/ |
| machine_remote_time.c |
    57   if (!os_atomic_load(&bt_init_flag, relaxed)) {  in mach_bridge_register_regwrite_timestamp_callback()
|
| /xnu-8792.41.9/osfmk/arm/ |
| machine_routines_common.c |
    962   …__ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed…  in ml_handle_interrupts_disabled_duration()
    969   uint64_t stackshot_timeout = os_atomic_load(&stackshot_interrupt_masked_timeout, relaxed);  in ml_handle_stackshot_interrupt_disabled_duration()
    970   uint64_t normal_timeout = os_atomic_load(&interrupt_masked_timeout, relaxed);  in ml_handle_stackshot_interrupt_disabled_duration()
    978   …__ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed…  in ml_handle_interrupt_handler_duration()
    1324  return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);  in ml_get_cpu_number_type()
    1328  return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed);  in ml_get_cpu_number_type()
|