
Searched refs: os_atomic_load (Results 1 – 25 of 94), sorted by relevance
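Every hit below follows the same pattern: os_atomic_load(ptr, ordering) is XNU's wrapper around C11 atomic loads, and the second argument is a bare ordering token (relaxed, acquire, seq_cst, dependency, compiler_acq_rel) selecting the memory ordering. As a minimal sketch of why the results mix relaxed and acquire loads, here is the equivalent flag-publication pattern in plain C11 atomics; the names init_flag, params, producer, and consumer are hypothetical, chosen to mirror the bt_init_flag checks in remote_time.c below:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool init_flag;   /* hypothetical, mirrors bt_init_flag */
static int params;               /* plain data published under the flag */

void
producer(int value)
{
	params = value;
	/* release store: makes the write to params visible to any
	 * thread that later observes init_flag == true with acquire */
	atomic_store_explicit(&init_flag, true, memory_order_release);
}

int
consumer(void)
{
	/* acquire load: the C11 equivalent of
	 * os_atomic_load(&init_flag, acquire); a relaxed load would
	 * see the flag flip but would not synchronize with the
	 * producer's earlier write to params */
	if (!atomic_load_explicit(&init_flag, memory_order_acquire)) {
		return -1; /* not initialized yet */
	}
	return params;
}

The relaxed loads in the results (for example the sched_run_buckets reads in sched_average.c) are standalone snapshots of a single value, where no such cross-variable ordering is required.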


/xnu-8020.140.41/san/memory/
ubsan_log.c
79 if (n == os_atomic_load(&ubsan_log_tail, acquire)) { in ubsan_log_append()
119 head = os_atomic_load(&ubsan_log_head, relaxed);
121 tail = os_atomic_load(&ubsan_log_tail, relaxed);
157 head = os_atomic_load(&ubsan_log_head, relaxed);
159 tail = os_atomic_load(&ubsan_log_tail, relaxed);
/xnu-8020.140.41/osfmk/kern/
sched_average.c
193 load_now[TH_BUCKET_RUN] = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed); in compute_sched_load()
194 load_now[TH_BUCKET_FIXPRI] = os_atomic_load(&sched_run_buckets[TH_BUCKET_FIXPRI], relaxed); in compute_sched_load()
195 load_now[TH_BUCKET_SHARE_FG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_FG], relaxed); in compute_sched_load()
196 load_now[TH_BUCKET_SHARE_DF] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_DF], relaxed); in compute_sched_load()
197 load_now[TH_BUCKET_SHARE_UT] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_UT], relaxed); in compute_sched_load()
198 load_now[TH_BUCKET_SHARE_BG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_BG], relaxed); in compute_sched_load()
291 uint32_t nthreads = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) - 1; in compute_averages()
smr.c
152 s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in __smr_enter()
156 old_seq = os_atomic_load(&pcpu->c_rd_seq, relaxed); in __smr_enter()
208 s_rd_seq = os_atomic_load(&smr->smr_clock.s_rd_seq, relaxed); in __smr_rd_advance()
268 smr_seq_t seq = os_atomic_load(&it->c_rd_seq, relaxed); in __smr_scan()
328 clk.s_rd_seq = os_atomic_load(&smr->smr_clock.s_rd_seq, seq_cst); in __smr_poll()
337 clk.s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, seq_cst); in __smr_poll()
cpu_quiesce.c
200 assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) & in cpu_quiescent_counter_join()
225 assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) & in cpu_quiescent_counter_ast()
340 checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed); in cpu_quiescent_counter_checkin()
378 checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed); in cpu_quiescent_counter_assert_ast()
mpsc_queue.c
47 mpsc_queue_chain_t head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_restore_batch()
53 head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_restore_batch()
71 tail = os_atomic_load(&q->mpqh_tail, relaxed); in mpsc_queue_dequeue_batch()
77 head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_dequeue_batch()
110 elm = os_atomic_load(&cur->mpqc_next, relaxed); in mpsc_queue_batch_next()
lock_ticket.c
169 tmp.lck_value = os_atomic_load(&lck->lck_value, relaxed); in hw_lck_ticket_destroy_internal()
220 tmp.tcurnext = os_atomic_load(&lck->tcurnext, relaxed); in hw_lck_ticket_held()
260 tmp.tcurnext = os_atomic_load(&lck->tcurnext, relaxed); in hw_lck_ticket_timeout_panic()
286 tmp.tcurnext = os_atomic_load(&lck->tcurnext, relaxed); in lck_ticket_timeout_panic()
392 cticket = os_atomic_load(&lck->cticket, acquire); in hw_lck_ticket_contended()
754 tmp.lck_value = os_atomic_load(&lck->lck_value, relaxed); in hw_lck_ticket_unlock()
786 owner = (thread_t)os_atomic_load(&tlock->lck_owner, relaxed); in lck_ticket_assert_owned()
ast.h
190 #define thread_ast_peek(act, reason) (os_atomic_load(&(act)->ast, relaxed) & (reason))
191 #define thread_ast_get(act) os_atomic_load(&(act)->ast, relaxed)
socd_client.c
87 long available = os_atomic_load(&socd_client_trace_available, relaxed); in socd_client_set_primary_kernelcache_uuid()
108 available = os_atomic_load(&socd_client_trace_available, dependency); in socd_client_trace()
smr.h
151 os_atomic_load(&(ptr)->__smr_ptr, acquire)
161 os_atomic_load(&(ptr)->__smr_ptr, acquire); \
remote_time.c
66 if (!os_atomic_load(&bt_init_flag, acquire)) { in mach_bridge_timer_maintenance()
89 assert(os_atomic_load(&bt_init_flag, relaxed)); in mach_bridge_timer_enable()
182 if (os_atomic_load(&bt_init_flag, acquire)) { in bt_params_get_latest()
505 if (!os_atomic_load(&bt_init_flag, acquire)) { in mach_bridge_remote_time()
restartable.c
390 trrs.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_reset_pcs_ack_IPI()
408 state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_rr_wait_if_needed()
480 state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_rr_wait_if_needed()
518 state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_reset_pcs_ast()
machine.c
525 uint32_t const report_phy_read_delay = os_atomic_load(&report_phy_read_delay_to, relaxed); in ml_io_read()
526 uint32_t const trace_phy_read_delay = os_atomic_load(&trace_phy_read_delay_to, relaxed); in ml_io_read()
650 uint32_t report_phy_write_delay = os_atomic_load(&report_phy_write_delay_to, relaxed); in ml_io_write()
651 uint32_t trace_phy_write_delay = os_atomic_load(&trace_phy_write_delay_to, relaxed); in ml_io_write()
798 cursor = os_atomic_load(&cpu_callback_chain, dependency); in ml_broadcast_cpu_event()
lock_stat.h
181 if (time > os_atomic_load(&dtrace_spin_threshold, relaxed)) { in lck_grp_spin_update_spin()
238 if (time > os_atomic_load(&dtrace_spin_threshold, relaxed)) { in lck_grp_ticket_update_spin()
zalloc_internal.h
520 for (zone_id_t i = 1, num_zones_##i = os_atomic_load(&num_zones, acquire); \
525 last_zone_##z = &zone_array[os_atomic_load(&num_zones, acquire)]; \
624 vm_size_t size = ptoa(os_atomic_load(&zone->z_wired_cur, relaxed)); in zone_size_wired()
hazard.c
468 p = os_atomic_load(&hga->hga_array[i].hg_val, relaxed); in hazard_scan_and_reclaim()
629 start = os_atomic_load(&hazard_test_outstanding, relaxed); in hazard_basic_test()
639 end = os_atomic_load(&hazard_test_outstanding, relaxed); in hazard_basic_test()
/xnu-8020.140.41/osfmk/arm64/
machine_remote_time.c
50 if (os_atomic_load(&bt_init_flag, relaxed)) { in mach_bridge_init_timestamp()
69 if (!os_atomic_load(&bt_init_flag, acquire)) { in mach_bridge_recv_timestamps()
91 if (!os_atomic_load(&bt_init_flag, acquire)) { in mach_bridge_set_params()
loose_ends.c
292 uint32_t const report_phy_read_delay = os_atomic_load(&report_phy_read_delay_to, relaxed); in ml_phys_read_data()
293 uint32_t const trace_phy_read_delay = os_atomic_load(&trace_phy_read_delay_to, relaxed); in ml_phys_read_data()
470 uint32_t const report_phy_write_delay = os_atomic_load(&report_phy_write_delay_to, relaxed); in ml_phys_write_data()
471 uint32_t const trace_phy_write_delay = os_atomic_load(&trace_phy_write_delay_to, relaxed); in ml_phys_write_data()
/xnu-8020.140.41/bsd/kern/
counter_test.c
121 if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
139 if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
156 if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
171 if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
kern_memorystatus_freeze.c
318 processes_frozen = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen, relaxed); in get_thaw_percentage()
319 processes_thawed = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed, relaxed); in get_thaw_percentage()
336 processes_frozen = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen, relaxed); in get_thaw_percentage_fg()
337 processes_thawed_fg = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed_fg, relaxed); in get_thaw_percentage_fg()
353 …processes_frozen_webcontent = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen_webc… in get_thaw_percentage_webcontent()
354 …processes_thawed_webcontent = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed_webc… in get_thaw_percentage_webcontent()
371 processes_frozen = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen, relaxed); in get_thaw_percentage_bg()
372 processes_thawed = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed, relaxed); in get_thaw_percentage_bg()
373 processes_thawed_fg = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_thawed_fg, relaxed); in get_thaw_percentage_bg()
389 processes_frozen = os_atomic_load(&memorystatus_freezer_stats.mfs_processes_frozen, relaxed); in get_thaw_percentage_fg_non_xpc_service()
[all …]
/xnu-8020.140.41/osfmk/tests/
vfp_state_test.c
79 while (os_atomic_load(var, acquire) != num) { in wait_threads()
81 if (os_atomic_load(var, acquire) != num) { in wait_threads()
kernel_tests.c
895 while (os_atomic_load(var, acquire) != num) { in wait_threads()
897 if (os_atomic_load(var, acquire) != num) { in wait_threads()
1086 while (os_atomic_load(&info->threads[i], acquire) == NULL) { in wait_for_waiters()
1096 thread = os_atomic_load(&info->threads[i], relaxed); in wait_for_waiters()
1125 while (os_atomic_load(&info->threads[i], acquire) == NULL) { in exclude_current_waiter()
1132 if (os_atomic_load(&info->threads[i], acquire) == current_thread()) { in exclude_current_waiter()
2346 while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) { in thread_sleep_gate_chain_work()
2365 if (self == os_atomic_load(&info->head.threads[0], acquire)) { in thread_sleep_gate_chain_work()
2438 while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) { in thread_gate_chain_work()
2457 if (self == os_atomic_load(&info->head.threads[0], acquire)) { in thread_gate_chain_work()
[all …]
/xnu-8020.140.41/osfmk/x86_64/
machine_remote_time.c
57 if (!os_atomic_load(&bt_init_flag, relaxed)) { in mach_bridge_register_regwrite_timestamp_callback()
/xnu-8020.140.41/osfmk/arm/
machine_routines_common.c
923 …__ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed… in ml_handle_interrupts_disabled_duration()
930 …errupts_disabled_duration(thread, MAX(os_atomic_load(&stackshot_interrupt_masked_timeout, relaxed)… in ml_handle_stackshot_interrupt_disabled_duration()
936 …__ml_handle_interrupts_disabled_duration(thread, os_atomic_load(&interrupt_masked_timeout, relaxed… in ml_handle_interrupt_handler_duration()
1270 return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed); in ml_get_cpu_number_type()
1274 return os_atomic_load(&cluster_type_num_active_cpus[cluster_type], relaxed); in ml_get_cpu_number_type()
/xnu-8020.140.41/osfmk/i386/
locks_i386_inlines.h
37 #define ordered_load(target) os_atomic_load(target, compiler_acq_rel)
/xnu-8020.140.41/san/coverage/
kcov.c
189 if (__probable(os_atomic_load(&kcov_enabled, relaxed) == 0)) { in trace_pc_guard()
