Searched refs: os_atomic_load_wide (Results 1 – 15 of 15) sorted by relevance
| /xnu-8020.140.41/osfmk/kern/ |
| H A D | counter_common.c | 67 uint64_t current_value = os_atomic_load_wide(zpercpu_get(*counter), relaxed); in scalable_counter_static_init() 151 return os_atomic_load_wide(counter, relaxed); in counter_load() 160 value += os_atomic_load_wide(it, relaxed); in counter_load()
|
| H A D | sched_prim.c | 148 return os_atomic_load_wide(&SCHED(rt_runq)(pset)->earliest_deadline, relaxed); in rt_runq_earliest_deadline() 221 assert(os_atomic_load_wide(&rt_run_queue->earliest_deadline, relaxed) == earliest_deadline); in check_rt_runq_consistency() 4063 if (earliest && (deadline < os_atomic_load_wide(&rt_run_queue->earliest_deadline, relaxed))) { in rt_runq_enqueue() 6612 uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed); in sched_timeshare_consider_maintenance()
|
| H A D | sched_clutch.c | 1396 …scb_cpu_data.scbcd_cpu_data_packed = os_atomic_load_wide(&clutch_bucket_group->scbg_cpu_data.scbcd… in sched_clutch_interactivity_from_cpu_data() 1770 …uint64_t bucket_group_run_count = os_atomic_load_wide(&clutch_bucket_group->scbg_blocked_data.scct… in sched_clutch_bucket_group_timeshare_update()
|
| /xnu-8020.140.41/osfmk/vm/ |
| H A D | analytics.c | 100 e->over_global_limit = os_atomic_load_wide(&vm_add_wire_count_over_global_limit, relaxed); in report_mlock_failures() 101 e->over_user_limit = os_atomic_load_wide(&vm_add_wire_count_over_user_limit, relaxed); in report_mlock_failures()
|
| /xnu-8020.140.41/libkern/os/ |
| H A D | atomic_private.h | 369 #define os_atomic_load_wide(p, m) ({ \ macro 791 #define os_atomic_load_wide(p, m) _os_atomic_error_is_starvable(os_atomic_load_wide) macro
|
| H A D | log_queue.c | 159 os_atomic_load_wide(_v, dependency); \
|
| /xnu-8020.140.41/bsd/kern/ |
| H A D | counter_test.c | 235 uint64_t value = os_atomic_load_wide(&atomic_counter, relaxed);
|
| H A D | kern_exec.c | 5757 local_experiment_factors = os_atomic_load_wide(&libmalloc_experiment_factors, relaxed); in exec_add_apple_strings() 7262 uint64_t value = os_atomic_load_wide(&libmalloc_experiment_factors, relaxed);
|
| H A D | kern_sysctl.c | 3907 old_value = os_atomic_load_wide(ptr, relaxed); in sysctl_r_64bit_atomic()
|
| H A D | kern_event.c | 8882 buf[nknotes] = os_atomic_load_wide(&kn->kn_udata, relaxed); in klist_copy_udata()
|
| /xnu-8020.140.41/osfmk/arm/ |
| H A D | machine_routines_common.c | 369 return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) / in perfcontrol_callout_stat_avg() 370 os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed); in perfcontrol_callout_stat_avg() 912 uint64_t const old_duration = os_atomic_load_wide(&thread->machine.int_time_mt, relaxed); in __ml_handle_interrupts_disabled_duration()
|
| /xnu-8020.140.41/bsd/dev/ |
| H A D | monotonic.c | 361 uint64_t value = os_atomic_load_wide(&mt_retrograde, relaxed);
|
| /xnu-8020.140.41/osfmk/arm/commpage/ |
| H A D | commpage.c | 759 uint64_t saved_data = os_atomic_load_wide(approx_time_base, relaxed); in commpage_update_mach_approximate_time()
|
| /xnu-8020.140.41/doc/ |
| H A D | atomics.md | 165 compile to a plain load or store. `os_atomic_load_wide` and
|
| /xnu-8020.140.41/bsd/pthread/ |
| H A D | pthread_workqueue.c | 322 return os_atomic_load_wide(&wq->wq_thactive, relaxed); in _wq_thactive() 1823 uint64_t lastblocked_ts = os_atomic_load_wide(lastblocked_tsp, relaxed); in workq_thread_is_busy()
|