| /xnu-10063.101.15/tools/cocci/ |
| OSAtomic_rewrite.cocci |
|   11  + os_atomic_inc_orig(E, relaxed)
|   14  + os_atomic_inc_orig(E, relaxed)
|   17  + os_atomic_inc_orig(E, relaxed)
|   20  + os_atomic_inc_orig(E, relaxed)
|   23  + os_atomic_inc_orig(E, relaxed)
|   26  + os_atomic_inc_orig(E, relaxed)
|   29  + os_atomic_inc_orig(E, relaxed)
|   32  + os_atomic_inc_orig(E, relaxed)
|   35  + os_atomic_inc_orig(E, relaxed)
|   38  + os_atomic_inc_orig(E, relaxed)
|   [all …]
|
| hw_atomic_rewrite.cocci |
|   11  + os_atomic_dec_orig(E, relaxed)
|   14  + os_atomic_dec(E, relaxed)
|   17  + os_atomic_sub_orig(E, F, relaxed)
|   20  + os_atomic_sub(E, F, relaxed)
|   23  + os_atomic_inc_orig(E, relaxed)
|   26  + os_atomic_inc(E, relaxed)
|   29  + os_atomic_add_orig(E, F, relaxed)
|   32  + os_atomic_add(E, F, relaxed)
|   39  + os_atomic_inc_orig(E, relaxed)
|   42  + os_atomic_inc(E, relaxed)
|   [all …]
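These Coccinelle scripts appear to migrate legacy atomic call sites onto the os_atomic_* API shown in the `+` replacement lines above. A minimal sketch (not taken from the tree, assuming XNU's <os/atomic_private.h>) of the two target shapes the rules converge on: the value-returning _orig form versus the plain form whose result is unused.

    #include <os/atomic_private.h>

    static unsigned int refcnt;

    static unsigned int
    take_ref(void)
    {
            /* _orig variants return the value the location held *before* the RMW */
            return os_atomic_inc_orig(&refcnt, relaxed);
    }

    static void
    drop_ref(void)
    {
            /* result unused, so the non-_orig variant is enough */
            os_atomic_dec(&refcnt, relaxed);
    }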
|
| /xnu-10063.101.15/tests/ |
| os_atomic.cpp |
|   16  T_ASSERT_EQ(os_atomic_inc_orig(&i, relaxed), 0, "atomic inc");
|   17  T_ASSERT_EQ(os_atomic_cmpxchg(&i, 1, 0, relaxed), true, "os_atomic_cmpxchg");
|   18  os_atomic_rmw_loop(&i, a, b, relaxed, {
|   22  T_ASSERT_EQ(os_atomic_inc_orig(&old_i, relaxed), 0, "atomic inc");
|   23  T_ASSERT_EQ(os_atomic_cmpxchg(&old_i, 1, 0, relaxed), true, "os_atomic_cmpxchg");
|   24  os_atomic_rmw_loop(&old_i, a, b, relaxed, {
|   28  T_ASSERT_EQ(os_atomic_inc_orig(&v_i, relaxed), 0, "atomic inc");
|   29  T_ASSERT_EQ(os_atomic_cmpxchg(&v_i, 1, 0, relaxed), true, "os_atomic_cmpxchg");
|   30  os_atomic_rmw_loop(&v_i, a, b, relaxed, {
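The test exercises the three main entry points of the API: a fetch-style RMW (os_atomic_inc_orig), compare-and-swap (os_atomic_cmpxchg), and the retry-loop macro. A hedged sketch of how os_atomic_rmw_loop is typically written; the function and variable names here are illustrative, not from the test.

    #include <os/atomic_private.h>

    static int counter;

    /* Bump `counter` but never past `limit`; returns the value after the update. */
    static int
    saturating_inc(int limit)
    {
            int ov, nv;

            os_atomic_rmw_loop(&counter, ov, nv, relaxed, {
                    if (ov >= limit) {
                            /* abandon the loop without performing the store */
                            os_atomic_rmw_loop_give_up(return ov);
                    }
                    nv = ov + 1;
            });
            return nv;
    }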
|
| /xnu-10063.101.15/libkern/gen/ |
| OSAtomicOperations.c |
|   101  return os_atomic_add_orig(address, (SInt8)amount, relaxed); in OSAddAtomic8()
|   107  return os_atomic_add_orig(address, (SInt16)amount, relaxed); in OSAddAtomic16()
|   115  return os_atomic_add_orig(address, amount, relaxed); in OSAddAtomic()
|   125  return os_atomic_add_orig(aligned_address, amount, relaxed); in OSAddAtomic64()
|   132  return os_atomic_add_orig(address, theAmount, relaxed); in OSAddAtomicLong()
|   139  return os_atomic_inc_orig(value, relaxed); in OSIncrementAtomic()
|   146  return os_atomic_dec_orig(value, relaxed); in OSDecrementAtomic()
|   153  return os_atomic_and_orig(value, mask, relaxed); in OSBitAndAtomic()
|   160  return os_atomic_or_orig(value, mask, relaxed); in OSBitOrAtomic()
|   167  return os_atomic_xor_orig(value, mask, relaxed); in OSBitXorAtomic()
|   [all …]
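These hits are the legacy libkern OSAtomic entry points reimplemented as thin wrappers over os_atomic. The legacy calls return the value the target held before the operation, hence the _orig forms. A rough reconstruction of the wrapper shape, with the declaration assumed from <libkern/OSAtomic.h> rather than copied from this file:

    #include <libkern/OSAtomic.h>
    #include <os/atomic_private.h>

    SInt32
    OSAddAtomic(SInt32 amount, volatile SInt32 *address)
    {
            /* legacy contract: return the pre-addition value */
            return os_atomic_add_orig(address, amount, relaxed);
    }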
|
| /xnu-10063.101.15/osfmk/kern/ |
| counter_common.c |
|   67   uint64_t current_value = os_atomic_load_wide(zpercpu_get(*counter), relaxed); in scalable_counter_static_init()
|   73   os_atomic_store_wide(zpercpu_get(*counter), current_value, relaxed); in scalable_counter_static_init()
|   88   os_atomic_store_wide(counter, 0, relaxed); in counter_alloc()
|   109  os_atomic_add(counter, amount, relaxed); in counter_add()
|   116  os_atomic_inc(counter, relaxed); in counter_inc()
|   123  os_atomic_dec(counter, relaxed); in counter_dec()
|   151  return os_atomic_load_wide(counter, relaxed); in counter_load()
|   160  value += os_atomic_load_wide(it, relaxed); in counter_load()
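counter_common.c implements the generic scalable (per-CPU) counter: updates touch only the calling CPU's 64-bit slot, and a read sums every slot, which is what the counter_load() hits above show. A hedged sketch of that read side, with the type and helper names assumed from XNU's <kern/counter.h> and the zalloc per-CPU helpers:

    #include <kern/counter.h>
    #include <kern/zalloc.h>
    #include <os/atomic_private.h>

    /* Sum the per-CPU slots with relaxed loads; the result is a snapshot,
     * not a linearizable total, which is fine for statistics counters. */
    static uint64_t
    scalable_counter_read(scalable_counter_t counter)
    {
            uint64_t value = 0;

            zpercpu_foreach(it, counter) {
                    value += os_atomic_load_wide(it, relaxed);
            }
            return value;
    }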
|
| mpsc_queue.c |
|   62   mpsc_queue_chain_t head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_restore_batch()
|   64   os_atomic_store(&last->mpqc_next, head, relaxed); in mpsc_queue_restore_batch()
|   68   head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_restore_batch()
|   72   os_atomic_store(&last->mpqc_next, head, relaxed); in mpsc_queue_restore_batch()
|   75   os_atomic_store(&q->mpqh_head.mpqc_next, first, relaxed); in mpsc_queue_restore_batch()
|   86   tail = os_atomic_load(&q->mpqh_tail, relaxed); in mpsc_queue_dequeue_batch()
|   92   head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_dequeue_batch()
|   96   os_atomic_store(&q->mpqh_head.mpqc_next, NULL, relaxed); in mpsc_queue_dequeue_batch()
|   125  elm = os_atomic_load(&cur->mpqc_next, relaxed); in mpsc_queue_batch_next()
|   305  os_atomic_andnot(&dq->mpd_state, MPSC_QUEUE_STATE_WAKEUP, relaxed); in _mpsc_daemon_queue_drain()
|   [all …]
|
| sched_average.c |
|   193  load_now[TH_BUCKET_RUN] = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed); in compute_sched_load()
|   194  load_now[TH_BUCKET_FIXPRI] = os_atomic_load(&sched_run_buckets[TH_BUCKET_FIXPRI], relaxed); in compute_sched_load()
|   195  load_now[TH_BUCKET_SHARE_FG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_FG], relaxed); in compute_sched_load()
|   196  load_now[TH_BUCKET_SHARE_DF] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_DF], relaxed); in compute_sched_load()
|   197  load_now[TH_BUCKET_SHARE_UT] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_UT], relaxed); in compute_sched_load()
|   198  load_now[TH_BUCKET_SHARE_BG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_BG], relaxed); in compute_sched_load()
|   291  uint32_t nthreads = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) - 1; in compute_averages()
|
| exclaves_boot.c |
|   139  EXCLAVES_BS_NOT_STARTED, relaxed); in exclaves_check_sk()
|   149  while (os_atomic_load(&exclaves_boot_status, relaxed) < status) { in exclaves_boot_status_wait()
|   165  assert3u(status, >, os_atomic_load(&exclaves_boot_status, relaxed)); in exclaves_boot_status_set()
|   186  os_atomic_load(&exclaves_boot_status, relaxed); in exclaves_boot_stage_2()
|   237  os_atomic_load(&exclaves_boot_status, relaxed); in exclaves_boot_exclavekit()
|   382  os_atomic_load(&exclaves_boot_status, relaxed); in exclaves_get_boot_stage()
|
| ast.h |
|   188  #define thread_ast_set(act, reason) ((void)os_atomic_or(&(act)->ast, (reason), relaxed))
|   189  #define thread_ast_clear(act, reason) ((void)os_atomic_andnot(&(act)->ast, (reason), relaxed))
|   190  #define thread_ast_peek(act, reason) (os_atomic_load(&(act)->ast, relaxed) & (reason))
|   191  #define thread_ast_get(act) os_atomic_load(&(act)->ast, relaxed)
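These macros give a self-contained picture of the AST bit interface: set and clear are atomic or/andnot operations, peek and get are plain relaxed loads. A small hedged usage sketch follows; AST_URGENT is one of the real reason bits, but the helper itself is illustrative.

    #include <stdbool.h>
    #include <kern/ast.h>

    /* Returns true if an urgent AST was pending and clears it.
     * Note the peek-then-clear pair is not atomic as a whole; callers are
     * assumed to hold the appropriate thread state (e.g. run on the thread
     * itself) so the bit cannot be re-set in between. */
    static bool
    consume_urgent_ast(thread_t thread)
    {
            if (thread_ast_peek(thread, AST_URGENT)) {
                    thread_ast_clear(thread, AST_URGENT);
                    return true;
            }
            return false;
    }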
|
| lock_ptr.c |
|   62   hw_lck_ptr_t tmp = os_atomic_load(lck, relaxed); in __hw_lck_ptr_invalid_panic()
|   113  hw_lck_ptr_t tmp = os_atomic_load(lck, relaxed); in hw_lck_ptr_destroy()
|   128  os_atomic_store(lck, tmp, relaxed); in hw_lck_ptr_destroy()
|   134  return os_atomic_load(lck, relaxed).lck_ptr_locked; in hw_lck_ptr_held()
|   147  tmp = os_atomic_load(lck, relaxed); in hw_lck_ptr_timeout_panic()
|   196  os_atomic_store(&pnode->lsm_next, txn.txn_slot, relaxed); in hw_lck_ptr_contended()
|   235  os_atomic_store(&nnode->lsm_ready, 1, relaxed); in hw_lck_ptr_contended()
|   271  tmp = os_atomic_load(lck, relaxed); in hw_lck_ptr_lock_fastpath()
|
| lock_group.c |
|   274  os_atomic_inc(cnt, relaxed); in lck_grp_reference()
|   285  os_atomic_dec(cnt, relaxed); in lck_grp_deallocate()
|   306  os_atomic_or(&lck_debug_state.lds_value, bit, relaxed); in lck_grp_enable_feature()
|   323  os_atomic_andnot(&lck_debug_state.lds_value, bit, relaxed); in lck_grp_disable_feature()
|   352  needed = os_atomic_load(&lck_grp_table.cidt_count, relaxed); in host_lockgroup_info()
|   443  os_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed); in lck_attr_setdebug()
|   449  os_atomic_andnot(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed); in lck_attr_cleardebug()
|   455  os_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY, relaxed); in lck_attr_rw_shared_priority()
|   494  __unused uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed); in lck_grp_stat_inc()
|   507  __unused uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed); in lck_grp_inc_time_stats()
|   [all …]
|
| machine.c |
|   154  os_atomic_inc(&processor_avail_count, relaxed); in processor_up_update_counts()
|   156  os_atomic_inc(&processor_avail_count_user, relaxed); in processor_up_update_counts()
|   159  os_atomic_inc(&primary_processor_avail_count, relaxed); in processor_up_update_counts()
|   161  os_atomic_inc(&primary_processor_avail_count_user, relaxed); in processor_up_update_counts()
|   272  os_atomic_dec(&processor_avail_count, relaxed); in processor_down_update_counts()
|   274  os_atomic_dec(&processor_avail_count_user, relaxed); in processor_down_update_counts()
|   277  os_atomic_dec(&primary_processor_avail_count, relaxed); in processor_down_update_counts()
|   279  os_atomic_dec(&primary_processor_avail_count_user, relaxed); in processor_down_update_counts()
|   686  os_atomic_store(&report_phy_write_delay_to, 0, relaxed); in ml_io_init_timeouts()
|   687  os_atomic_store(&report_phy_read_delay_to, 0, relaxed); in ml_io_init_timeouts()
|   [all …]
|
| smr.c |
|   650   s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in __smr_enter()
|   656   os_atomic_store(&pcpu->c_rd_seq, s_wr_seq | sleepable, relaxed); in __smr_enter()
|   723   os_atomic_store(&smrw->sect_waiter, NULL, relaxed); in __smr_wake_oncore_sleepers()
|   775   os_atomic_store(&pcpu->stall_rd_seq, t->smrt_seq, relaxed); in smr_mark_active_trackers_stalled()
|   898   os_atomic_rmw_loop(&smr->smr_clock.s_rd_seq, o_seq, rd_seq, relaxed, { in __smr_rd_advance()
|   1014  if (lock_cmpxchg(&smrw->sect_waiter, NULL, self, relaxed)) { in __smr_wait_for_oncore()
|   1031  return os_atomic_load(&pcpu->c_rd_seq, relaxed); in __smr_wait_for_oncore()
|   1066  clk.s_wr_seq, goal, &clk.s_wr_seq, relaxed)) { in __smr_scan()
|   1103  smr_seq_t seq = os_atomic_load(&pcpu->c_rd_seq, relaxed); in __smr_scan()
|   1149  smr_seq_t seq = os_atomic_load(&pcpu->stall_rd_seq, relaxed); in __smr_scan()
|   [all …]
|
| exclaves_inspection.c |
|   132  os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed); in clear_pending_threads_stackshot()
|   149  os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed); in clear_pending_threads_kperf()
|   165  os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed); in clear_stackshot_queue()
|   181  os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed); in clear_kperf_queue()
|   297  …_store(&current_thread()->th_exclaves_inspection_state, TH_EXCLAVES_INSPECTION_NOINSPECT, relaxed); in exclaves_collect_threads_thread()
|   376  assert(!os_atomic_load(&exclaves_inspection_initialized, relaxed)); in exclaves_inspection_init()
|   482  …assert((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_NO… in exclaves_inspection_check_ast()
|   489  …while ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_ST… in exclaves_inspection_check_ast()
|   496  …if ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_KPERF… in exclaves_inspection_check_ast()
|
| restartable.c |
|   355  (uint8_t)TRR_FAULT_PENDING, relaxed); in thread_reset_pcs_will_fault()
|   371  state.trr_value, relaxed); in thread_reset_pcs_done_faulting()
|   390  trrs.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_reset_pcs_ack_IPI()
|   398  trrs.trr_value, relaxed); in thread_reset_pcs_ack_IPI()
|   408  state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_rr_wait_if_needed()
|   471  nstate, relaxed)) { in thread_rr_wait_if_needed()
|   480  state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_rr_wait_if_needed()
|   518  state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_reset_pcs_ast()
|
| test_lock.c |
|   46   tmp.lck_value = os_atomic_load(&lck->lck_value, relaxed); in hw_lck_ticket_test_wait_for_delta()
|   116  if (os_atomic_load(&sched_preemption_disable_threshold_mt, relaxed) < sane_us2abs(20 * 1000)) { in hw_lck_ticket_allow_invalid_test()
|   381  os_atomic_inc(&cctx->ctx->calls_done, relaxed); in smr_sleepable_stress_cb()
|   393  os_atomic_inc(&ctx->calls_sent, relaxed); in smr_sleepable_stress_make_call()
|   417  return os_atomic_inc(&ctx->idx, relaxed); in smr_sleepable_stress_idx()
|   478  os_atomic_inc(&ctx->syncs_done, relaxed); in smr_sleepable_stress_worker()
|   482  os_atomic_inc(&ctx->barriers_done, relaxed); in smr_sleepable_stress_worker()
|   489  if (os_atomic_dec(&ctx->active, relaxed) == 0) { in smr_sleepable_stress_worker()
|   535  if (os_atomic_dec(&ctx.active, relaxed) == 0) { in smr_sleepable_stress_test()
|
| sched_clutch.c |
|   921   return (int)os_atomic_load(&sched_clutch_global_bucket_load[bucket], relaxed); in sched_clutch_global_bucket_load_get()
|   1136  os_atomic_store(&clutch_bucket_group->scbg_timeshare_tick, 0, relaxed); in sched_clutch_bucket_group_init()
|   1137  os_atomic_store(&clutch_bucket_group->scbg_pri_shift, INT8_MAX, relaxed); in sched_clutch_bucket_group_init()
|   1138  os_atomic_store(&clutch_bucket_group->scbg_preferred_cluster, pset0.pset_cluster_id, relaxed); in sched_clutch_bucket_group_init()
|   1145  …pu_data.scbcd_cpu_blocked, (clutch_cpu_data_t)sched_clutch_bucket_group_adjust_threshold, relaxed); in sched_clutch_bucket_group_init()
|   1169  os_atomic_store(&clutch->sc_thr_count, 0, relaxed); in sched_clutch_init_with_thread_group()
|   1189  assert(os_atomic_load(&clutch->sc_thr_count, relaxed) == 0); in sched_clutch_destroy()
|   1210  return os_atomic_load(&clutch_bucket_group->scbg_preferred_cluster, relaxed); in sched_edge_clutch_bucket_group_preferred_cluster()
|   1283  … os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_FIXPRI], relaxed); OS_FALLTHROUGH; in sched_edge_cluster_cumulative_count_incr()
|   1284  …os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_FG], relaxed); OS_FALLTHROUGH; in sched_edge_cluster_cumulative_count_incr()
|   [all …]
|
| /xnu-10063.101.15/libsyscall/mach/ |
| vm_reclaim.c |
|   136  idx = os_atomic_load_wide(&indices->tail, relaxed); in mach_vm_reclaim_mark_free()
|   137  head = os_atomic_load_wide(&indices->head, relaxed); in mach_vm_reclaim_mark_free()
|   146  head = os_atomic_load_wide(&indices->head, relaxed); in mach_vm_reclaim_mark_free()
|   157  os_atomic_inc(&indices->tail, relaxed); in mach_vm_reclaim_mark_free()
|   177  head = os_atomic_load_wide(&indices->head, relaxed); in mach_vm_reclaim_mark_used()
|   188  original_tail = os_atomic_load_wide(&indices->tail, relaxed); in mach_vm_reclaim_mark_used()
|   191  os_atomic_store_wide(&indices->tail, id, relaxed); in mach_vm_reclaim_mark_used()
|   193  busy = os_atomic_load_wide(&indices->busy, relaxed); in mach_vm_reclaim_mark_used()
|   196  os_atomic_store_wide(&indices->tail, original_tail, relaxed); in mach_vm_reclaim_mark_used()
|   206  os_atomic_store_wide(&indices->tail, original_tail, relaxed); in mach_vm_reclaim_mark_used()
|   [all …]
|
| /xnu-10063.101.15/san/memory/ |
| ubsan_log.c |
|   77   os_atomic_rmw_loop(&ubsan_log_next, i, n, relaxed, { in ubsan_log_append()
|   90   os_atomic_rmw_loop(&ubsan_log_head, e, n, relaxed, { in ubsan_log_append()
|   119  head = os_atomic_load(&ubsan_log_head, relaxed);
|   121  tail = os_atomic_load(&ubsan_log_tail, relaxed);
|   157  head = os_atomic_load(&ubsan_log_head, relaxed);
|   159  tail = os_atomic_load(&ubsan_log_tail, relaxed);
|   172  os_atomic_store(&ubsan_log_tail, head, relaxed);
|
| /xnu-10063.101.15/osfmk/bank/ |
| bank_internal.h |
|   120  (os_atomic_inc_orig(&(elem)->bt_made, relaxed))
|   123  (os_atomic_dec_orig(&(elem)->bt_made, relaxed))
|   126  (os_atomic_sub_orig(&(elem)->bt_made, (num), relaxed))
|   171  (os_atomic_inc_orig(&(elem)->ba_made, relaxed))
|   174  (os_atomic_dec_orig(&(elem)->ba_made, relaxed))
|   177  (os_atomic_sub_orig(&(elem)->ba_made, (num), relaxed))
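The bank_task (bt_made) and bank_account (ba_made) "made" counters above all use the fetch-style _orig forms. A hedged sketch of why that shape is useful: the pre-modification value lets the caller detect the last-reference transition without a second load. Only the bt_made field name comes from the macros above; the function and cleanup hook are illustrative.

    #include <bank/bank_internal.h>
    #include <os/atomic_private.h>

    static void
    bank_task_made_release(struct bank_task *bt)
    {
            if (os_atomic_dec_orig(&bt->bt_made, relaxed) == 1) {
                    /* the count just went 1 -> 0 */
                    bank_task_made_gone(bt);    /* hypothetical cleanup hook */
            }
    }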
|
| /xnu-10063.101.15/osfmk/arm/ |
| counter.c |
|   40  os_atomic_add(zpercpu_get(*counter), amount, relaxed); in counter_add()
|   47  os_atomic_inc(zpercpu_get(*counter), relaxed); in counter_inc()
|   54  os_atomic_dec(zpercpu_get(*counter), relaxed); in counter_dec()
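This is the update side of the scalable counter sketched earlier for counter_common.c: each update stays on the calling CPU's slot, so writers do not contend across CPUs. A hedged sketch mirroring the counter_add() hit above, with the signature assumed:

    #include <kern/counter.h>
    #include <kern/zalloc.h>
    #include <os/atomic_private.h>

    static void
    scalable_counter_bump(scalable_counter_t *counter, uint64_t amount)
    {
            /* zpercpu_get() resolves to the calling CPU's slot; the caller is
             * assumed to have preemption disabled so the slot cannot change
             * underneath us. */
            os_atomic_add(zpercpu_get(*counter), amount, relaxed);
    }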
|
| preemption_disable.c |
|   320  const uint64_t max_duration = os_atomic_load(&pcpu->pdp_max_mach_duration, relaxed); in _preemption_disable_snap_end()
|   335  os_atomic_store(&pcpu->pdp_max_mach_duration, gross_duration, relaxed); in _preemption_disable_snap_end()
|   388  uint64_t const threshold = os_atomic_load(&sched_preemption_disable_threshold_mt, relaxed); in _collect_preemption_disable_measurement()
|   488  os_atomic_store(&pcpu->pdp_max_mach_duration, 0, relaxed); in preemption_disable_reset_max_durations()
|   499  durations[cpu++] = os_atomic_load(&pcpu->pdp_max_mach_duration, relaxed); in preemption_disable_get_max_durations()
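The pdp_max_mach_duration hits above suggest a relaxed "track the maximum" pattern: load the current maximum, compare, and store only when the new duration is larger. A hedged sketch of that shape; the helper name is illustrative, and the tolerance for the non-atomic read-compare-write sequence rests on the value being per-CPU, advisory diagnostics data.

    #include <os/atomic_private.h>

    static void
    update_max_duration(uint64_t *max_slot, uint64_t duration)
    {
            /* Not an atomic max: an update between the load and the store could
             * be lost, which is acceptable for per-CPU diagnostic statistics. */
            if (duration > os_atomic_load(max_slot, relaxed)) {
                    os_atomic_store(max_slot, duration, relaxed);
            }
    }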
|
| /xnu-10063.101.15/san/coverage/ |
| kcov.c |
|   117  os_atomic_add(&kcov_enabled, 1, relaxed); in kcov_enable()
|   123  os_atomic_sub(&kcov_enabled, 1, relaxed); in kcov_disable()
|   156  os_atomic_store(&kcov_enabled, 0, relaxed); in kcov_panic_disable()
|   190  if (__probable(os_atomic_load(&kcov_enabled, relaxed) == 0)) { in trace_pc_guard()
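The enable/disable hits maintain a reference count, and the instrumentation hot path gates on a single relaxed load wrapped in __probable (the XNU branch-prediction hint). A hedged sketch of that fast-path gate; the variable and function names are illustrative, not from kcov.c.

    #include <os/atomic_private.h>

    static unsigned int coverage_enabled;

    static void
    trace_hook(void)
    {
            /* Fast path: one relaxed load; tracing is off almost always. */
            if (__probable(os_atomic_load(&coverage_enabled, relaxed) == 0)) {
                    return;
            }
            /* ... slow path: record the coverage event ... */
    }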
|
| /xnu-10063.101.15/bsd/skywalk/nexus/flowswitch/flow/ |
| flow_route.c |
|   460   os_atomic_andnot(&fr->fr_flags, FLOWRTF_DELETED, relaxed); in flow_route_configure()
|   508   os_atomic_or(&fr->fr_flags, FLOWRTF_GATEWAY, relaxed); in flow_route_configure()
|   513   os_atomic_or(&fr->fr_flags, FLOWRTF_ONLINK, relaxed); in flow_route_configure()
|   526   os_atomic_or(&fr->fr_flags, FLOWRTF_STABLE_ADDR, relaxed); in flow_route_configure()
|   528   os_atomic_andnot(&fr->fr_flags, FLOWRTF_STABLE_ADDR, relaxed); in flow_route_configure()
|   606   os_atomic_inc(&fr->fr_want_configure, relaxed); in flow_route_find()
|   689   os_atomic_inc(&fr->fr_want_configure, relaxed); in flow_route_find()
|   720   os_atomic_or(&fr->fr_flags, FLOWRTF_ATTACHED, relaxed); in flow_route_find()
|   887   os_atomic_andnot(&fr->fr_flags, FLOWRTF_ATTACHED, relaxed); in flow_route_bucket_purge_common()
|   1059  os_atomic_inc(&fr->fr_want_configure, relaxed); in flow_route_ev_callback()
|   [all …]
|
| /xnu-10063.101.15/libkern/os/ |
| log_mem.c |
|   179  os_atomic_inc(&lm->lm_cnt_allocations, relaxed); in logmem_alloc_impl()
|   182  os_atomic_inc(&lm->lm_cnt_failed_lmoff, relaxed); in logmem_alloc_impl()
|   187  os_atomic_inc(&lm->lm_cnt_failed_size, relaxed); in logmem_alloc_impl()
|   197  os_atomic_inc(&lm->lm_cnt_failed_full, relaxed); in logmem_alloc_impl()
|   202  os_atomic_sub(&lm->lm_cnt_free, (uint32_t)*amount, relaxed); in logmem_alloc_impl()
|   237  os_atomic_add(&lm->lm_cnt_free, (uint32_t)amount, relaxed); in logmem_free_impl()
|