
Searched refs:relaxed (Results 1 – 25 of 169) sorted by relevance


/xnu-8792.61.2/tools/cocci/
OSAtomic_rewrite.cocci
11 + os_atomic_inc_orig(E, relaxed)
14 + os_atomic_inc_orig(E, relaxed)
17 + os_atomic_inc_orig(E, relaxed)
20 + os_atomic_inc_orig(E, relaxed)
23 + os_atomic_inc_orig(E, relaxed)
26 + os_atomic_inc_orig(E, relaxed)
29 + os_atomic_inc_orig(E, relaxed)
32 + os_atomic_inc_orig(E, relaxed)
35 + os_atomic_inc_orig(E, relaxed)
38 + os_atomic_inc_orig(E, relaxed)
[all …]
mcache_atomic_rewrite.cocci
11 + os_atomic_inc_orig(E, relaxed)
14 + os_atomic_inc(E, relaxed)
17 + os_atomic_inc_orig(E, relaxed)
20 + os_atomic_inc(E, relaxed)
23 + os_atomic_inc_orig(E, relaxed)
26 + os_atomic_inc(E, relaxed)
29 + os_atomic_dec_orig(E, relaxed)
32 + os_atomic_dec(E, relaxed)
35 + os_atomic_dec_orig(E, relaxed)
38 + os_atomic_dec(E, relaxed)
[all …]
hw_atomic_rewrite.cocci
11 + os_atomic_dec_orig(E, relaxed)
14 + os_atomic_dec(E, relaxed)
17 + os_atomic_sub_orig(E, F, relaxed)
20 + os_atomic_sub(E, F, relaxed)
23 + os_atomic_inc_orig(E, relaxed)
26 + os_atomic_inc(E, relaxed)
29 + os_atomic_add_orig(E, F, relaxed)
32 + os_atomic_add(E, F, relaxed)
39 + os_atomic_inc_orig(E, relaxed)
42 + os_atomic_inc(E, relaxed)
[all …]
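
These Coccinelle semantic patches rewrite xnu's legacy atomic calls into the os_atomic family with an explicit memory order. The one behavioral subtlety the rules preserve is the _orig suffix: os_atomic_inc returns the new value, os_atomic_inc_orig the value before the increment. A minimal C11 sketch of that distinction (illustrative names, not xnu code):

#include <stdatomic.h>

/* ~ os_atomic_inc(p, relaxed): returns the incremented value. */
static int
sketch_inc(atomic_int *p)
{
    return atomic_fetch_add_explicit(p, 1, memory_order_relaxed) + 1;
}

/* ~ os_atomic_inc_orig(p, relaxed): returns the pre-increment value. */
static int
sketch_inc_orig(atomic_int *p)
{
    return atomic_fetch_add_explicit(p, 1, memory_order_relaxed);
}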
/xnu-8792.61.2/tests/
os_atomic.cpp
16 T_ASSERT_EQ(os_atomic_inc_orig(&i, relaxed), 0, "atomic inc");
17 T_ASSERT_EQ(os_atomic_cmpxchg(&i, 1, 0, relaxed), true, "os_atomic_cmpxchg");
18 os_atomic_rmw_loop(&i, a, b, relaxed, {
22 T_ASSERT_EQ(os_atomic_inc_orig(&old_i, relaxed), 0, "atomic inc");
23 T_ASSERT_EQ(os_atomic_cmpxchg(&old_i, 1, 0, relaxed), true, "os_atomic_cmpxchg");
24 os_atomic_rmw_loop(&old_i, a, b, relaxed, {
28 T_ASSERT_EQ(os_atomic_inc_orig(&v_i, relaxed), 0, "atomic inc");
29 T_ASSERT_EQ(os_atomic_cmpxchg(&v_i, 1, 0, relaxed), true, "os_atomic_cmpxchg");
30 os_atomic_rmw_loop(&v_i, a, b, relaxed, {
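
The test drives three shapes of the API: a fetch-style increment, a compare-and-swap, and os_atomic_rmw_loop, which retries a body until a CAS of the recomputed value lands. A stand-alone C11 sketch of the rmw-loop pattern, assuming a body that doubles the value (sketch_rmw_double is an illustrative name):

#include <stdatomic.h>

/* Retry loop: recompute nv from ov, CAS it in, start over on failure.
 * compare_exchange_weak reloads ov for us when the CAS loses a race. */
static int
sketch_rmw_double(atomic_int *p)
{
    int ov = atomic_load_explicit(p, memory_order_relaxed);
    int nv;
    do {
        nv = ov * 2;    /* the "loop body" */
    } while (!atomic_compare_exchange_weak_explicit(p, &ov, nv,
        memory_order_relaxed, memory_order_relaxed));
    return nv;
}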
/xnu-8792.61.2/libkern/gen/
OSAtomicOperations.c
105 return os_atomic_add_orig(address, (SInt8)amount, relaxed); in OSAddAtomic8()
111 return os_atomic_add_orig(address, (SInt16)amount, relaxed); in OSAddAtomic16()
119 return os_atomic_add_orig(address, amount, relaxed); in OSAddAtomic()
129 return os_atomic_add_orig(aligned_address, amount, relaxed); in OSAddAtomic64()
136 return os_atomic_add_orig(address, theAmount, relaxed); in OSAddAtomicLong()
143 return os_atomic_inc_orig(value, relaxed); in OSIncrementAtomic()
150 return os_atomic_dec_orig(value, relaxed); in OSDecrementAtomic()
157 return os_atomic_and_orig(value, mask, relaxed); in OSBitAndAtomic()
164 return os_atomic_or_orig(value, mask, relaxed); in OSBitOrAtomic()
171 return os_atomic_xor_orig(value, mask, relaxed); in OSBitXorAtomic()
[all …]
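
These legacy OSAddAtomic/OSIncrementAtomic entry points are thin shims that return the pre-operation value, which maps directly onto a relaxed fetch-add. A hedged C11 sketch of the shim pattern (sketch_add_atomic32 is an illustrative name, not the xnu declaration):

#include <stdatomic.h>
#include <stdint.h>

/* Legacy-style shim: return the value *before* the addition, i.e. a
 * relaxed fetch-add. */
static int32_t
sketch_add_atomic32(int32_t amount, _Atomic int32_t *address)
{
    return atomic_fetch_add_explicit(address, amount,
        memory_order_relaxed);
}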
/xnu-8792.61.2/osfmk/kern/
counter_common.c
67 uint64_t current_value = os_atomic_load_wide(zpercpu_get(*counter), relaxed); in scalable_counter_static_init()
73 os_atomic_store_wide(zpercpu_get(*counter), current_value, relaxed); in scalable_counter_static_init()
88 os_atomic_store_wide(counter, 0, relaxed); in counter_alloc()
109 os_atomic_add(counter, amount, relaxed); in counter_add()
116 os_atomic_inc(counter, relaxed); in counter_inc()
123 os_atomic_dec(counter, relaxed); in counter_dec()
151 return os_atomic_load_wide(counter, relaxed); in counter_load()
160 value += os_atomic_load_wide(it, relaxed); in counter_load()
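
The scalable counter pattern here: writers bump only their own per-CPU slot with a relaxed add, and counter_load() sums every slot with relaxed loads, accepting a racy but monotonic snapshot. A minimal C11 sketch, assuming a fixed CPU count in place of zpercpu (all names illustrative):

#include <stdatomic.h>
#include <stdint.h>

#define SKETCH_NCPUS 8    /* stand-in for the real per-CPU allocation */

static _Atomic uint64_t sketch_counter[SKETCH_NCPUS];

/* Writers touch only their own slot: no cache-line contention. */
static void
sketch_counter_add(unsigned cpu, uint64_t amount)
{
    atomic_fetch_add_explicit(&sketch_counter[cpu], amount,
        memory_order_relaxed);
}

/* Readers sum all slots; the result is a racy but usable snapshot. */
static uint64_t
sketch_counter_load(void)
{
    uint64_t value = 0;
    for (unsigned i = 0; i < SKETCH_NCPUS; i++) {
        value += atomic_load_explicit(&sketch_counter[i],
            memory_order_relaxed);
    }
    return value;
}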
mpsc_queue.c
62 mpsc_queue_chain_t head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_restore_batch()
64 os_atomic_store(&last->mpqc_next, head, relaxed); in mpsc_queue_restore_batch()
68 head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_restore_batch()
72 os_atomic_store(&last->mpqc_next, head, relaxed); in mpsc_queue_restore_batch()
75 os_atomic_store(&q->mpqh_head.mpqc_next, first, relaxed); in mpsc_queue_restore_batch()
86 tail = os_atomic_load(&q->mpqh_tail, relaxed); in mpsc_queue_dequeue_batch()
92 head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed); in mpsc_queue_dequeue_batch()
96 os_atomic_store(&q->mpqh_head.mpqc_next, NULL, relaxed); in mpsc_queue_dequeue_batch()
125 elm = os_atomic_load(&cur->mpqc_next, relaxed); in mpsc_queue_batch_next()
303 os_atomic_andnot(&dq->mpd_state, MPSC_QUEUE_STATE_WAKEUP, relaxed); in _mpsc_daemon_queue_drain()
[all …]
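
The relaxed loads and stores in these hits sit on the single-consumer side of the queue, where the consumer races only with producers at the tail. For orientation, a hedged sketch of a Vyukov-style MPSC push, which xnu's mpsc_queue resembles but does not necessarily match; the producers' ordered exchange carries the cross-thread ordering (types and names illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct sketch_node {
    _Atomic(struct sketch_node *) next;
};

struct sketch_mpsc_queue {
    struct sketch_node head;                /* stub node */
    _Atomic(struct sketch_node *) tail;     /* init to &q->head */
};

/* Multi-producer push: the acq_rel exchange serializes producers;
 * the linking store publishes the node to the consumer. */
static void
sketch_mpsc_push(struct sketch_mpsc_queue *q, struct sketch_node *n)
{
    atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
    struct sketch_node *prev = atomic_exchange_explicit(&q->tail, n,
        memory_order_acq_rel);
    atomic_store_explicit(&prev->next, n, memory_order_release);
}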
sched_average.c
193 load_now[TH_BUCKET_RUN] = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed); in compute_sched_load()
194 load_now[TH_BUCKET_FIXPRI] = os_atomic_load(&sched_run_buckets[TH_BUCKET_FIXPRI], relaxed); in compute_sched_load()
195 load_now[TH_BUCKET_SHARE_FG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_FG], relaxed); in compute_sched_load()
196 load_now[TH_BUCKET_SHARE_DF] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_DF], relaxed); in compute_sched_load()
197 load_now[TH_BUCKET_SHARE_UT] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_UT], relaxed); in compute_sched_load()
198 load_now[TH_BUCKET_SHARE_BG] = os_atomic_load(&sched_run_buckets[TH_BUCKET_SHARE_BG], relaxed); in compute_sched_load()
291 uint32_t nthreads = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) - 1; in compute_averages()
ast.h
188 #define thread_ast_set(act, reason) ((void)os_atomic_or(&(act)->ast, (reason), relaxed))
189 #define thread_ast_clear(act, reason) ((void)os_atomic_andnot(&(act)->ast, (reason), relaxed))
190 #define thread_ast_peek(act, reason) (os_atomic_load(&(act)->ast, relaxed) & (reason))
191 #define thread_ast_get(act) os_atomic_load(&(act)->ast, relaxed)
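
These macros manipulate a per-thread AST bitmask with relaxed bit operations; the flags are consumed at explicit AST-check points, so the accesses do not themselves publish data. In C11 terms (sketch_ast_* are illustrative stand-ins):

#include <stdatomic.h>
#include <stdint.h>

typedef _Atomic uint32_t sketch_ast_t;

/* or/andnot/load map onto fetch_or, fetch_and with a negated mask,
 * and a plain relaxed load. */
#define sketch_ast_set(a, r)    ((void)atomic_fetch_or_explicit((a), (r), memory_order_relaxed))
#define sketch_ast_clear(a, r)  ((void)atomic_fetch_and_explicit((a), ~(r), memory_order_relaxed))
#define sketch_ast_peek(a, r)   (atomic_load_explicit((a), memory_order_relaxed) & (r))
#define sketch_ast_get(a)       atomic_load_explicit((a), memory_order_relaxed)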
lock_group.c
274 os_atomic_inc(cnt, relaxed); in lck_grp_reference()
285 os_atomic_dec(cnt, relaxed); in lck_grp_deallocate()
306 os_atomic_or(&lck_debug_state.lds_value, bit, relaxed); in lck_grp_enable_feature()
323 os_atomic_andnot(&lck_debug_state.lds_value, bit, relaxed); in lck_grp_disable_feature()
352 needed = os_atomic_load(&lck_grp_table.cidt_count, relaxed); in host_lockgroup_info()
443 os_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed); in lck_attr_setdebug()
449 os_atomic_andnot(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed); in lck_attr_cleardebug()
455 os_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY, relaxed); in lck_attr_rw_shared_priority()
494 __unused uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed); in lck_grp_stat_inc()
507 __unused uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed); in lck_grp_inc_time_stats()
restartable.c
355 (uint8_t)TRR_FAULT_PENDING, relaxed); in thread_reset_pcs_will_fault()
371 state.trr_value, relaxed); in thread_reset_pcs_done_faulting()
390 trrs.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_reset_pcs_ack_IPI()
398 trrs.trr_value, relaxed); in thread_reset_pcs_ack_IPI()
408 state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_rr_wait_if_needed()
471 nstate, relaxed)) { in thread_rr_wait_if_needed()
480 state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_rr_wait_if_needed()
518 state.trr_value = os_atomic_load(&thread->t_rr_state.trr_value, relaxed); in thread_reset_pcs_ast()
machine.c
142 os_atomic_inc(&processor_avail_count, relaxed); in processor_up_update_counts()
144 os_atomic_inc(&processor_avail_count_user, relaxed); in processor_up_update_counts()
147 os_atomic_inc(&primary_processor_avail_count, relaxed); in processor_up_update_counts()
149 os_atomic_inc(&primary_processor_avail_count_user, relaxed); in processor_up_update_counts()
259 os_atomic_dec(&processor_avail_count, relaxed); in processor_down_update_counts()
261 os_atomic_dec(&processor_avail_count_user, relaxed); in processor_down_update_counts()
264 os_atomic_dec(&primary_processor_avail_count, relaxed); in processor_down_update_counts()
266 os_atomic_dec(&primary_processor_avail_count_user, relaxed); in processor_down_update_counts()
648 os_atomic_store(&report_phy_write_delay_to, 0, relaxed); in ml_io_init_timeouts()
649 os_atomic_store(&report_phy_read_delay_to, 0, relaxed); in ml_io_init_timeouts()
[all …]
sched_clutch.c
872 return (int)os_atomic_load(&sched_clutch_global_bucket_load[bucket], relaxed); in sched_clutch_global_bucket_load_get()
1099 os_atomic_store(&clutch_bucket_group->scbg_timeshare_tick, 0, relaxed); in sched_clutch_bucket_group_init()
1100 os_atomic_store(&clutch_bucket_group->scbg_pri_shift, INT8_MAX, relaxed); in sched_clutch_bucket_group_init()
1101 os_atomic_store(&clutch_bucket_group->scbg_preferred_cluster, pset0.pset_cluster_id, relaxed); in sched_clutch_bucket_group_init()
1108 …pu_data.scbcd_cpu_blocked, (clutch_cpu_data_t)sched_clutch_bucket_group_adjust_threshold, relaxed); in sched_clutch_bucket_group_init()
1135 os_atomic_store(&clutch->sc_thr_count, 0, relaxed); in sched_clutch_init_with_thread_group()
1144 os_atomic_store(&clutch->sc_tg_priority, 0, relaxed); in sched_clutch_init_with_thread_group()
1156 assert(os_atomic_load(&clutch->sc_thr_count, relaxed) == 0); in sched_clutch_destroy()
1177 return os_atomic_load(&clutch_bucket_group->scbg_preferred_cluster, relaxed); in sched_edge_clutch_bucket_group_preferred_cluster()
1246 … os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_FIXPRI], relaxed); OS_FALLTHROUGH; in sched_edge_cluster_cumulative_count_incr()
[all …]
locks_internal.h
96 __auto_type __v = os_atomic_load(p, relaxed); \
103 (os_atomic_load(p, relaxed) == (e))
138 #define lock_load_exclusive(p, m) os_atomic_load(p, relaxed)
smr.c
332 s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in __smr_enter()
337 old_seq = os_atomic_load(&pcpu->c_rd_seq, relaxed); in __smr_enter()
338 os_atomic_store(&pcpu->c_rd_seq, s_wr_seq, relaxed); in __smr_enter()
392 oclk.s_combined, nclk.s_combined, relaxed, { in __smr_rd_advance()
462 smr_seq_t seq = os_atomic_load(&it->c_rd_seq, relaxed); in __smr_scan()
520 clk.s_wr_seq = os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in __smr_poll()
562 return SMR_SEQ_INC + os_atomic_load(&smr->smr_clock.s_wr_seq, relaxed); in smr_deferred_advance_nopreempt()
cpu_quiesce.c
200 assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) & in cpu_quiescent_counter_join()
225 assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) & in cpu_quiescent_counter_ast()
340 checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed); in cpu_quiescent_counter_checkin()
378 checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed); in cpu_quiescent_counter_assert_ast()
/xnu-8792.61.2/libsyscall/mach/
vm_reclaim.c
129 idx = os_atomic_load_wide(&indices->tail, relaxed); in mach_vm_reclaim_mark_free()
130 head = os_atomic_load_wide(&indices->head, relaxed); in mach_vm_reclaim_mark_free()
138 head = os_atomic_load_wide(&indices->head, relaxed); in mach_vm_reclaim_mark_free()
140 …_assert("mach_vm_reclaim_mark_free", os_atomic_load_wide(&indices->tail, relaxed) % size != head %… in mach_vm_reclaim_mark_free()
149 os_atomic_inc(&indices->tail, relaxed); in mach_vm_reclaim_mark_free()
168 head = os_atomic_load_wide(&indices->head, relaxed); in mach_vm_reclaim_mark_used()
179 original_tail = os_atomic_load_wide(&indices->tail, relaxed); in mach_vm_reclaim_mark_used()
182 os_atomic_store_wide(&indices->tail, id, relaxed); in mach_vm_reclaim_mark_used()
184 busy = os_atomic_load_wide(&indices->busy, relaxed); in mach_vm_reclaim_mark_used()
187 os_atomic_store_wide(&indices->tail, original_tail, relaxed); in mach_vm_reclaim_mark_used()
[all …]
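
The reclaim buffer tracks head, tail, and busy as monotonically increasing 64-bit indices shared between user and kernel; the relaxed wide loads and stores here lean on the surrounding protocol for ordering. A hedged sketch of just the index accounting, not xnu's full protocol (all names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct sketch_indices {
    _Atomic uint64_t head;    /* consumer position, ever-increasing */
    _Atomic uint64_t tail;    /* producer position, ever-increasing */
};

/* Single producer: check for space against a racy head snapshot, then
 * advance the tail. Payload publication is synchronized elsewhere. */
static bool
sketch_mark_free(struct sketch_indices *ix, uint64_t capacity)
{
    uint64_t tail = atomic_load_explicit(&ix->tail, memory_order_relaxed);
    uint64_t head = atomic_load_explicit(&ix->head, memory_order_relaxed);
    if (tail - head >= capacity) {
        return false;    /* ring is full */
    }
    atomic_fetch_add_explicit(&ix->tail, 1, memory_order_relaxed);
    return true;
}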
/xnu-8792.61.2/san/memory/
ubsan_log.c
77 os_atomic_rmw_loop(&ubsan_log_next, i, n, relaxed, { in ubsan_log_append()
90 os_atomic_rmw_loop(&ubsan_log_head, e, n, relaxed, { in ubsan_log_append()
119 head = os_atomic_load(&ubsan_log_head, relaxed);
121 tail = os_atomic_load(&ubsan_log_tail, relaxed);
157 head = os_atomic_load(&ubsan_log_head, relaxed);
159 tail = os_atomic_load(&ubsan_log_tail, relaxed);
172 os_atomic_store(&ubsan_log_tail, head, relaxed);
/xnu-8792.61.2/osfmk/bank/
bank_internal.h
117 (os_atomic_inc_orig(&(elem)->bt_made, relaxed))
120 (os_atomic_dec_orig(&(elem)->bt_made, relaxed))
123 (os_atomic_sub_orig(&(elem)->bt_made, (num), relaxed))
168 (os_atomic_inc_orig(&(elem)->ba_made, relaxed))
171 (os_atomic_dec_orig(&(elem)->ba_made, relaxed))
174 (os_atomic_sub_orig(&(elem)->ba_made, (num), relaxed))
/xnu-8792.61.2/osfmk/arm/
counter.c
40 os_atomic_add(zpercpu_get(*counter), amount, relaxed); in counter_add()
47 os_atomic_inc(zpercpu_get(*counter), relaxed); in counter_inc()
54 os_atomic_dec(zpercpu_get(*counter), relaxed); in counter_dec()
cpu_common.c
229 os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed); in cpu_handle_xcall()
240 os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed); in cpu_handle_xcall()
294 if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) { in cpu_broadcast_xcall_internal()
330 if (os_atomic_dec(&data->sync, relaxed) == 0) { in cpu_broadcast_xcall_simple_cbk()
596 os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed); in cpu_signal_handler_internal()
598 os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed); in cpu_signal_handler_internal()
603 os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed); in cpu_signal_handler_internal()
610 os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkppet, relaxed); in cpu_signal_handler_internal()
621 os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed); in cpu_signal_handler_internal()
627 os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed); in cpu_signal_handler_internal()
[all …]
/xnu-8792.61.2/san/coverage/
kcov.c
116 os_atomic_add(&kcov_enabled, 1, relaxed); in kcov_enable()
122 os_atomic_sub(&kcov_enabled, 1, relaxed); in kcov_disable()
155 os_atomic_store(&kcov_enabled, 0, relaxed); in kcov_panic_disable()
189 if (__probable(os_atomic_load(&kcov_enabled, relaxed) == 0)) { in trace_pc_guard()
kcov_ksancov.c
114 if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) { in trace_pc_guard_pcs()
118 uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed); in trace_pc_guard_pcs()
131 if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) { in trace_pc_guard_pcs_stk()
135 uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed); in trace_pc_guard_pcs_stk()
189 if (os_atomic_load(&dev->hdr->kh_enabled, relaxed) == 0) { in kcov_ksancov_trace_pc()
529 os_atomic_store(&data->ktd_device, d, relaxed); in ksancov_attach()
530 os_atomic_add(&ksancov_enabled, 1, relaxed); in ksancov_attach()
557 os_atomic_store(&data->ktd_device, NULL, relaxed); in ksancov_detach()
566 os_atomic_sub(&ksancov_enabled, 1, relaxed); in ksancov_detach()
596 os_atomic_store(&d->hdr->kh_enabled, 0, relaxed); /* stop tracing */ in ksancov_close()
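
trace_pc_guard_pcs() reserves a trace-buffer slot by fetching the old head with a relaxed increment, then writes only if the slot is still in bounds; once the head passes maxpcs the buffer simply stays full. A minimal C11 sketch of that pattern (names illustrative, a fixed capacity assumed):

#include <stdatomic.h>
#include <stdint.h>

#define SKETCH_MAXPCS 1024

struct sketch_trace {
    _Atomic uint32_t head;
    uintptr_t pcs[SKETCH_MAXPCS];
};

/* Reserve a slot with a relaxed fetch-add of the old head; checking
 * bounds both before and after keeps writes inside the buffer. */
static void
sketch_trace_pc(struct sketch_trace *t, uintptr_t pc)
{
    if (atomic_load_explicit(&t->head, memory_order_relaxed) >= SKETCH_MAXPCS) {
        return;    /* buffer already full */
    }
    uint32_t idx = atomic_fetch_add_explicit(&t->head, 1,
        memory_order_relaxed);
    if (idx < SKETCH_MAXPCS) {
        t->pcs[idx] = pc;
    }
}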
/xnu-8792.61.2/libkern/os/
log_mem.c
179 os_atomic_inc(&lm->lm_cnt_allocations, relaxed); in logmem_alloc_impl()
182 os_atomic_inc(&lm->lm_cnt_failed_lmoff, relaxed); in logmem_alloc_impl()
187 os_atomic_inc(&lm->lm_cnt_failed_size, relaxed); in logmem_alloc_impl()
197 os_atomic_inc(&lm->lm_cnt_failed_full, relaxed); in logmem_alloc_impl()
202 os_atomic_sub(&lm->lm_cnt_free, (uint32_t)*amount, relaxed); in logmem_alloc_impl()
237 os_atomic_add(&lm->lm_cnt_free, (uint32_t)amount, relaxed); in logmem_free_impl()
/xnu-8792.61.2/osfmk/vm/
analytics.c
100 e->over_global_limit = os_atomic_load_wide(&vm_add_wire_count_over_global_limit, relaxed); in report_mlock_failures()
101 e->over_user_limit = os_atomic_load_wide(&vm_add_wire_count_over_user_limit, relaxed); in report_mlock_failures()
103 os_atomic_store_wide(&vm_add_wire_count_over_global_limit, 0, relaxed); in report_mlock_failures()
104 os_atomic_store_wide(&vm_add_wire_count_over_user_limit, 0, relaxed); in report_mlock_failures()
