
Searched refs:bit_test (Results 1 – 16 of 16) sorted by relevance

/xnu-12377.41.6/osfmk/kern/
bits.h
62 #define bit_test(x, b) ((bool)((x) & BIT(b))) macro
81 bool _bit_is_set = bit_test(*_map, _n); \
91 bool _bit_is_set = bit_test(*_map, _n); \
183 return bit_test(prev, n); in atomic_bit_set()
191 return bit_test(prev, n); in atomic_bit_clear()
298 return bit_test(map[bitmap_index(n)], bitmap_bit(n)); in bitmap_test()
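
Note: the bits.h definition above makes bit_test a plain mask-and-test. A minimal standalone sketch of how it composes, assuming BIT(b) expands to a 64-bit shift (that definition is not shown in these results):

#include <stdbool.h>
#include <stdint.h>

#define BIT(b)          (1ULL << (b))              /* assumed expansion */
#define bit_test(x, b)  ((bool)((x) & BIT(b)))     /* as in bits.h:62 above */

int
main(void)
{
	uint64_t cpu_bitmask = 0;

	cpu_bitmask |= BIT(3);                      /* mark CPU 3 present */
	return bit_test(cpu_bitmask, 3) ? 0 : 1;    /* exits 0: bit 3 is set */
}
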
processor.h
812 assert(bit_test(pset->cpu_bitmask, cpuid)); in pset_update_processor_state()
822 if (bit_test(pset->cpu_available_map, cpuid) && (new_state < PROCESSOR_IDLE)) { in pset_update_processor_state()
825 } else if (!bit_test(pset->cpu_available_map, cpuid) && (new_state >= PROCESSOR_IDLE)) { in pset_update_processor_state()
851 if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) { in pset_update_processor_state()
856 if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) { in pset_update_processor_state()
859 if (!bit_test(atomic_load(&node->pset_idle_map), pset->pset_id)) { in pset_update_processor_state()
866 if (bit_test(atomic_load(&node->pset_idle_map), pset->pset_id)) { in pset_update_processor_state()
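
Note: the processor.h hits (and several sched_prim.c hits below) share one shape: a node-level map is read with atomic_load and the snapshot is then tested with bit_test, so the bit check itself is an ordinary AND on a local copy. A minimal sketch of that shape, using C11 atomics in place of the kernel's wrappers and a hypothetical map name:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(b)          (1ULL << (b))
#define bit_test(x, b)  ((bool)((x) & BIT(b)))

static _Atomic uint64_t pset_idle_map;          /* hypothetical stand-in for a node map */

static bool
pset_marked_idle(int pset_id)
{
	/* Atomically snapshot the map, then test the bit on the local copy. */
	uint64_t snapshot = atomic_load(&pset_idle_map);
	return bit_test(snapshot, pset_id);
}
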
sched_amp.c
247 if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) { in sched_amp_choose_thread()
300 bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id); in sched_amp_processor_queue_empty()
337 if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) { in sched_amp_processor_csw_check()
382 if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) { in sched_amp_processor_queue_has_priority()
503 bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id); in sched_amp_steal_thread()
sched_prim.c
2262 if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) { in pset_commit_processor_to_new_thread()
2269 if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) { in pset_commit_processor_to_new_thread()
2381 pending_AST_URGENT = bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); in thread_select()
2382 pending_AST_PREEMPT = bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); in thread_select()
2519 if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) { in thread_select()
2690 if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) { in thread_select()
2696 bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id); in thread_select()
2783 if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) { in thread_select()
2818 if ((!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) || in thread_select()
2819 (!pending_AST_PREEMPT && bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id))) { in thread_select()
[all …]
sched_common.c
102 if (bit_test(candidate_map, pset_id)) { in sched_iterate_psets_ordered()
sched_amp_common.c
153 if (bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) { in pset_signal_spill()
sched_rt.c
415 if (bit_test(available_map, processor->cpu_id)) { in sched_rtlocal_choose_processor_smt()
628 } while (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)); in sched_rt_choose_thread()
938 bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0); in pset_choose_processor_for_realtime_thread_smt()
sched_clutch.c
4366 …if (bit_test(target_pset->cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_RR], for… in sched_edge_cpu_running_foreign_shared_rsrc_available()
4371 …if (bit_test(target_pset->cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_NATIVE_F… in sched_edge_cpu_running_foreign_shared_rsrc_available()
5017 if (bit_test(processor->processor_set->perfcontrol_cpu_migration_bitmask, processor->cpu_id)) { in sched_edge_thread_avoid_processor()
/xnu-12377.41.6/iokit/Kernel/arm/
AppleARMSMP.cpp
388 bit_test(cpu_power_state_mask, i)) { in is_cluster_powering_down()
418 assert(bit_test(cpu_power_state_mask, cpu_id)); in PE_cpu_power_disable()
452 return bit_test(online_clusters_mask, cluster_id); in PE_cpu_power_check_kdp()
458 assert(!bit_test(cpu_power_state_mask, cpu_id)); in PE_cpu_power_enable()
471 if (!bit_test(online_clusters_mask, cluster_id)) { in PE_cpu_power_enable()
/xnu-12377.41.6/osfmk/arm64/
lock_ticket_pv.c
57 if (!bit_test(wmask, tcpunum)) { in hw_lck_ticket_unlock_kick_pv()
machine_routines.c
764 assert(!bit_test(os_atomic_load(&ml_cpu_up_processors, relaxed), cpu_data_ptr->cpu_number)); in ml_cpu_up()
832 assert(bit_test(os_atomic_load(&ml_cpu_up_processors, relaxed), cpu_data_ptr->cpu_number)); in ml_cpu_down()
/xnu-12377.41.6/osfmk/vm/
vm_page.h
268 #define VM_PAGE_INACTIVE(m) bit_test(vm_page_inactive_states, (m)->vmp_q_state)
269 #define VM_PAGE_ACTIVE_OR_INACTIVE(m) bit_test(vm_page_active_or_inactive_states, (m)->vm…
270 #define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m) bit_test(vm_page_non_speculative_pageable_states, (…
271 #define VM_PAGE_PAGEABLE(m) bit_test(vm_page_pageable_states, (m)->vmp_q_state)
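
Note: the vm_page.h macros above classify a page by testing its vmp_q_state against a precomputed bitmask of queue states, so membership in a whole set of states costs one AND. A rough sketch of the pattern; the state constants and mask contents here are hypothetical, not the VM code's actual values:

#include <stdbool.h>
#include <stdint.h>

#define BIT(b)          (1ULL << (b))
#define bit_test(x, b)  ((bool)((x) & BIT(b)))

/* Hypothetical queue-state values, for illustration only. */
enum { Q_STATE_INACTIVE_INTERNAL = 4, Q_STATE_INACTIVE_EXTERNAL = 5 };

static const uint64_t inactive_states =
    BIT(Q_STATE_INACTIVE_INTERNAL) | BIT(Q_STATE_INACTIVE_EXTERNAL);

static bool
page_state_is_inactive(unsigned int vmp_q_state)
{
	/* Same shape as VM_PAGE_INACTIVE(m): one test covers the whole set. */
	return bit_test(inactive_states, vmp_q_state);
}
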
vm_mteinfo.c
966 assert(bit_test(orig.free_mask, bit)); in mteinfo_free_queue_requeue()
982 assert(bit_test(cell->free_mask, bit)); in mteinfo_free_queue_requeue()
1475 assert(!bit_test(cell->free_mask, bit)); in mteinfo_covered_page_set_free()
1499 assert(bit_test(cell->free_mask, bit)); in mteinfo_covered_page_set_used()
1523 assert(!bit_test(cell->free_mask, pnum % MTE_PAGES_PER_TAG_PAGE)); in mteinfo_covered_page_set_stolen_tagged()
1537 assert(!bit_test(cell->free_mask, pnum % MTE_PAGES_PER_TAG_PAGE)); in mteinfo_covered_page_clear_tagged()
2799 assert(bit_test(orig.free_mask, bit)); in mteinfo_free_queue_grab()
2808 if (!bit_test(cell->free_mask, bit) || in mteinfo_free_queue_grab()
/xnu-12377.41.6/bsd/skywalk/mem/
skmem_region.c
1571 ASSERT(bit_test(skr->skr_seg_bmap[i / BMAPSZ], i % BMAPSZ)); in skmem_region_depopulate()
1603 ASSERT(bit_test(*bmap, i % BMAPSZ)); in sksegment_create()
1642 ASSERT(!bit_test(*bmap, i % BMAPSZ)); in sksegment_destroy()
1927 if (!bit_test(skr->skr_seg_bmap[idx / BMAPSZ], idx % BMAPSZ)) { in sksegment_alloc_with_idx()
1938 VERIFY(!bit_test(skr->skr_seg_bmap[idx / BMAPSZ], idx % BMAPSZ)); in sksegment_alloc_with_idx()
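
Note: the skywalk hits in skmem_region.c (and nexus.c below) apply bit_test to multi-word bitmaps by splitting an index into a word (i / BMAPSZ) and a bit within it (i % BMAPSZ). A small sketch of that indexing, assuming BMAPSZ is the bit width of one bitmap word (its actual definition is not shown here):

#include <stdbool.h>
#include <stdint.h>

#define BMAPSZ          64U                        /* assumed bits per word */
#define BIT(b)          (1ULL << (b))
#define bit_test(x, b)  ((bool)((x) & BIT(b)))

static bool
seg_bmap_test(const uint64_t *bmap, uint32_t i)
{
	/* The word index picks the chunk; the remainder picks the bit in it. */
	return bit_test(bmap[i / BMAPSZ], i % BMAPSZ);
}
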
/xnu-12377.41.6/bsd/skywalk/nexus/
nexus.c
2678 if (bit_test(*bmap, j)) { in nx_port_alloc()
2739 ASSERT(!bit_test(nx->nx_ports_bmap[nx_port / NX_PORT_CHUNK], in nx_port_alloc()
2777 ASSERT(!bit_test(*bmap, j)); in nx_port_free()
2822 if (bit_test(*bmap, j)) { in nx_port_bind_info()
2897 (!bit_test(*bmap, j) && nx->nx_active_ports > 0)); in nx_port_unbind()
2909 ASSERT(!bit_test(*bmap, j)); in nx_port_unbind()
3025 if (bit_test(bmap, j)) {
/xnu-12377.41.6/bsd/skywalk/nexus/flowswitch/flow/
flow_owner.c
506 ASSERT(!bit_test(bmap[chunk_idx], bit_pos)); in flow_owner_flowadv_index_free()