xref: /xnu-12377.1.9/osfmk/arm/locks.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _ARM_LOCKS_H_
30 #define _ARM_LOCKS_H_
31 
32 #ifdef  MACH_KERNEL_PRIVATE
33 #ifndef LCK_SPIN_IS_TICKET_LOCK
34 #define LCK_SPIN_IS_TICKET_LOCK 0
35 #endif
36 #endif /* MACH_KERNEL_PRIVATE */
37 
38 #include <kern/lock_types.h>
39 #ifdef  MACH_KERNEL_PRIVATE
40 #include <kern/sched_hygiene.h>
41 #include <kern/startup.h>
42 #if LCK_SPIN_IS_TICKET_LOCK
43 #include <kern/ticket_lock.h>
44 #endif
45 #endif
46 
47 #ifdef  MACH_KERNEL_PRIVATE
#if LCK_SPIN_IS_TICKET_LOCK
/* Spin locks are implemented as ticket locks; reuse that type directly. */
typedef lck_ticket_t lck_spin_t;
#else
/*
 * Classic arm spin lock: a raw hardware spin lock plus a type tag
 * used to catch operations on an uninitialized or destroyed lock.
 */
typedef struct lck_spin_s {
	struct hslock   hwlock;         /* underlying hardware spin lock word */
	unsigned long   type;           /* LCK_SPIN_TYPE or LCK_SPIN_TYPE_DESTROYED */
} lck_spin_t;

/* Shorthand accessor for the raw lock word inside the hslock. */
#define lck_spin_data hwlock.lock_data

#define LCK_SPIN_TAG_DESTROYED  0xdead  /* lock marked as Destroyed */

#define LCK_SPIN_TYPE           0x00000011      /* tag: valid spin lock */
#define LCK_SPIN_TYPE_DESTROYED 0x000000ee      /* tag: destroyed spin lock */
#endif
63 
64 #elif KERNEL_PRIVATE
65 
/*
 * KERNEL_PRIVATE (non-Mach) view: size-matched opaque placeholders so
 * kexts can embed the lock types without seeing their internals.
 * Sizes must stay in sync with the MACH_KERNEL_PRIVATE definitions.
 */
typedef struct {
	uintptr_t opaque[2] __kernel_data_semantics;
} lck_spin_t;

typedef struct {
	uintptr_t opaque[2] __kernel_data_semantics;
} lck_mtx_t;

typedef struct {
	uintptr_t opaque[16];
} lck_mtx_ext_t;
77 
78 #else
79 
/* Fully opaque forward declarations for code outside KERNEL_PRIVATE. */
typedef struct __lck_spin_t__           lck_spin_t;
typedef struct __lck_mtx_t__            lck_mtx_t;
typedef struct __lck_mtx_ext_t__        lck_mtx_ext_t;
83 
84 #endif  /* !KERNEL_PRIVATE */
85 #ifdef  MACH_KERNEL_PRIVATE
86 
/*
 * static panic deadline, in timebase units, for
 * hw_lock_{bit,lock}{,_nopreempt} and hw_wait_while_equals()
 */
extern uint64_t _Atomic lock_panic_timeout;

/* Adaptive spin before blocking */
extern uint64_t            MutexSpin;       /* default spin budget */
extern uint64_t            low_MutexSpin;   /* lower bound of the spin window */
extern int64_t             high_MutexSpin;  /* upper bound; signed — NOTE(review): presumably allows a -1 sentinel, confirm at definition site */

#if CONFIG_PV_TICKET
/* Whether paravirtualized ticket-lock support was detected. */
extern bool                has_lock_pv;
#endif
101 
102 #ifdef LOCK_PRIVATE
103 
#define LOCK_SNOOP_SPINS        100     /* spins snooping the lock word before escalating */
#define LOCK_PRETEST            0       /* no pre-test load before the atomic attempt on arm */

/* Park the core until an event/exclusive-monitor clear wakes it (ARM WFE). */
#define wait_for_event()        __builtin_arm_wfe()
108 
#ifndef __BUILDING_XNU_LIB_UNITTEST__
#if SCHED_HYGIENE_DEBUG
/*
 * Increment thread t's preemption-disable count.
 *
 * The store uses compiler_acq_rel so the compiler cannot move memory
 * accesses across the count update (no hardware barrier is needed:
 * the count is only read/written by its own thread).
 *
 * Under the scheduler-hygiene instrumentation, a 0 -> 1 transition
 * while debug mode is active starts a preemption-disable measurement.
 */
#define lock_disable_preemption_for_thread(t) ({                                \
	thread_t __dpft_thread = (t);                                           \
	uint32_t *__dpft_countp = &__dpft_thread->machine.preemption_count;     \
	uint32_t __dpft_count;                                                  \
                                                                                \
	__dpft_count = *__dpft_countp;                                          \
	os_atomic_store(__dpft_countp, __dpft_count + 1, compiler_acq_rel);     \
                                                                                \
	if (static_if(sched_debug_preemption_disable)) {                        \
	       if (__dpft_count == 0 && sched_preemption_disable_debug_mode) {  \
	               _prepare_preemption_disable_measurement();               \
	       }                                                                \
	}                                                                       \
})
#else /* SCHED_HYGIENE_DEBUG */
/* Non-instrumented variant: just bump the per-thread preemption count. */
#define lock_disable_preemption_for_thread(t) ({                                \
	uint32_t *__dpft_countp = &(t)->machine.preemption_count;               \
                                                                                \
	os_atomic_store(__dpft_countp, *__dpft_countp + 1, compiler_acq_rel);   \
})
#endif /* SCHED_HYGIENE_DEBUG */
#else /* __BUILDING_XNU_LIB_UNITTEST__ */
/* Unit-test builds link against an out-of-line implementation instead. */
extern void lock_disable_preemption_for_thread(thread_t);
#endif /* __BUILDING_XNU_LIB_UNITTEST__ */
/* Thin aliases mapping the generic lock-layer names onto the arm fast paths. */
#define lock_enable_preemption()                enable_preemption()
#define lock_preemption_level_for_thread(t)     get_preemption_level_for_thread(t)
#define lock_preemption_disabled_for_thread(t)  (get_preemption_level_for_thread(t) != 0)
#define current_thread()                        current_thread_fast()
139 
/*
 * One iteration of a low-power spin-wait:
 * load *ptr with a load-exclusive into load_var, then evaluate cond_expr
 * into cond_result.  If the condition holds (the expected/common case),
 * clear the exclusive monitor and return so the caller can proceed;
 * otherwise WFE — the pending exclusive monitor wakes the core when
 * another CPU writes the lock word, after which the caller retries.
 */
#define __hw_spin_wait_load(ptr, load_var, cond_result, cond_expr) ({ \
	load_var = os_atomic_load_exclusive(ptr, relaxed);                      \
	cond_result = (cond_expr);                                              \
	if (__probable(cond_result)) {                                          \
	        os_atomic_clear_exclusive();                                    \
	} else {                                                                \
	        wait_for_event();                                               \
	}                                                                       \
})
149 
150 #endif /* LOCK_PRIVATE */
151 #endif /* MACH_KERNEL_PRIVATE */
152 #endif /* _ARM_LOCKS_H_ */
153