/* xref: /xnu-8796.121.2/osfmk/i386/locks.h (revision c54f35ca767986246321eb901baf8f5ff7923f6a) */
/*
 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _I386_LOCKS_H_
#define _I386_LOCKS_H_

#include <sys/appleapiopts.h>
#include <kern/lock_types.h>
#include <kern/assert.h>

#ifdef  MACH_KERNEL_PRIVATE

/*
 * Flag bits for the default lock attribute word.  "ena" bits request a
 * feature in the default attribute; "dis" bits turn one off.
 */
#define enaLkDeb                0x00000001      /* Request debug in default attribute */
#define enaLkStat               0x00000002      /* Request statistic in default attribute */
#define disLkRWPrio             0x00000004      /* Disable RW lock priority promotion */
#define enaLkTimeStat           0x00000008      /* Request time statistics in default attribute */
#define disLkRWDebug            0x00000010      /* Disable RW lock best-effort debugging */

#endif /* MACH_KERNEL_PRIVATE */
45 
#ifdef MACH_KERNEL_PRIVATE
/*
 * Spin lock: a single interlock word.  With MACH_LDEBUG the structure is
 * padded out by 9 extra words (see the XXX note — sized to match
 * usimple_lock_data_t).
 */
typedef struct {
	volatile uintptr_t      interlock __kernel_data_semantics;
#if MACH_LDEBUG
	unsigned long           lck_spin_pad[9];        /* XXX - usimple_lock_data_t */
#endif
} lck_spin_t;

/*
 * Tag marking a destroyed spin lock — presumably written into the
 * interlock word on destroy; confirm against lck_spin_destroy().
 */
#define LCK_SPIN_TAG_DESTROYED 0x00002007      /* lock marked as Destroyed */
55 
#if LCK_MTX_USE_ARCH

/*
 * Arch-specific mutex.  The first word is a union of a bitfield view and
 * the raw lck_mtx_state word; the LCK_MTX_*_MSK values below encode the
 * same layout and must stay in sync with the bitfield:
 *   bits  0-15  lck_mtx_waiters   (LCK_MTX_WAITERS_MSK)
 *   bits 16-23  lck_mtx_pri       (LCK_MTX_PRIORITY_MSK, unused)
 *   bit  24     lck_mtx_ilocked   (LCK_MTX_ILOCKED_MSK)
 *   bit  25     lck_mtx_mlocked   (LCK_MTX_MLOCKED_MSK)
 *   bit  26     lck_mtx_spin      (LCK_MTX_SPIN_MSK)
 *   bit  27     lck_mtx_profile   (LCK_MTX_PROFILE_MSK)
 */
typedef struct _lck_mtx_ {
	union {
		struct {
			volatile uint32_t
			    lck_mtx_waiters:16,
			    lck_mtx_pri:8, // unused
			    lck_mtx_ilocked:1,
			    lck_mtx_mlocked:1,
			    lck_mtx_spin:1,
			    lck_mtx_profile:1,
			    lck_mtx_pad3:4;
		};
		uint32_t        lck_mtx_state;  /* raw view of the bitfield above */
	};
	volatile uint32_t       lck_mtx_owner; /* a ctid_t */
	uint32_t                lck_mtx_grp;
	uint32_t                lck_mtx_padding;
} lck_mtx_t;

/* Masks over lck_mtx_state — keep in sync with the bitfield layout above. */
#define LCK_MTX_WAITERS_MSK             0x0000ffff
#define LCK_MTX_WAITER                  0x00000001
#define LCK_MTX_PRIORITY_MSK            0x00ff0000
#define LCK_MTX_ILOCKED_MSK             0x01000000
#define LCK_MTX_MLOCKED_MSK             0x02000000
#define LCK_MTX_SPIN_MSK                0x04000000
#define LCK_MTX_PROFILE_MSK             0x08000000

/* This pattern must subsume the interlocked, mlocked and spin bits */
#define LCK_MTX_TAG_DESTROYED           0x07fe2007      /* lock marked as Destroyed */

#endif /* LCK_MTX_USE_ARCH */
#elif KERNEL_PRIVATE

/*
 * Kernel-private (but not Mach-internal) view: internals are hidden and
 * only the storage size is exposed, so the types can still be embedded
 * by value.  opaque[10] matches the MACH_LDEBUG lck_spin_t layout
 * (1 interlock word + 9 pad words).
 */
typedef struct {
	unsigned long opaque[10] __kernel_data_semantics;
} lck_spin_t;

typedef struct {
	unsigned long opaque[2] __kernel_data_semantics;
} lck_mtx_t;

typedef struct {
	unsigned long opaque[10];
} lck_mtx_ext_t;

#else /* KERNEL_PRIVATE */

/* Fully opaque outside the kernel: usable through pointers only. */
typedef struct __lck_spin_t__           lck_spin_t;
typedef struct __lck_mtx_t__            lck_mtx_t;
typedef struct __lck_mtx_ext_t__        lck_mtx_ext_t;

#endif /* !KERNEL_PRIVATE */
#ifdef  MACH_KERNEL_PRIVATE

/*
 * static panic deadline, in timebase units, for
 * hw_lock_{bit,lock}{,_nopreempt} and hw_wait_while_equals()
 */
extern uint64_t _Atomic lock_panic_timeout;

/* Adaptive spin before blocking */
extern uint64_t         MutexSpin;
extern uint64_t         low_MutexSpin;
/*
 * NOTE(review): signed, unlike the other spin tunables — presumably so a
 * negative value can act as a sentinel; confirm at the definition site.
 */
extern int64_t          high_MutexSpin;

#if CONFIG_PV_TICKET
/* Set when paravirtualized ticket-lock support is available. */
extern bool             has_lock_pv;
#endif
#if LCK_MTX_USE_ARCH

/*
 * Outcome of lck_mtx_lock_spinwait_x86(): either the mutex was acquired
 * while spinning, or the reason the adaptive spin stopped.
 */
typedef enum lck_mtx_spinwait_ret_type {
	LCK_MTX_SPINWAIT_ACQUIRED = 0,

	/* Spun without acquiring (distinct give-up reasons): */
	LCK_MTX_SPINWAIT_SPUN_HIGH_THR = 1,
	LCK_MTX_SPINWAIT_SPUN_OWNER_NOT_CORE = 2,
	LCK_MTX_SPINWAIT_SPUN_NO_WINDOW_CONTENTION = 3,
	LCK_MTX_SPINWAIT_SPUN_SLIDING_THR = 4,

	/* Did not spin at all. */
	LCK_MTX_SPINWAIT_NO_SPIN = 5,
} lck_mtx_spinwait_ret_type_t;

extern lck_mtx_spinwait_ret_type_t              lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex);
struct turnstile;
/* Block on a contended mutex; *ts is the turnstile used for the wait. */
extern void             lck_mtx_lock_wait_x86(lck_mtx_t *mutex, struct turnstile **ts);
extern void             lck_mtx_lock_acquire_x86(lck_mtx_t *mutex);

/* Out-of-line slow paths behind the fast-path lock/unlock entry points. */
extern void             lck_mtx_lock_slow(lck_mtx_t *lock);
extern boolean_t        lck_mtx_try_lock_slow(lck_mtx_t *lock);
extern void             lck_mtx_unlock_slow(lck_mtx_t *lock);
extern void             lck_mtx_lock_spin_slow(lck_mtx_t *lock);
extern boolean_t        lck_mtx_try_lock_spin_slow(lck_mtx_t *lock);

#endif /* LCK_MTX_USE_ARCH */
151 
/* Byte-granular spin lock primitives: one lock per uint8_t. */
extern void             hw_lock_byte_init(volatile uint8_t *lock_byte);
extern void             hw_lock_byte_lock(volatile uint8_t *lock_byte);
extern void             hw_lock_byte_unlock(volatile uint8_t *lock_byte);
/* NOTE(review): presumably takes any preemption made pending while a lock was held — confirm at definition. */
extern void             kernel_preempt_check(void);
156 
157 #ifdef LOCK_PRIVATE
158 
159 #if LCK_MTX_USE_ARCH
160 #define LCK_MTX_EVENT(lck)      CAST_EVENT64_T(&(lck)->lck_mtx_owner)
161 #define LCK_EVENT_TO_MUTEX(e)   __container_of((uint32_t *)(event), lck_mtx_t, lck_mtx_owner)
162 #define LCK_MTX_HAS_WAITERS(l)  ((l)->lck_mtx_waiters != 0)
163 #endif /* LCK_MTX_USE_ARCH */
164 
/* Spin-loop tuning knobs for the generic lock code. */
#define LOCK_SNOOP_SPINS        1000
#define LOCK_PRETEST            1

/*
 * Thread-parameterized preemption hooks used by the generic lock layer;
 * on x86 the thread argument `t` is ignored and the per-CPU preemption
 * primitives are invoked directly.
 */
#define lock_disable_preemption_for_thread(t)   disable_preemption_internal()
#define lock_preemption_level_for_thread(t)     get_preemption_level()
#define lock_preemption_disabled_for_thread(t)  (get_preemption_level() > 0)
#define lock_enable_preemption()                enable_preemption_internal()
/* Route current_thread() in lock code to the fast inline variant. */
#define current_thread()                        current_thread_fast()
173 
/*
 * One iteration of a spin-wait loop (GCC/Clang statement expression):
 * relaxed-load *ptr into load_var, evaluate cond_expr into cond_result,
 * and issue cpu_pause() when the condition is not yet satisfied.
 */
#define __hw_spin_wait_load(ptr, load_var, cond_result, cond_expr) ({ \
	load_var = os_atomic_load(ptr, relaxed);                                \
	cond_result = (cond_expr);                                              \
	if (!(cond_result)) {                                                   \
	        cpu_pause();                                                    \
	}                                                                       \
})

#endif /* LOCK_PRIVATE */
#endif /* MACH_KERNEL_PRIVATE */
#endif /* _I386_LOCKS_H_ */