xref: /xnu-8020.101.4/osfmk/i386/locks.h (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _I386_LOCKS_H_
30 #define _I386_LOCKS_H_
31 
32 #include <sys/appleapiopts.h>
33 #include <kern/kern_types.h>
34 #include <kern/assert.h>
35 
36 #ifdef  MACH_KERNEL_PRIVATE
/* Presumably enables the indirect (lck_mtx_ext_t) mutex form -- verify against users */
#define LOCKS_INDIRECT_ALLOW    1

#include <i386/hw_lock_types.h>

/* Flag bits consumed when building the default lock attribute */
#define enaLkDeb                0x00000001      /* Request debug in default attribute */
#define enaLkStat               0x00000002      /* Request statistic in default attribute */
#define disLkRWPrio             0x00000004      /* Disable RW lock priority promotion */
#define enaLkTimeStat           0x00000008      /* Request time statistics in default attribute */
#define disLkRWDebug            0x00000010      /* Disable RW lock best-effort debugging */
46 
47 #endif /* MACH_KERNEL_PRIVATE */
48 
49 #if     defined(MACH_KERNEL_PRIVATE)
typedef struct {
	/*
	 * Spin-lock word.  __kernel_data_semantics permits the lock to
	 * live in a data-only heap (cf. the lck_mtx_owner note below).
	 */
	volatile uintptr_t      interlock __kernel_data_semantics;
#if     MACH_LDEBUG
	/* Extra space reserved for lock debugging state */
	unsigned long   lck_spin_pad[9];        /* XXX - usimple_lock_data_t */
#endif
} lck_spin_t;

/* Sentinel stored in the interlock once the lock is destroyed */
#define LCK_SPIN_TAG_DESTROYED          0x00002007      /* lock marked as Destroyed */
58 
59 #else /* MACH_KERNEL_PRIVATE */
60 #ifdef  KERNEL_PRIVATE
/*
 * Opaque shim for kernel extensions: 10 words, matching the largest
 * (MACH_LDEBUG) layout of the real lck_spin_t above.
 */
typedef struct {
	unsigned long    opaque[10];
} lck_spin_t;
#else /* KERNEL_PRIVATE */
/* Fully opaque type for user-visible headers */
typedef struct __lck_spin_t__   lck_spin_t;
66 #endif /* KERNEL_PRIVATE */
67 #endif /* MACH_KERNEL_PRIVATE */
68 
69 #ifdef  MACH_KERNEL_PRIVATE
70 /* The definition of this structure, including the layout of the
71  * state bitfield, is tailored to the asm implementation in i386_lock.s
72  */
typedef struct _lck_mtx_ {
	/*
	 * The mtx_owner which holds a thread_t can be "data semantics"
	 * because any dereference of it that leads to mutation
	 * will zone_id_require() that it is indeed a proper thread
	 * from the thread zone.
	 *
	 * This allows us to leave pure data with a lock into
	 * the kalloc data heap.
	 */
	union {
		/* Direct representation: owner + packed state word */
		struct {
			volatile uintptr_t              lck_mtx_owner __kernel_data_semantics;
			union {
				struct {
					/*
					 * Bit layout must agree with the
					 * LCK_MTX_*_MSK defines below and with
					 * the asm in i386_lock.s.
					 */
					volatile uint32_t
					    lck_mtx_waiters:16,     /* LCK_MTX_WAITERS_MSK */
					    lck_mtx_pri:8, // unused
					    lck_mtx_ilocked:1,      /* LCK_MTX_ILOCKED_MSK */
					    lck_mtx_mlocked:1,      /* LCK_MTX_MLOCKED_MSK */
					    lck_mtx_promoted:1, // unused
					    lck_mtx_spin:1,         /* LCK_MTX_SPIN_MSK */
					    lck_mtx_is_ext:1,
					    lck_mtx_pad3:3;
				};
				uint32_t        lck_mtx_state;  /* all of the above as one word */
			};
			/* Pad field used as a canary, initialized to ~0 */
			uint32_t                        lck_mtx_pad32;
		};
		/* Indirect representation: lck_mtx_tag holds LCK_MTX_TAG_INDIRECT */
		struct {
			/* Marked as data as it is only dereferenced under LCK_ATTR_DEBUG */
			struct _lck_mtx_ext_            *lck_mtx_ptr __kernel_data_semantics;
			uint32_t                        lck_mtx_tag;
			uint32_t                        lck_mtx_pad32_2;
		};
	};
} lck_mtx_t;
111 
/* Masks for the lck_mtx_t state bitfield above */
#define LCK_MTX_WAITERS_MSK             0x0000ffff      /* lck_mtx_waiters (bits 0-15) */
#define LCK_MTX_WAITER                  0x00000001      /* increment for one waiter */
#define LCK_MTX_PRIORITY_MSK            0x00ff0000      /* lck_mtx_pri (unused) */
#define LCK_MTX_ILOCKED_MSK             0x01000000      /* lck_mtx_ilocked (bit 24) */
#define LCK_MTX_MLOCKED_MSK             0x02000000      /* lck_mtx_mlocked (bit 25) */
#define LCK_MTX_SPIN_MSK                0x08000000      /* lck_mtx_spin (bit 27) */

/* This pattern must subsume the interlocked, mlocked and spin bits */
/*
 * NOTE(review): 0x07ff1007 sets bits 24-26 but NOT bit 27
 * (LCK_MTX_SPIN_MSK), so the comment above looks stale --
 * confirm against the fast paths in i386_lock.s before relying on it.
 */
#define LCK_MTX_TAG_INDIRECT                    0x07ff1007      /* lock marked as Indirect  */
#define LCK_MTX_TAG_DESTROYED                   0x07fe2007      /* lock marked as Destroyed */
122 
/*
 * Adaptive spin before blocking: tunable spin durations, in timebase
 * units, consulted by the mutex slow path.
 * NOTE(review): high_MutexSpin is deliberately signed -- presumably a
 * negative value means "no high cap"; confirm in the lock implementation.
 */
extern uint64_t         MutexSpin;
extern uint64_t         low_MutexSpin;
extern int64_t          high_MutexSpin;
127 
/* Outcome of the adaptive-spin attempt in lck_mtx_lock_spinwait_x86() */
typedef enum lck_mtx_spinwait_ret_type {
	LCK_MTX_SPINWAIT_ACQUIRED = 0,                  /* got the lock while spinning */

	/* Spun without acquiring; the value records why spinning stopped */
	LCK_MTX_SPINWAIT_SPUN_HIGH_THR = 1,
	LCK_MTX_SPINWAIT_SPUN_OWNER_NOT_CORE = 2,
	LCK_MTX_SPINWAIT_SPUN_NO_WINDOW_CONTENTION = 3,
	LCK_MTX_SPINWAIT_SPUN_SLIDING_THR = 4,

	LCK_MTX_SPINWAIT_NO_SPIN = 5,                   /* did not spin at all */
} lck_mtx_spinwait_ret_type_t;
138 
/* Adaptive spin on a held mutex; returns why spinning ended (see enum above) */
extern lck_mtx_spinwait_ret_type_t              lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex);
struct turnstile;
/* Block on a contended mutex; *ts is an in/out turnstile reference */
extern void                                     lck_mtx_lock_wait_x86(lck_mtx_t *mutex, struct turnstile **ts);
extern void                                     lck_mtx_lock_acquire_x86(lck_mtx_t *mutex);

/* Slow paths entered when the inline fast paths fail */
extern void                                     lck_mtx_lock_slow(lck_mtx_t *lock);
extern boolean_t                                lck_mtx_try_lock_slow(lck_mtx_t *lock);
extern void                                     lck_mtx_unlock_slow(lck_mtx_t *lock);
extern void                                     lck_mtx_lock_spin_slow(lck_mtx_t *lock);
extern boolean_t                                lck_mtx_try_lock_spin_slow(lck_mtx_t *lock);
/* Byte-granular hardware spin-lock primitives */
extern void                                     hw_lock_byte_init(volatile uint8_t *lock_byte);
extern void                                     hw_lock_byte_lock(volatile uint8_t *lock_byte);
extern void                                     hw_lock_byte_unlock(volatile uint8_t *lock_byte);
152 
/* Debug record embedded in lck_mtx_ext_t (used under LCK_MTX_ATTR_DEBUG) */
typedef struct {
	unsigned int            type;   /* presumably set to MUTEX_TAG -- verify */
	unsigned int            pad4;
	vm_offset_t             pc;     /* caller pc; presumably the last acquirer -- verify */
	vm_offset_t             thread;
} lck_mtx_deb_t;

#define MUTEX_TAG       0x4d4d          /* ASCII 'MM' */

/* Statistics word kept in the extended mutex */
typedef struct {
	unsigned int            lck_mtx_stat_data;
} lck_mtx_stat_t;
165 
/*
 * Extended ("indirect") mutex, referenced from lck_mtx_t.lck_mtx_ptr;
 * carries debug and statistics state alongside the real lock.
 */
typedef struct _lck_mtx_ext_ {
	lck_mtx_t               lck_mtx;        /* the actual lock state */
	struct _lck_grp_        *lck_mtx_grp;   /* owning lock group */
	unsigned int            lck_mtx_attr;   /* LCK_MTX_ATTR_* bits below */
	unsigned int            lck_mtx_pad1;
	lck_mtx_deb_t           lck_mtx_deb;    /* debug record */
	uint64_t                lck_mtx_stat;
	unsigned int            lck_mtx_pad2[2];
} lck_mtx_ext_t;

/* Attribute bits (mask form and bit-number form) for lck_mtx_attr */
#define LCK_MTX_ATTR_DEBUG      0x1
#define LCK_MTX_ATTR_DEBUGb     0
#define LCK_MTX_ATTR_STAT       0x2
#define LCK_MTX_ATTR_STATb      1

/*
 * Wait event for a mutex: the address of the last 32-bit word of the
 * lck_mtx_t (the pad/canary word on LP64), and the inverse mapping.
 */
#define LCK_MTX_EVENT(lck)        ((event_t)(((unsigned int*)(lck))+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)))
#define LCK_EVENT_TO_MUTEX(event) ((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))))
183 
184 #else /* MACH_KERNEL_PRIVATE */
#ifdef  XNU_KERNEL_PRIVATE
/* Opaque shims sized to cover the MACH_KERNEL_PRIVATE layouts above */
typedef struct {
	unsigned long           opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long           opaque[10];
} lck_mtx_ext_t;
#else /* XNU_KERNEL_PRIVATE */
#ifdef  KERNEL_PRIVATE
/* Same opaque shims for other kernel-private consumers */
typedef struct {
	unsigned long           opaque[2];
} lck_mtx_t;

typedef struct {
	unsigned long           opaque[10];
} lck_mtx_ext_t;

#else /* KERNEL_PRIVATE */
/* Fully opaque types for user-visible headers */
typedef struct __lck_mtx_t__            lck_mtx_t;
typedef struct __lck_mtx_ext_t__        lck_mtx_ext_t;
#endif /* KERNEL_PRIVATE */
#endif /* XNU_KERNEL_PRIVATE */
208 #endif /* MACH_KERNEL_PRIVATE */
209 
210 #ifdef  MACH_KERNEL_PRIVATE
211 
/*
 * static panic deadline, in timebase units, for
 * hw_lock_{bit,lock}{,_nopreempt} and hw_wait_while_equals()
 */
extern uint64_t _Atomic lock_panic_timeout;

#if LOCK_PRIVATE

/* Preemption-control hooks used by the generic lock implementation */
#define lock_disable_preemption_for_thread(t)   disable_preemption_internal()
#define lock_preemption_disabled_for_thread(t)  (get_preemption_level() > 0)

/* x86 encodes the owning thread pointer directly as the mutex state word */
#define LCK_MTX_THREAD_TO_STATE(t)      ((uintptr_t)t)
#define PLATFORM_LCK_ILOCK              0

/* Tuning knobs; presumably consumed by the generic spin loops -- verify */
#define LOCK_SNOOP_SPINS        1000
#define LOCK_PRETEST            1

#endif  // LOCK_PRIVATE

/* NOTE(review): presumably checks for (and takes) a pending preemption -- verify */
extern void             kernel_preempt_check(void);
232 
233 #endif /* MACH_KERNEL_PRIVATE */
234 #endif  /* _I386_LOCKS_H_ */
235