/* xref: /xnu-11215.41.3/osfmk/i386/locks.h (revision 33de042d024d46de5ff4e89f2471de6608e37fa4) */
1 /*
2  * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _I386_LOCKS_H_
30 #define _I386_LOCKS_H_
31 
32 #include <sys/appleapiopts.h>
33 #include <kern/lock_types.h>
34 #include <kern/assert.h>
35 
36 #ifdef MACH_KERNEL_PRIVATE
/*
 * Kernel-internal spin lock: a single word holding the interlock state.
 * With MACH_LDEBUG, extra padding keeps the size consistent with the
 * larger debug usimple_lock_data_t layout.
 */
typedef struct {
	volatile uintptr_t      interlock __kernel_data_semantics;
#if MACH_LDEBUG
	unsigned long           lck_spin_pad[9];        /* XXX - usimple_lock_data_t */
#endif
} lck_spin_t;

#define LCK_SPIN_TAG_DESTROYED 0x00002007      /* lock marked as Destroyed */
45 
46 #if LCK_MTX_USE_ARCH
47 
/*
 * Kernel-internal mutex. The anonymous union lets the state be accessed
 * either as individual bitfields or as one 32-bit word (lck_mtx_state)
 * for atomic operations. The bit layout must agree with the
 * LCK_MTX_*_MSK masks defined in this header.
 */
typedef struct lck_mtx_s {
	union {
		struct {
			volatile uint32_t
			    lck_mtx_waiters:16,
			    lck_mtx_pri:8, // unused
			    lck_mtx_ilocked:1,
			    lck_mtx_mlocked:1,
			    lck_mtx_spin:1,
			    lck_mtx_profile:1,
			    lck_mtx_pad3:4;
		};
		uint32_t        lck_mtx_state;  /* whole-word view of the bitfields above */
	};
	volatile uint32_t       lck_mtx_owner; /* a ctid_t */
	uint32_t                lck_mtx_grp;   /* lock group reference */
	uint32_t                lck_mtx_padding;
} lck_mtx_t;
66 
/* Masks over lck_mtx_state; each must match the bitfield layout in lck_mtx_t. */
#define LCK_MTX_WAITERS_MSK             0x0000ffff      /* lck_mtx_waiters (bits 0-15) */
#define LCK_MTX_WAITER                  0x00000001      /* increment for a single waiter */
#define LCK_MTX_PRIORITY_MSK            0x00ff0000      /* lck_mtx_pri (bits 16-23, unused) */
#define LCK_MTX_ILOCKED_MSK             0x01000000      /* lck_mtx_ilocked (bit 24) */
#define LCK_MTX_MLOCKED_MSK             0x02000000      /* lck_mtx_mlocked (bit 25) */
#define LCK_MTX_SPIN_MSK                0x04000000      /* lck_mtx_spin (bit 26) */
#define LCK_MTX_PROFILE_MSK             0x08000000      /* lck_mtx_profile (bit 27) */

/* This pattern must subsume the interlocked, mlocked and spin bits */
#define LCK_MTX_TAG_DESTROYED           0x07fe2007      /* lock marked as Destroyed */
77 
78 #endif /* LCK_MTX_USE_ARCH */
79 #elif KERNEL_PRIVATE
80 
/*
 * KERNEL_PRIVATE (non-Mach-kernel) view: opaque placeholders.
 * NOTE(review): the opaque sizes must track the real structure sizes in
 * the MACH_KERNEL_PRIVATE branch above — verify when either side changes.
 */
typedef struct {
	unsigned long opaque[10] __kernel_data_semantics;
} lck_spin_t;

typedef struct {
	unsigned long opaque[2] __kernel_data_semantics;
} lck_mtx_t;

typedef struct {
	unsigned long opaque[10];
} lck_mtx_ext_t;
92 
93 #else /* KERNEL_PRIVATE */
94 
/* Fully opaque forward declarations for code outside KERNEL_PRIVATE. */
typedef struct __lck_spin_t__           lck_spin_t;
typedef struct __lck_mtx_t__            lck_mtx_t;
typedef struct __lck_mtx_ext_t__        lck_mtx_ext_t;
98 
99 #endif /* !KERNEL_PRIVATE */
100 #ifdef  MACH_KERNEL_PRIVATE
101 
102 /*
103  * static panic deadline, in timebase units, for
104  * hw_lock_{bit,lock}{,_nopreempt} and hw_wait_while_equals()
105  */
extern uint64_t _Atomic lock_panic_timeout;

/* Adaptive spin before blocking */
extern uint64_t         MutexSpin;      /* default adaptive-spin duration */
extern uint64_t         low_MutexSpin;  /* lower bound for the spin window */
extern int64_t          high_MutexSpin; /* upper bound; signed — presumably a sentinel value disables the cap, TODO confirm */

#if CONFIG_PV_TICKET
extern bool             has_lock_pv;    /* NOTE(review): paravirtualized ticket-lock support detected — confirm against lock implementation */
#endif
115 #endif
116 #if LCK_MTX_USE_ARCH
117 
/*
 * Outcome of the adaptive spin attempt (see lck_mtx_lock_spinwait_x86()).
 */
typedef enum lck_mtx_spinwait_ret_type {
	LCK_MTX_SPINWAIT_ACQUIRED = 0,                  /* lock acquired while spinning */

	/* Spun but did not acquire; value records why spinning stopped. */
	LCK_MTX_SPINWAIT_SPUN_HIGH_THR = 1,
	LCK_MTX_SPINWAIT_SPUN_OWNER_NOT_CORE = 2,
	LCK_MTX_SPINWAIT_SPUN_NO_WINDOW_CONTENTION = 3,
	LCK_MTX_SPINWAIT_SPUN_SLIDING_THR = 4,

	LCK_MTX_SPINWAIT_NO_SPIN = 5,                   /* never spun at all */
} lck_mtx_spinwait_ret_type_t;
128 
/* Slow-path entry points implemented by the x86 mutex code. */
extern lck_mtx_spinwait_ret_type_t              lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex);
struct turnstile;
extern void             lck_mtx_lock_wait_x86(lck_mtx_t *mutex, struct turnstile **ts);
extern void             lck_mtx_lock_acquire_x86(lck_mtx_t *mutex);

/* Slow paths taken when the inline fast path fails. */
extern void             lck_mtx_lock_slow(lck_mtx_t *lock);
extern boolean_t        lck_mtx_try_lock_slow(lck_mtx_t *lock);
extern void             lck_mtx_unlock_slow(lck_mtx_t *lock);
extern void             lck_mtx_lock_spin_slow(lck_mtx_t *lock);
extern boolean_t        lck_mtx_try_lock_spin_slow(lck_mtx_t *lock);
139 
140 #endif /* LCK_MTX_USE_ARCH */
141 
/* Byte-granularity spin lock primitives (implemented elsewhere). */
extern void             hw_lock_byte_init(volatile uint8_t *lock_byte);
extern void             hw_lock_byte_lock(volatile uint8_t *lock_byte);
extern void             hw_lock_byte_unlock(volatile uint8_t *lock_byte);
/* NOTE(review): presumably services a pending preemption request once it is safe — confirm against implementation. */
extern void             kernel_preempt_check(void);
146 
147 #ifdef LOCK_PRIVATE
148 
#if LCK_MTX_USE_ARCH
/* Waiters block on the address of the owner field as the wait event. */
#define LCK_MTX_EVENT(lck)      CAST_EVENT64_T(&(lck)->lck_mtx_owner)
/*
 * Inverse mapping: recover the mutex from a wait event.
 * Fixed to reference the macro parameter `e`; the previous body
 * referenced the bare identifier `event`, which compiled only when
 * the caller's argument happened to be named `event`.
 */
#define LCK_EVENT_TO_MUTEX(e)   __container_of((uint32_t *)(e), lck_mtx_t, lck_mtx_owner)
#define LCK_MTX_HAS_WAITERS(l)  ((l)->lck_mtx_waiters != 0)
#endif /* LCK_MTX_USE_ARCH */
154 
/* NOTE(review): tunables consumed by the shared lock implementation — semantics defined there. */
#define LOCK_SNOOP_SPINS        1000
#define LOCK_PRETEST            1

/* Bind the generic lock-layer thread hooks to the x86 preemption primitives.
 * The thread argument (t) is intentionally ignored on this architecture. */
#define lock_disable_preemption_for_thread(t)   disable_preemption_internal()
#define lock_preemption_level_for_thread(t)     get_preemption_level()
#define lock_preemption_disabled_for_thread(t)  (get_preemption_level() > 0)
#define lock_enable_preemption()                enable_preemption_internal()
/* Overrides current_thread() with the fast inline variant within LOCK_PRIVATE code. */
#define current_thread()                        current_thread_fast()
163 
/*
 * One polling step of a spin-wait loop: relaxed-load *ptr into load_var,
 * evaluate cond_expr into cond_result, and issue cpu_pause() when the
 * condition is not yet satisfied (eases the pipeline while spinning).
 */
#define __hw_spin_wait_load(ptr, load_var, cond_result, cond_expr) ({ \
	load_var = os_atomic_load(ptr, relaxed);                                \
	cond_result = (cond_expr);                                              \
	if (!(cond_result)) {                                                   \
	        cpu_pause();                                                    \
	}                                                                       \
})
171 
172 #endif /* LOCK_PRIVATE */
173 #endif /* MACH_KERNEL_PRIVATE */
174 #endif /* _I386_LOCKS_H_ */
175