xref: /xnu-8020.140.41/osfmk/kern/lock_stat.h (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2018 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _KERN_LOCKSTAT_H
29 #define _KERN_LOCKSTAT_H
30 
31 #include <machine/locks.h>
32 #include <machine/atomic.h>
33 #include <kern/lock_group.h>
34 
35 __BEGIN_DECLS
36 #pragma GCC visibility push(hidden)
37 
38 /*
39  * DTrace lockstat probe definitions
40  *
41  */
42 
/*
 * Probe identifiers for the DTrace lockstat provider.
 *
 * NOTE(review): these enumerators index lockstat_probemap[] (declared
 * below with LS_NPROBES entries), so their relative order and values are
 * part of the lockstat provider contract — do not reorder or insert
 * entries without updating the provider tables to match.
 */
enum lockstat_probe_id {
	/* Spinlocks */
	LS_LCK_SPIN_LOCK_ACQUIRE,	/* spin lock acquired */
	LS_LCK_SPIN_LOCK_SPIN,		/* spun waiting for a spin lock */
	LS_LCK_SPIN_UNLOCK_RELEASE,	/* spin lock released */

	/*
	 * Mutexes can also have interlock-spin events, which are
	 * unique to our lock implementation.
	 */
	LS_LCK_MTX_LOCK_ACQUIRE,
	LS_LCK_MTX_LOCK_BLOCK,
	LS_LCK_MTX_LOCK_ADAPTIVE_SPIN,
	LS_LCK_MTX_LOCK_SPIN_SPIN,
	LS_LCK_MTX_LOCK_SPIN_ACQUIRE,
	LS_LCK_MTX_LOCK_ILK_SPIN,	/* spun on the mutex interlock */
	LS_LCK_MTX_TRY_LOCK_ACQUIRE,
	LS_LCK_MTX_TRY_LOCK_SPIN_ACQUIRE,
	LS_LCK_MTX_UNLOCK_RELEASE,

	/*
	 * Provide a parallel set for indirect mutexes
	 */
	LS_LCK_MTX_EXT_LOCK_ACQUIRE,
	LS_LCK_MTX_EXT_LOCK_BLOCK,
	LS_LCK_MTX_EXT_LOCK_SPIN_SPIN,
	LS_LCK_MTX_EXT_LOCK_ADAPTIVE_SPIN,
	LS_LCK_MTX_EXT_LOCK_ILK_SPIN,
	LS_LCK_MTX_EXT_UNLOCK_RELEASE,

	/*
	 * Reader-writer locks support a blocking upgrade primitive, as
	 * well as the possibility of spinning on the interlock.
	 */
	LS_LCK_RW_LOCK_SHARED_ACQUIRE,
	LS_LCK_RW_LOCK_SHARED_BLOCK,
	LS_LCK_RW_LOCK_SHARED_SPIN,

	LS_LCK_RW_LOCK_EXCL_ACQUIRE,
	LS_LCK_RW_LOCK_EXCL_BLOCK,
	LS_LCK_RW_LOCK_EXCL_SPIN,

	LS_LCK_RW_DONE_RELEASE,

	LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,
	LS_LCK_RW_TRY_LOCK_SHARED_SPIN,

	LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,
	LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN,

	LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,
	LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN,
	LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK,

	LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE,
	LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN,

	/* Ticket lock */
	LS_LCK_TICKET_LOCK_ACQUIRE,
	LS_LCK_TICKET_LOCK_RELEASE,
	LS_LCK_TICKET_LOCK_SPIN,

	/* Total probe count; sizes lockstat_probemap[]. Must remain last. */
	LS_NPROBES
};
107 
108 #if XNU_KERNEL_PRIVATE
109 
/*
 * Enable, disable, or query a single per-lock-group statistic counter.
 * Definitions live in the lock statistics implementation (not in this
 * header).
 */
extern void lck_grp_stat_enable(lck_grp_stat_t *stat);

extern void lck_grp_stat_disable(lck_grp_stat_t *stat);

extern bool lck_grp_stat_enabled(lck_grp_stat_t *stat);
115 
116 #if CONFIG_DTRACE
/*
 * Time threshold before dtrace lockstat spin
 * probes are triggered
 */
extern machine_timeout32_t dtrace_spin_threshold;
/* One enable/ID slot per lockstat_probe_id; nonzero means probe enabled. */
extern uint32_t lockstat_probemap[LS_NPROBES];

/* Fire a DTrace probe: (probe id, lock address, up to four extra args). */
extern void dtrace_probe(uint32_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);
extern void lockprof_invoke(lck_grp_t*, lck_grp_stat_t*, uint64_t);
127 
/*
 * Macros to record lockstat probes.
 *
 * LOCKSTAT_RECORD(probe, lp, ...) fires DTrace probe `probe` against the
 * lock `lp` when it is enabled in lockstat_probemap[], forwarding up to
 * four extra arguments; missing arguments are padded with 0 by the
 * LOCKSTAT_RECORD_/LOCKSTAT_RECORD__ expansion helpers.
 *
 * LOCKSTAT_RECORD4 is wrapped in do { } while (0) so an invocation
 * followed by a semicolon behaves as a single statement (safe inside
 * unbraced if/else) — a bare compound statement would break
 * `if (c) LOCKSTAT_RECORD(...); else ...`.
 */
#define LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)             \
	do {                                                                \
	        uint32_t id;                                                \
	        if (__improbable((id = lockstat_probemap[(probe)]) != 0)) { \
	                dtrace_probe(id, (uintptr_t)(lp), (arg0),           \
	                    (arg1), (arg2), (arg3));                        \
	        }                                                           \
	} while (0)
#define LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD__(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD(probe, lp, ...) LOCKSTAT_RECORD__(probe, lp, ##__VA_ARGS__, 0, 0, 0, 0)
142 
143 #endif /* CONFIG_DTRACE */
144 #endif /* XNU_KERNEL_PRIVATE */
145 #if MACH_KERNEL_PRIVATE
146 
#if LOCK_STATS
/* Per-group statistic updaters, defined in the lock stats implementation. */
extern void __lck_grp_spin_update_held(lck_grp_t *grp);
extern void __lck_grp_spin_update_miss(lck_grp_t *grp);
extern void __lck_grp_spin_update_spin(lck_grp_t *grp, uint64_t time);
extern void __lck_grp_ticket_update_held(lck_grp_t *grp);
extern void __lck_grp_ticket_update_miss(lck_grp_t *grp);
extern void __lck_grp_ticket_update_spin(lck_grp_t *grp, uint64_t time);
/* Forward a statistics update to `fn` when LOCK_STATS is configured in. */
#define LOCK_STATS_CALL(fn, ...)  fn(__VA_ARGS__)
#else
/* Statistics disabled: the call compiles away entirely. */
#define LOCK_STATS_CALL(fn, ...) ((void)0)
#endif
158 
/*
 * Record a spin-lock acquisition: fire the DTrace acquire probe (when
 * CONFIG_DTRACE is built and the probe is enabled) and update the lock
 * group's held statistic (no-op unless LOCK_STATS is configured).
 */
static inline void
lck_grp_spin_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif
	LOCK_STATS_CALL(__lck_grp_spin_update_held, grp);
}
168 
/*
 * Record a failed spin-lock acquisition attempt in the group statistics
 * (no DTrace probe; no-op unless LOCK_STATS is configured).
 */
static inline void
lck_grp_spin_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	LOCK_STATS_CALL(__lck_grp_spin_update_miss, grp);
}
175 
/*
 * Record time spent spinning for a spin lock.
 * The DTrace spin probe fires only when `time` exceeds
 * dtrace_spin_threshold (read with relaxed atomic ordering); the group
 * spin statistic is always updated when LOCK_STATS is configured.
 */
static inline void
lck_grp_spin_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > os_atomic_load(&dtrace_spin_threshold, relaxed)) {
		LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
	LOCK_STATS_CALL(__lck_grp_spin_update_spin, grp, time);
}
187 
188 static inline bool
lck_grp_spin_spin_enabled(void * lock LCK_GRP_ARG (lck_grp_t * grp))189 lck_grp_spin_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
190 {
191 #pragma unused(lock)
192 	bool enabled = false;
193 #if CONFIG_DTRACE
194 	enabled |= lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0;
195 #endif /* CONFIG_DTRACE */
196 #if LOCK_STATS
197 	enabled |= (grp && lck_grp_stat_enabled(&grp->lck_grp_stats.lgss_spin_spin));
198 #endif /* LOCK_STATS */
199 	return enabled;
200 }
201 
/*
 * Record a ticket-lock acquisition: fire the DTrace acquire probe (when
 * CONFIG_DTRACE is built and the probe is enabled) and update the lock
 * group's held statistic (no-op unless LOCK_STATS is configured).
 */
static inline void
lck_grp_ticket_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
#endif
	LOCK_STATS_CALL(__lck_grp_ticket_update_held, grp);
}
211 
/*
 * Record a failed ticket-lock acquisition attempt in the group statistics
 * (no DTrace probe; no-op unless LOCK_STATS is configured).
 */
static inline void
lck_grp_ticket_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
	LOCK_STATS_CALL(__lck_grp_ticket_update_miss, grp);
}
218 
219 static inline bool
lck_grp_ticket_spin_enabled(void * lock LCK_GRP_ARG (lck_grp_t * grp))220 lck_grp_ticket_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
221 {
222 #pragma unused(lock)
223 	bool enabled = false;
224 #if CONFIG_DTRACE
225 	enabled |= lockstat_probemap[LS_LCK_TICKET_LOCK_SPIN] != 0;
226 #endif /* CONFIG_DTRACE */
227 #if LOCK_STATS
228 	enabled |= (grp && lck_grp_stat_enabled(&grp->lck_grp_stats.lgss_ticket_spin));
229 #endif /* LOCK_STATS */
230 	return enabled;
231 }
232 
/*
 * Record time spent spinning for a ticket lock.
 * The DTrace spin probe fires only when `time` exceeds
 * dtrace_spin_threshold (read with relaxed atomic ordering); the group
 * spin statistic is always updated when LOCK_STATS is configured.
 */
static inline void
lck_grp_ticket_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
	if (time > os_atomic_load(&dtrace_spin_threshold, relaxed)) {
		LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
	}
#endif /* CONFIG_DTRACE */
	LOCK_STATS_CALL(__lck_grp_ticket_update_spin, grp, time);
}
244 
245 #if __x86_64__
246 /*
247  * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
248  * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
249  * as a 64-bit quantity (the new x86 specific statistics are also maintained
250  * as 32-bit quantities).
251  *
252  * Enable this preprocessor define to record the first miss alone
253  * By default, we count every miss, hence multiple misses may be
254  * recorded for a single lock acquire attempt via lck_mtx_lock
255  */
/* Default: count every miss (see the LOG_FIRST_MISS_ALONE note above). */
#undef LOG_FIRST_MISS_ALONE

/* Indirect (lck_mtx_ext) mutex statistic updaters — x86_64 only. */
extern void __lck_grp_mtx_update_miss(lck_grp_t *grp);
extern void __lck_grp_mtx_update_direct_wait(lck_grp_t *grp);
extern void __lck_grp_mtx_update_wait(lck_grp_t *grp);
extern void __lck_grp_mtx_update_held(lck_grp_t *grp);
262 
/*
 * Record a miss on an indirect mutex acquire attempt.
 * With LOG_FIRST_MISS_ALONE defined, bit 0 of *first_miss marks that a
 * miss was already counted for this acquire attempt, so only the first
 * miss is recorded; by default every miss is counted.
 */
static inline void
lck_grp_mtx_update_miss(struct _lck_mtx_ext_ *lock, int *first_miss)
{
#pragma unused(first_miss)
#if LOG_FIRST_MISS_ALONE
	if (*first_miss & 1) {
		return;
	}
	*first_miss |= 1;
#endif /* LOG_FIRST_MISS_ALONE */
	__lck_grp_mtx_update_miss(lock->lck_mtx_grp);
}
275 
276 static void inline
lck_grp_mtx_update_direct_wait(struct _lck_mtx_ext_ * lock)277 lck_grp_mtx_update_direct_wait(struct _lck_mtx_ext_ *lock)
278 {
279 	__lck_grp_mtx_update_direct_wait(lock->lck_mtx_grp);
280 }
281 
282 static void inline
lck_grp_mtx_update_wait(struct _lck_mtx_ext_ * lock,int * first_miss)283 lck_grp_mtx_update_wait(struct _lck_mtx_ext_ *lock, int *first_miss)
284 {
285 #pragma unused(first_miss)
286 #if LOG_FIRST_MISS_ALONE
287 	if (*first_miss & 2) {
288 		return;
289 	}
290 	*first_miss |= 2;
291 #endif /* LOG_FIRST_MISS_ALONE */
292 	__lck_grp_mtx_update_wait(lock->lck_mtx_grp);
293 }
294 
295 static void inline
lck_grp_mtx_update_held(struct _lck_mtx_ext_ * lock)296 lck_grp_mtx_update_held(struct _lck_mtx_ext_ *lock)
297 {
298 	__lck_grp_mtx_update_held(lock->lck_mtx_grp);
299 }
300 
301 #endif /* __x86_64__ */
302 #endif /* MACH_KERNEL_PRIVATE */
303 
304 #pragma GCC visibility pop
305 __END_DECLS
306 
307 #endif /* _KERN_LOCKSTAT_H */
308