/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#define LOCK_PRIVATE 1

#include <mach_ldebug.h>
#include <debug.h>

#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#if __x86_64__
#include <i386/tsc.h>
#endif

#include <kern/compact_id.h>
#include <kern/kalloc.h>
#include <kern/lock_stat.h>
#include <kern/locks.h>

#include <os/atomic_private.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_map_xnu.h>

static KALLOC_TYPE_DEFINE(KT_LCK_GRP_ATTR, lck_grp_attr_t, KT_PRIV_ACCT);
static KALLOC_TYPE_DEFINE(KT_LCK_GRP, lck_grp_t, KT_PRIV_ACCT);
static KALLOC_TYPE_DEFINE(KT_LCK_ATTR, lck_attr_t, KT_PRIV_ACCT);

SECURITY_READ_ONLY_LATE(lck_attr_t) lck_attr_default;
static SECURITY_READ_ONLY_LATE(lck_grp_attr_t) lck_grp_attr_default;
static lck_grp_t lck_grp_compat_grp;
COMPACT_ID_TABLE_DEFINE(static, lck_grp_table);
struct lck_debug_state lck_debug_state;

#pragma mark lock group attributes

lck_grp_attr_t  *
lck_grp_attr_alloc_init(void)
{
	lck_grp_attr_t  *attr;

	attr = zalloc(KT_LCK_GRP_ATTR);
	lck_grp_attr_setdefault(attr);
	return attr;
}

void
lck_grp_attr_setdefault(lck_grp_attr_t *attr)
{
	attr->grp_attr_val = lck_grp_attr_default.grp_attr_val;
}

void
lck_grp_attr_setstat(lck_grp_attr_t *attr __unused)
{
	attr->grp_attr_val |= LCK_GRP_ATTR_STAT;
}


void
lck_grp_attr_free(lck_grp_attr_t *attr)
{
	zfree(KT_LCK_GRP_ATTR, attr);
}

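/*
 * A minimal usage sketch of the attribute/group API (illustrative only;
 * "my_grp" and "my subsystem" are made-up names, not part of xnu):
 *
 *	lck_grp_attr_t *ga = lck_grp_attr_alloc_init();
 *	lck_grp_attr_setstat(ga);                         // opt this group into statistics
 *	lck_grp_t *my_grp = lck_grp_alloc_init("my subsystem", ga);
 *	lck_grp_attr_free(ga);                            // the group keeps its own copy of the flags
 *	...
 *	lck_grp_free(my_grp);                             // drops the allocation reference
 */
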
#pragma mark lock groups

__startup_func
static void
lck_group_init(void)
{
	if (LcksOpts & LCK_OPTION_ENABLE_STAT) {
		lck_grp_attr_default.grp_attr_val |= LCK_GRP_ATTR_STAT;
	}
	if (LcksOpts & LCK_OPTION_ENABLE_TIME_STAT) {
		lck_grp_attr_default.grp_attr_val |= LCK_GRP_ATTR_TIME_STAT;
	}
	if (LcksOpts & LCK_OPTION_ENABLE_DEBUG) {
		lck_grp_attr_default.grp_attr_val |= LCK_GRP_ATTR_DEBUG;
	}

	if (LcksOpts & LCK_OPTION_ENABLE_DEBUG) {
		lck_attr_default.lck_attr_val = LCK_ATTR_DEBUG;
	} else {
		lck_attr_default.lck_attr_val = LCK_ATTR_NONE;
	}

	/*
	 * This is a little gross: it allows us to use the table before
	 * compact_table_init() is called on it, but we have a chicken
	 * and egg problem otherwise.
	 *
	 * compact_table_init() really only inits the ticket lock
	 * with the proper lock group.
	 */
	lck_grp_init(&lck_grp_compat_grp, "Compatibility APIs",
	    &lck_grp_attr_default);
	*compact_id_resolve(&lck_grp_table, 0) = LCK_GRP_NULL;
}
STARTUP(LOCKS, STARTUP_RANK_FIRST, lck_group_init);

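/*
 * Early-boot hook for statically declared lock groups: each startup spec
 * is initialized here with its declared flags merged with the boot-time
 * defaults computed in lck_group_init() above.
 */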
__startup_func
void
lck_grp_startup_init(struct lck_grp_spec *sp)
{
	lck_grp_init_flags(sp->grp, sp->grp_name, sp->grp_flags |
	    lck_grp_attr_default.grp_attr_val);
}

bool
lck_grp_has_stats(lck_grp_t *grp)
{
	return grp->lck_grp_attr_id & LCK_GRP_ATTR_STAT;
}

lck_grp_t *
lck_grp_alloc_init(const char *grp_name, lck_grp_attr_t *attr)
{
	lck_grp_t *grp;

	if (attr == LCK_GRP_ATTR_NULL) {
		attr = &lck_grp_attr_default;
	}
	grp = zalloc(KT_LCK_GRP);
	lck_grp_init_flags(grp, grp_name,
	    attr->grp_attr_val | LCK_GRP_ATTR_ALLOCATED);
	return grp;
}

void
lck_grp_init(lck_grp_t *grp, const char *grp_name, lck_grp_attr_t *attr)
{
	if (attr == LCK_GRP_ATTR_NULL) {
		attr = &lck_grp_attr_default;
	}
	lck_grp_init_flags(grp, grp_name, attr->grp_attr_val);
}

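/*
 * Common initialization path: zero the group, set up its refcount and name,
 * enable the requested DTrace statistics, then publish it by allocating a
 * compact ID which is stored in the low bits of lck_grp_attr_id alongside
 * the attribute flags.
 */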
lck_grp_t *
lck_grp_init_flags(lck_grp_t *grp, const char *grp_name, lck_grp_options_t flags)
{
	bzero(grp, sizeof(lck_grp_t));
	os_ref_init_raw(&grp->lck_grp_refcnt, NULL);
	(void)strlcpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

#if CONFIG_DTRACE
	lck_grp_stats_t *stats = &grp->lck_grp_stats;

	if (flags & LCK_GRP_ATTR_STAT) {
		lck_grp_stat_enable(&stats->lgss_spin_held);
		lck_grp_stat_enable(&stats->lgss_spin_miss);

		lck_grp_stat_enable(&stats->lgss_ticket_held);
		lck_grp_stat_enable(&stats->lgss_ticket_miss);

		lck_grp_stat_enable(&stats->lgss_mtx_held);
		lck_grp_stat_enable(&stats->lgss_mtx_direct_wait);
		lck_grp_stat_enable(&stats->lgss_mtx_miss);
		lck_grp_stat_enable(&stats->lgss_mtx_wait);
	}
	if (flags & LCK_GRP_ATTR_TIME_STAT) {
		lck_grp_stat_enable(&stats->lgss_spin_spin);
		lck_grp_stat_enable(&stats->lgss_ticket_spin);
	}
#endif /* CONFIG_DTRACE */

	/* must be last as it publishes the group */
	if (startup_phase > STARTUP_SUB_LOCKS) {
		compact_id_table_lock(&lck_grp_table);
	}
	flags |= compact_id_get_locked(&lck_grp_table, LCK_GRP_ATTR_ID_MASK, grp);
	grp->lck_grp_attr_id = flags;
	if (startup_phase > STARTUP_SUB_LOCKS) {
		compact_id_table_unlock(&lck_grp_table);
	}

	return grp;
}

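/*
 * lck_grp_attr_id values carry the compact table index in their low bits
 * (LCK_GRP_ATTR_ID_MASK); masking off the attribute flags yields the index
 * used to look the group back up.
 */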
lck_grp_t *
lck_grp_resolve(uint32_t grp_attr_id)
{
	grp_attr_id &= LCK_GRP_ATTR_ID_MASK;
	return *compact_id_resolve(&lck_grp_table, grp_attr_id);
}

__abortlike
static void
__lck_grp_assert_id_panic(lck_grp_t *grp, uint32_t grp_attr_id)
{
	panic("lck_grp_t %p has ID %d, but %d was expected", grp,
	    grp->lck_grp_attr_id & LCK_GRP_ATTR_ID_MASK,
	    grp_attr_id & LCK_GRP_ATTR_ID_MASK);
}

__attribute__((always_inline))
void
lck_grp_assert_id(lck_grp_t *grp, uint32_t grp_attr_id)
{
	if ((grp->lck_grp_attr_id ^ grp_attr_id) & LCK_GRP_ATTR_ID_MASK) {
		__lck_grp_assert_id_panic(grp, grp_attr_id);
	}
}

static void
lck_grp_destroy(lck_grp_t *grp)
{
	compact_id_put(&lck_grp_table,
	    grp->lck_grp_attr_id & LCK_GRP_ATTR_ID_MASK);
	zfree(KT_LCK_GRP, grp);
}

void
lck_grp_free(lck_grp_t *grp)
{
	lck_grp_deallocate(grp, NULL);
}

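/*
 * Only groups created through lck_grp_alloc_init() (LCK_GRP_ATTR_ALLOCATED)
 * are reference counted and eventually destroyed; statically initialized
 * groups are never freed.  The optional `cnt` parameter lets callers keep a
 * counter of their own updated alongside the group's refcount.
 */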
void
lck_grp_reference(lck_grp_t *grp, uint32_t *cnt)
{
	if (cnt) {
		os_atomic_inc(cnt, relaxed);
	}
	if (grp->lck_grp_attr_id & LCK_GRP_ATTR_ALLOCATED) {
		os_ref_retain_raw(&grp->lck_grp_refcnt, NULL);
	}
}

void
lck_grp_deallocate(lck_grp_t *grp, uint32_t *cnt)
{
	if (cnt) {
		os_atomic_dec(cnt, relaxed);
	}
	if ((grp->lck_grp_attr_id & LCK_GRP_ATTR_ALLOCATED) &&
	    os_ref_release_raw(&grp->lck_grp_refcnt, 0) == 0) {
		lck_grp_destroy(grp);
	}
}

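/*
 * Iterate every registered group.  A minimal sketch of a caller (the body
 * is illustrative only):
 *
 *	__block uint32_t n = 0;
 *	lck_grp_foreach(^bool (lck_grp_t *grp __unused) {
 *		n++;
 *		return true;    // keep iterating; returning false stops the walk
 *	});
 */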
void
lck_grp_foreach(bool (^block)(lck_grp_t *))
{
	compact_id_for_each(&lck_grp_table, 64, (bool (^)(void *))block);
}

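/*
 * Lock debugging features are reference counted: the first enable sets the
 * feature's bit in lds_value, the last disable clears it.  The compact ID
 * table lock doubles as the serialization point for these counters.
 */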
void
lck_grp_enable_feature(lck_debug_feature_t feat)
{
	uint32_t bit = 1u << feat;

	compact_id_table_lock(&lck_grp_table);
	if (lck_debug_state.lds_counts[feat]++ == 0) {
		os_atomic_or(&lck_debug_state.lds_value, bit, relaxed);
	}
	compact_id_table_unlock(&lck_grp_table);
}

void
lck_grp_disable_feature(lck_debug_feature_t feat)
{
	uint32_t bit = 1u << feat;
	long v;

	compact_id_table_lock(&lck_grp_table);
	v = --lck_debug_state.lds_counts[feat];
	if (v < 0) {
		panic("lck_debug_state: feature %d imbalance", feat);
	}
	if (v == 0) {
		os_atomic_andnot(&lck_debug_state.lds_value, bit, relaxed);
	}
	compact_id_table_unlock(&lck_grp_table);
}

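/*
 * MIG-exposed host interface: snapshots the per-group counters into a
 * freshly allocated buffer in ipc_kernel_map, trims the unused tail, and
 * hands the result back to the caller as a vm_map_copy_t.
 */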
kern_return_t
host_lockgroup_info(
	host_t                   host,
	lockgroup_info_array_t  *lockgroup_infop,
	mach_msg_type_number_t  *lockgroup_infoCntp)
{
	lockgroup_info_t *info;
	vm_offset_t       addr;
	vm_size_t         size, used;
	vm_size_t         vmsize, vmused;
	uint32_t          needed;
	__block uint32_t  count = 0;
	vm_map_copy_t     copy;
	kern_return_t     kr;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	/*
	 * Add about 12% of slop (needed / 8) here: lock groups are mostly
	 * allocated during boot or kext loads, and the set is extremely
	 * unlikely to grow rapidly.
	 */
	needed  = os_atomic_load(&lck_grp_table.cidt_count, relaxed);
	needed += needed / 8;
	size    = needed * sizeof(lockgroup_info_t);
	vmsize = vm_map_round_page(size, VM_MAP_PAGE_MASK(ipc_kernel_map));
	kr     = kmem_alloc(ipc_kernel_map, &addr, vmsize,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_IPC);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	info = (lockgroup_info_t *)addr;

	lck_grp_foreach(^bool (lck_grp_t *grp) {
		info[count].lock_spin_cnt = grp->lck_grp_spincnt;
		info[count].lock_rw_cnt   = grp->lck_grp_rwcnt;
		info[count].lock_mtx_cnt  = grp->lck_grp_mtxcnt;

#if CONFIG_DTRACE
		info[count].lock_spin_held_cnt = grp->lck_grp_stats.lgss_spin_held.lgs_count;
		info[count].lock_spin_miss_cnt = grp->lck_grp_stats.lgss_spin_miss.lgs_count;

		// Historically on x86, held was used for "direct wait" and util for "held"
		info[count].lock_mtx_util_cnt = grp->lck_grp_stats.lgss_mtx_held.lgs_count;
		info[count].lock_mtx_held_cnt = grp->lck_grp_stats.lgss_mtx_direct_wait.lgs_count;
		info[count].lock_mtx_miss_cnt = grp->lck_grp_stats.lgss_mtx_miss.lgs_count;
		info[count].lock_mtx_wait_cnt = grp->lck_grp_stats.lgss_mtx_wait.lgs_count;
#endif /* CONFIG_DTRACE */

		memcpy(info[count].lockgroup_name, grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		return ++count >= needed ? false : true;
	});

	/*
	 * We might have found fewer groups than `needed`;
	 * get rid of the excess now:
	 * - [0, used) is what we want to return
	 * - [0, size) is what we allocated
	 */
	used   = count * sizeof(lockgroup_info_t);
	vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));

	if (vmused < vmsize) {
		kmem_free(ipc_kernel_map, addr + vmused, vmsize - vmused);
	}

	kr = vm_map_unwire(ipc_kernel_map, addr, addr + vmused, FALSE);
	assert(kr == KERN_SUCCESS);

	kr = vm_map_copyin(ipc_kernel_map, addr, used, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *)copy;
	*lockgroup_infoCntp = count;

	return KERN_SUCCESS;
}

#pragma mark lock attributes

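/*
 * Early-boot hook for statically declared lock attributes: apply the
 * boot-time default, then the spec's set/clear masks.
 */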
__startup_func
void
lck_attr_startup_init(struct lck_attr_startup_spec *sp)
{
	lck_attr_t *attr = sp->lck_attr;
	lck_attr_setdefault(attr);
	attr->lck_attr_val |= sp->lck_attr_set_flags;
	attr->lck_attr_val &= ~sp->lck_attr_clear_flags;
}

lck_attr_t *
lck_attr_alloc_init(void)
{
	lck_attr_t      *attr;

	attr = zalloc(KT_LCK_ATTR);
	lck_attr_setdefault(attr);
	return attr;
}


void
lck_attr_setdefault(lck_attr_t *attr)
{
	attr->lck_attr_val = lck_attr_default.lck_attr_val;
}


void
lck_attr_setdebug(lck_attr_t *attr)
{
	os_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed);
}

void
lck_attr_cleardebug(lck_attr_t *attr)
{
	os_atomic_andnot(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed);
}

void
lck_attr_rw_shared_priority(lck_attr_t *attr)
{
	os_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY, relaxed);
}


void
lck_attr_free(lck_attr_t *attr)
{
	zfree(KT_LCK_ATTR, attr);
}

#pragma mark lock stat
#if CONFIG_DTRACE

void
lck_grp_stat_enable(lck_grp_stat_t *stat)
{
	/* callers ensure this is properly synchronized */
	stat->lgs_enablings++;
}

void
lck_grp_stat_disable(lck_grp_stat_t *stat)
{
	stat->lgs_enablings--;
}

bool
lck_grp_stat_enabled(lck_grp_stat_t *stat)
{
	return stat->lgs_enablings != 0;
}


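/*
 * Bump an event counter.  When a sampling limit (lgs_limit) is set, the
 * lockprof probe fires once every lgs_limit increments rather than on
 * every event.
 */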
__attribute__((always_inline))
void
lck_grp_stat_inc(lck_grp_t *grp, lck_grp_stat_t *stat, bool always)
{
#pragma unused(grp)
	if (always || lck_grp_stat_enabled(stat)) {
		__unused uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed);
		if (__improbable(stat->lgs_limit && (val % (stat->lgs_limit)) == 0)) {
			lockprof_probe(grp, stat, val);
		}
	}
}

#if LOCK_STATS

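/*
 * Accumulate a time statistic.  Here lgs_limit acts as a time threshold:
 * the probe fires (possibly repeatedly) so that roughly one firing occurs
 * per lgs_limit units of accumulated time.
 */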
static inline void
lck_grp_inc_time_stats(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t time)
{
	if (lck_grp_stat_enabled(stat)) {
		__unused uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed);
		if (__improbable(stat->lgs_limit)) {
			while (__improbable(time > stat->lgs_limit)) {
				time -= stat->lgs_limit;
				lockprof_probe(grp, stat, val);
			}
			if (__improbable(((val % stat->lgs_limit) + time) > stat->lgs_limit)) {
				lockprof_probe(grp, stat, val);
			}
		}
	}
}

void
__lck_grp_spin_update_held(lck_grp_t *grp)
{
	if (grp) {
		lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_spin_held, false);
	}
}

void
__lck_grp_spin_update_miss(lck_grp_t *grp)
{
	if (grp) {
		lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_spin_miss, false);
	}
}

void
__lck_grp_spin_update_spin(lck_grp_t *grp, uint64_t time)
{
	if (grp) {
		lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_spin;
		lck_grp_inc_time_stats(grp, stat, time);
	}
}

void
__lck_grp_ticket_update_held(lck_grp_t *grp)
{
	if (grp) {
		lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_ticket_held, false);
	}
}

void
__lck_grp_ticket_update_miss(lck_grp_t *grp)
{
	if (grp) {
		lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_ticket_miss, false);
	}
}

void
__lck_grp_ticket_update_spin(lck_grp_t *grp, uint64_t time)
{
	if (grp) {
		lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_spin;
		lck_grp_inc_time_stats(grp, stat, time);
	}
}

#endif /* LOCK_STATS */

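/*
 * Fire a DTrace lockstat probe with the time spent since `start` (in
 * timebase units, converted to nanoseconds on x86), but only when the
 * probe is actually enabled and a start timestamp was taken.
 */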
void
lck_mtx_time_stat_record(
	enum lockstat_probe_id  pid,
	lck_mtx_t              *mtx,
	uint32_t                grp_attr_id,
	uint64_t                start)
{
	uint32_t id = lockstat_probemap[pid];

	if (__improbable(start && id)) {
		uint64_t delta = ml_get_timebase() - start;
		lck_grp_t *grp = lck_grp_resolve(grp_attr_id);

#if __x86_64__
		delta = tmrCvt(delta, tscFCvtt2n);
#endif
		dtrace_probe(id, (uintptr_t)mtx, delta, (uintptr_t)grp, 0, 0);
	}
}

#endif /* CONFIG_DTRACE */