xref: /xnu-8796.121.2/osfmk/arm/locks.h (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1 /*
2  * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _ARM_LOCKS_H_
30 #define _ARM_LOCKS_H_
31 
32 #ifdef  MACH_KERNEL_PRIVATE
/* 1: lck_spin_t is lck_ticket_t; 0 (default): the hslock-based spin lock defined below */
33 #ifndef LCK_SPIN_IS_TICKET_LOCK
34 #define LCK_SPIN_IS_TICKET_LOCK 0
35 #endif
36 #endif /* MACH_KERNEL_PRIVATE */
37 
38 #include <kern/lock_types.h>
39 #ifdef  MACH_KERNEL_PRIVATE
40 #include <kern/sched_hygiene.h>
41 #include <kern/startup.h>
42 #if LCK_SPIN_IS_TICKET_LOCK
43 #include <kern/ticket_lock.h>
44 #endif
45 #endif
46 
47 #ifdef  MACH_KERNEL_PRIVATE
48 
/* Lock attribute / diagnostic flag bits (the "...b" defines give bit indices) */
49 #define enaLkDeb                0x00000001      /* Request debug in default attribute */
50 #define enaLkStat               0x00000002      /* Request statistic in default attribute */
51 #define disLkRWPrio             0x00000004      /* Disable RW lock priority promotion */
52 #define enaLkTimeStat           0x00000008      /* Request time statistics in default attribute */
53 #define disLkRWDebug            0x00000010      /* Disable RW lock best-effort debugging */
54 
55 #define disLkType               0x80000000      /* Disable type checking */
56 #define disLktypeb              0               /* bit index of disLkType, counted from the MSB */
57 #define disLkThread             0x40000000      /* Disable ownership checking */
58 #define disLkThreadb            1               /* bit index of disLkThread, counted from the MSB */
59 #define enaLkExtStck            0x20000000      /* Enable extended backtrace */
60 #define enaLkExtStckb           2               /* bit index of enaLkExtStck, counted from the MSB */
61 #define disLkMyLck              0x10000000      /* Disable recursive lock detection */
62 #define disLkMyLckb             3               /* bit index of disLkMyLck, counted from the MSB */
63 
64 #endif
65 
66 #ifdef  MACH_KERNEL_PRIVATE
67 #if LCK_SPIN_IS_TICKET_LOCK
/* Spin locks are ticket locks in this configuration */
68 typedef lck_ticket_t lck_spin_t;
69 #else
/*
 * Default spin lock: a raw hardware spin lock plus a type tag used for
 * sanity checking (see LCK_SPIN_TYPE / LCK_SPIN_TYPE_DESTROYED below).
 */
70 typedef struct {
71 	struct hslock   hwlock;         /* low-level hardware spin lock */
72 	unsigned long   type;           /* LCK_SPIN_TYPE or LCK_SPIN_TYPE_DESTROYED */
73 } lck_spin_t;
74 
/* Convenience accessor for the raw lock word inside the hslock */
75 #define lck_spin_data hwlock.lock_data
76 
77 #define LCK_SPIN_TAG_DESTROYED  0xdead  /* lock marked as Destroyed */
78 
79 #define LCK_SPIN_TYPE           0x00000011      /* type tag of a valid spin lock */
80 #define LCK_SPIN_TYPE_DESTROYED 0x000000ee      /* type tag of a destroyed spin lock */
81 #endif
82 
83 #elif KERNEL_PRIVATE
84 
/*
 * Opaque versions of the lock types exposed to KERNEL_PRIVATE clients:
 * contents are hidden behind uintptr_t filler.
 * NOTE(review): the opaque sizes presumably mirror the MACH_KERNEL_PRIVATE
 * layouts -- confirm against the in-kernel definitions.
 */
85 typedef struct {
86 	uintptr_t opaque[2] __kernel_data_semantics;
87 } lck_spin_t;
88 
89 typedef struct {
90 	uintptr_t opaque[2] __kernel_data_semantics;
91 } lck_mtx_t;
92 
93 typedef struct {
94 	uintptr_t opaque[16];
95 } lck_mtx_ext_t;
96 
97 #else
98 
/* Fully opaque lock handles for clients outside KERNEL_PRIVATE */
99 typedef struct __lck_spin_t__           lck_spin_t;
100 typedef struct __lck_mtx_t__            lck_mtx_t;
101 typedef struct __lck_mtx_ext_t__        lck_mtx_ext_t;
102 
103 #endif  /* !KERNEL_PRIVATE */
104 #ifdef  MACH_KERNEL_PRIVATE
105 
106 /*
107  * static panic deadline, in timebase units, for
108  * hw_lock_{bit,lock}{,_nopreempt} and hw_wait_while_equals()
109  */
110 extern uint64_t _Atomic lock_panic_timeout;
111 
112 /* Adaptive spin before blocking */
113 extern machine_timeout_t   MutexSpin;           /* tunable mutex spin timeout */
114 extern uint64_t            low_MutexSpin;       /* NOTE(review): presumably the lower spin bound -- confirm */
115 extern int64_t             high_MutexSpin;      /* NOTE(review): presumably the upper spin bound -- confirm */
116 
117 #if CONFIG_PV_TICKET
118 extern bool                has_lock_pv;    /* NOTE(review): presumably paravirtual ticket-lock support -- confirm */
119 #endif
120 
121 #ifdef LOCK_PRIVATE
122 
123 #define LOCK_SNOOP_SPINS        100     /* NOTE(review): spin iterations between snoops -- confirm against callers */
124 #define LOCK_PRETEST            0       /* lock pretest disabled (0) on this architecture */
125 
/* ARM WFE: low-power wait until an event (or interrupt) wakes the core */
126 #define wait_for_event()        __builtin_arm_wfe()
127 
/*
 * lock_disable_preemption_for_thread(t): bump t's machine.preemption_count.
 * The store uses compiler_acq_rel ordering (a compiler barrier, not a
 * hardware fence).  Under SCHED_HYGIENE_DEBUG, the first disable
 * (count 0 -> 1) arms the preemption-disable measurement when
 * sched_preemption_disable_debug_mode is set.
 */
128 #if SCHED_HYGIENE_DEBUG
129 #define lock_disable_preemption_for_thread(t) ({                                \
130 	thread_t __dpft_thread = (t);                                           \
131 	uint32_t *__dpft_countp = &__dpft_thread->machine.preemption_count;     \
132 	uint32_t __dpft_count;                                                  \
133                                                                                 \
134 	__dpft_count = *__dpft_countp;                                          \
135 	os_atomic_store(__dpft_countp, __dpft_count + 1, compiler_acq_rel);     \
136                                                                                 \
137 	if (__dpft_count == 0 && sched_preemption_disable_debug_mode) {         \
138 	        _prepare_preemption_disable_measurement();                      \
139 	}                                                                       \
140 })
141 #else /* SCHED_HYGIENE_DEBUG */
/* Non-debug variant: plain increment of the preemption count */
142 #define lock_disable_preemption_for_thread(t) ({                                \
143 	uint32_t *__dpft_countp = &(t)->machine.preemption_count;               \
144                                                                                 \
145 	os_atomic_store(__dpft_countp, *__dpft_countp + 1, compiler_acq_rel);   \
146 })
147 #endif /* SCHED_HYGIENE_DEBUG */
/* Thin wrappers mapping lock-layer names onto the machine-layer primitives */
148 #define lock_enable_preemption()                enable_preemption()
149 #define lock_preemption_level_for_thread(t)     get_preemption_level_for_thread(t)
150 #define lock_preemption_disabled_for_thread(t)  (get_preemption_level_for_thread(t) != 0)
151 #define current_thread()                        current_thread_fast()
152 
/*
 * __hw_spin_wait_load(ptr, load_var, cond_result, cond_expr):
 * one iteration of a WFE-based spin-wait.  Load-exclusive *ptr into
 * load_var (arming the exclusive monitor), evaluate cond_expr into
 * cond_result; if the condition holds, clear the exclusive monitor and
 * fall through, otherwise WFE -- the armed monitor makes a store to
 * *ptr by another CPU generate the wakeup event.
 */
153 #define __hw_spin_wait_load(ptr, load_var, cond_result, cond_expr) ({ \
154 	load_var = os_atomic_load_exclusive(ptr, relaxed);                      \
155 	cond_result = (cond_expr);                                              \
156 	if (__probable(cond_result)) {                                          \
157 	        os_atomic_clear_exclusive();                                    \
158 	} else {                                                                \
159 	        wait_for_event();                                               \
160 	}                                                                       \
161 })
162 
163 #endif /* LOCK_PRIVATE */
164 #endif /* MACH_KERNEL_PRIVATE */
165 #endif /* _ARM_LOCKS_H_ */
166