xref: /xnu-10002.41.9/osfmk/i386/locks_i386_inlines.h (revision 699cd48037512bf4380799317ca44ca453c82f57)
1 /*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _I386_LOCKS_I386_INLINES_H_
30 #define _I386_LOCKS_I386_INLINES_H_
31 
32 #include <kern/locks.h>
33 #include <kern/lock_stat.h>
34 #include <kern/turnstile.h>
35 
36 #if LCK_MTX_USE_ARCH
37 
/*
 * Accessors that enforce program order of loads and stores to the mutex
 * word.  ordered_load() pins the load against compiler reordering
 * (compiler_acq_rel); ordered_store_release() additionally publishes the
 * store with release semantics and follows it with a compiler barrier.
 */
#define ordered_load(target) os_atomic_load(target, compiler_acq_rel)
#define ordered_store_release(target, value) ({ \
	        os_atomic_store(target, value, release); \
	        os_compiler_barrier(); \
})

/* Convenience wrappers for the lck_mtx_t state and owner fields. */
#define ordered_load_mtx_state(lock)                    ordered_load(&(lock)->lck_mtx_state)
#define ordered_store_mtx_state_release(lock, value)    ordered_store_release(&(lock)->lck_mtx_state, (value))
#define ordered_store_mtx_owner(lock, value)            os_atomic_store(&(lock)->lck_mtx_owner, (value), compiler_acq_rel)

#if DEVELOPMENT | DEBUG
/* Development/debug-only owner-sanity panic; __abortlike: never returns. */
void lck_mtx_owner_check_panic(lck_mtx_t       *mutex) __abortlike;
#endif
53 
/*
 * Release the mutex interlock.
 *
 * Clears LCK_MTX_ILOCKED_MSK from the caller-supplied state snapshot and
 * publishes the new state with release semantics, so every prior update to
 * the mutex is visible before the interlock appears free to other CPUs.
 * Finally re-enables preemption (taking the interlock disables it —
 * see the "release the interlock and re-enable preemption" pairing used
 * by the callers below).
 */
__attribute__((always_inline))
static inline void
lck_mtx_ilk_unlock_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	state &= ~LCK_MTX_ILOCKED_MSK;
	/* release store must precede enable_preemption() */
	ordered_store_mtx_state_release(mutex, state);

	enable_preemption();
}
65 
/*
 * Finish a mutex acquisition: drop the interlock (which also re-enables
 * preemption) and record the acquisition for lock statistics.
 *
 * `state` is the caller's snapshot of lck_mtx_state; the interlock bit
 * must still be set in it (asserted below).  The LCK_MTX_PROFILE_MSK bit
 * of the snapshot gates profiling in LCK_MTX_ACQUIRED (from
 * kern/lock_stat.h); the `false` argument presumably distinguishes the
 * non-spin acquisition path — TODO confirm against lock_stat.h.
 */
__attribute__((always_inline))
static inline void
lck_mtx_lock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	assert(state & LCK_MTX_ILOCKED_MSK);

	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);

	LCK_MTX_ACQUIRED(mutex, mutex->lck_mtx_grp, false,
	    state & LCK_MTX_PROFILE_MSK);
}
80 
81 __attribute__((always_inline))
82 static inline void
lck_mtx_lock_finish_inline_with_cleanup(lck_mtx_t * mutex,uint32_t state)83 lck_mtx_lock_finish_inline_with_cleanup(
84 	lck_mtx_t       *mutex,
85 	uint32_t        state)
86 {
87 	assert(state & LCK_MTX_ILOCKED_MSK);
88 
89 	/* release the interlock and re-enable preemption */
90 	lck_mtx_ilk_unlock_inline(mutex, state);
91 
92 	LCK_MTX_ACQUIRED(mutex, mutex->lck_mtx_grp, false,
93 	    state & LCK_MTX_PROFILE_MSK);
94 
95 	turnstile_cleanup();
96 }
97 
/*
 * Finish a successful try-lock: drop the interlock (re-enabling
 * preemption) and record the try-acquisition for lock statistics.
 *
 * Unlike lck_mtx_lock_finish_inline() this path does not assert the
 * interlock bit in `state`.  LCK_MTX_TRY_ACQUIRED profiling is gated by
 * the LCK_MTX_PROFILE_MSK bit of the caller's state snapshot.
 */
__attribute__((always_inline))
static inline void
lck_mtx_try_lock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	/* release the interlock and re-enable preemption */
	lck_mtx_ilk_unlock_inline(mutex, state);
	LCK_MTX_TRY_ACQUIRED(mutex, mutex->lck_mtx_grp, false,
	    state & LCK_MTX_PROFILE_MSK);
}
109 
/*
 * Finish converting a spin-mode hold into a full mutex hold.
 *
 * Atomically (in one release store) clears the interlock and spin bits
 * and sets the mutex-locked bit, so other CPUs never observe an
 * intermediate state, then re-enables preemption.
 */
__attribute__((always_inline))
static inline void
lck_mtx_convert_spin_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	/* release the interlock and acquire it as mutex */
	state &= ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK);
	state |= LCK_MTX_MLOCKED_MSK;

	/* release store must precede enable_preemption() */
	ordered_store_mtx_state_release(mutex, state);
	enable_preemption();
}
123 
/*
 * Finish a mutex release: re-enable preemption and record the release
 * for lock statistics.
 *
 * The state word itself is not touched here — the caller is expected to
 * have already published the unlocked state; `state` is only consulted
 * for its LCK_MTX_PROFILE_MSK bit, which gates LCK_MTX_RELEASED
 * profiling (from kern/lock_stat.h).
 */
__attribute__((always_inline))
static inline void
lck_mtx_unlock_finish_inline(
	lck_mtx_t       *mutex,
	uint32_t        state)
{
	enable_preemption();
	LCK_MTX_RELEASED(mutex, mutex->lck_mtx_grp,
	    state & LCK_MTX_PROFILE_MSK);
}
134 
135 #endif /* LCK_MTX_USE_ARCH */
136 #endif /* _I386_LOCKS_I386_INLINES_H_ */
137