xref: /xnu-8020.101.4/osfmk/kern/ticket_lock.h (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_TICKET_LOCK_H_
30 #define _KERN_TICKET_LOCK_H_
31 
32 #ifndef __ASSEMBLER__
33 #include <kern/lock_types.h>
34 #include <kern/lock_group.h>
35 #endif /* __ASSEMBLER__ */
36 
37 /*
38  * TODO <rdar://problem/72157773>. We do not need to make
39  * the header available only to KERNEL_PRIVATE.
40  */
41 #if KERNEL_PRIVATE
42 #ifndef __ASSEMBLER__
43 
44 __BEGIN_DECLS
45 
46 #ifdef MACH_KERNEL_PRIVATE
47 
48 /*!
49  * @typedef hw_lck_ticket_t
50  *
51  * @discussion
52  * This type describes the low level type for a ticket lock.
53  * @c lck_ticket_t provides a higher level abstraction
54  * that also provides thread ownership information.
55  *
56  * This lock is meant to be exactly 32bits to be able to replace
57  * hw_lock_bit_t locks when needed.
58  *
59  * This lower level interface supports an @c *_allow_invalid()
60  * to implement advanced memory reclamation schemes using sequestering.
61  * Do note that when @c CONFIG_PROB_GZALLOC is engaged, and the target lock
62  * comes from a zone, PGZ must be handled manually.
63  * See ipc_object_lock_allow_invalid() for an example of that.
64  *
65  * @c hw_lck_ticket_invalidate() must be used on locks
66  * that will be used this way: in addition to making subsequent calls to
67  * @c hw_lck_ticket_lock_allow_invalid() fail, it allows for
68  * @c hw_lck_ticket_destroy() to synchronize with callers to
69  * @c hw_lck_ticket_lock_allow_invalid() who successfully reserved
70  * a ticket but will fail, ensuring the memory can't be freed too early.
71  *
72  *
73  * @c hw_lck_ticket_reserve() can be used to pre-reserve a ticket.
74  * When this function returns @c true, then the lock was acquired.
75  * When it returns @c false, then @c hw_lck_ticket_wait() must
76  * be called to wait for this ticket.
77  *
78  * This can be used to resolve certain lock inversions: assuming
79  * two locks, @c L (a mutex or any kind of lock) and @c T (a ticket lock),
80  * where @c L can be taken when @c T is held but not the other way around,
81  * then the following can be done to take both locks in "the wrong order",
82  * with a guarantee of forward progress:
83  *
84  * <code>
85  *     // starts with L held
86  *     uint32_t ticket;
87  *
88  *     if (!hw_lck_ticket_reserve(T, &ticket)) {
89  *         unlock(L);
90  *         hw_lck_ticket_wait(T, ticket);
91  *         lock(L);
92  *         // possibly validate what might have changed
93  *         // due to dropping L
94  *     }
95  *
96  *     // both L and T are held
97  * </code>
98  *
99  * This pattern above is safe even for a case when the protected
100  * resource contains the ticket lock @c T, provided that it is
101  * guaranteed that both @c T and @c L (in the proper order) will
102  * be taken before that resource's death. In that case, in the resource
103  * destructor, when @c hw_lck_ticket_destroy() is called, it will
104  * wait for the reservation to be released.
105  *
106  * See @c waitq_pull_thread_locked() for an example of this where:
107  * - @c L is the thread lock of a thread waiting on a given waitq,
108  * - @c T is the lock for that waitq,
109  * - the waitq can't be destroyed before the thread is unhooked from it,
110  *   which happens under both @c L and @c T.
111  *
112  *
113  * @note:
114  * At the moment, this construct only supports up to 255 CPUs.
115  * Supporting more CPUs requires losing the `lck_type` field,
116  * and burning the low bit of the cticket/nticket
117  * for the "invalidation" feature.
118  */
119 typedef union {
120 	struct {
121 		uint8_t lck_type;       /* type tag (see LCK_TICKET_TYPE) */
122 		uint8_t lck_valid;      /* cleared by hw_lck_ticket_invalidate() */
123 		union {
124 			struct {
125 				uint8_t cticket;        /* ticket currently being served */
126 				uint8_t nticket;        /* next ticket to hand out */
127 			};
128 			uint16_t tcurnext;      /* cticket/nticket as one 16-bit unit */
129 		};
130 	};
131 	uint32_t lck_value;     /* the whole lock as a single 32-bit value */
132 } hw_lck_ticket_t;
133 
134 /*!
135  * @typedef lck_ticket_t
136  *
137  * @discussion
138  * A higher level construct than hw_lck_ticket_t in 2 words
139  * like other kernel locks, which admits thread ownership information.
140  */
141 typedef struct {
142 	union {
143 		uintptr_t lck_owner __kernel_data_semantics;    /* owning thread while held — NOTE(review): presumed from lck_ticket_assert_owned(); confirm in ticket_lock.c */
144 		uintptr_t lck_tag __kernel_data_semantics;      /* tag view of the same word, e.g. LCK_TICKET_TAG_DESTROYED */
145 	};
146 	hw_lck_ticket_t tu;     /* the underlying low-level ticket lock */
147 } lck_ticket_t;
148 
149 #define LCK_TICKET_TYPE                 0x44    /* presumably the hw_lck_ticket_t.lck_type tag value — confirm in ticket_lock.c */
150 #define LCK_TICKET_TAG_DESTROYED        0xdead  /* presumably stored in lck_tag once destroyed — confirm in ticket_lock.c */
151 
152 #pragma GCC visibility push(hidden)
153 
/* Initialization / teardown; *_init_locked() initializes to the locked state. */
154 void hw_lck_ticket_init(hw_lck_ticket_t * tlock LCK_GRP_ARG(lck_grp_t *grp));
155 void hw_lck_ticket_init_locked(hw_lck_ticket_t * tlock LCK_GRP_ARG(lck_grp_t *grp));
156 void hw_lck_ticket_destroy(hw_lck_ticket_t * tlock LCK_GRP_ARG(lck_grp_t *grp));
157 
/* Plain acquire/release; *_lock_to() takes a timeout and a handler run on expiry. */
158 bool hw_lck_ticket_held(hw_lck_ticket_t *tlock) __result_use_check;
159 void hw_lck_ticket_lock(hw_lck_ticket_t * tlock LCK_GRP_ARG(lck_grp_t *grp));
160 hw_lock_status_t hw_lck_ticket_lock_to(hw_lck_ticket_t * tlock, uint64_t timeout,
161     hw_lock_timeout_handler_t handler LCK_GRP_ARG(lck_grp_t *grp));
162 bool hw_lck_ticket_lock_try(hw_lck_ticket_t * tlock LCK_GRP_ARG(lck_grp_t *grp)) __result_use_check;
163 void hw_lck_ticket_unlock(hw_lck_ticket_t *tlock);
164 
/*
 * Two-phase acquisition (see the hw_lck_ticket_t discussion above):
 * reserve() returning true means the lock was acquired outright;
 * false means the caller must hw_lck_ticket_wait() on the returned ticket.
 */
165 bool hw_lck_ticket_reserve(hw_lck_ticket_t * tlock, uint32_t *ticket LCK_GRP_ARG(lck_grp_t *grp)) __result_use_check;
166 hw_lock_status_t hw_lck_ticket_reserve_allow_invalid(hw_lck_ticket_t * tlock,
167     uint32_t *ticket LCK_GRP_ARG(lck_grp_t *grp)) __result_use_check;
168 hw_lock_status_t hw_lck_ticket_wait(hw_lck_ticket_t * tlock, uint32_t ticket,
169     uint64_t timeout, hw_lock_timeout_handler_t handler LCK_GRP_ARG(lck_grp_t *grp));
170 
/*
 * Sequestering support: *_lock_allow_invalid() fails once the lock has been
 * hw_lck_ticket_invalidate()d (see the hw_lck_ticket_t discussion above).
 */
171 hw_lock_status_t hw_lck_ticket_lock_allow_invalid(hw_lck_ticket_t * tlock,
172     uint64_t timeout, hw_lock_timeout_handler_t handler LCK_GRP_ARG(lck_grp_t *grp));
173 void hw_lck_ticket_invalidate(hw_lck_ticket_t *tlock);
174 
/*
 * When LOCK_STATS is off, LCK_GRP_ARG() presumably expands to nothing and the
 * real functions take no group parameter; these macros drop the `grp` argument
 * so call sites can pass it unconditionally in either configuration.
 */
175 #if !LOCK_STATS
176 #define hw_lck_ticket_init(lck, grp)             hw_lck_ticket_init(lck)
177 #define hw_lck_ticket_init_locked(lck, grp)      hw_lck_ticket_init_locked(lck)
178 #define hw_lck_ticket_destroy(lck, grp)          hw_lck_ticket_destroy(lck)
179 #define hw_lck_ticket_lock(lck, grp)             hw_lck_ticket_lock(lck)
180 #define hw_lck_ticket_lock_to(lck, to, cb, grp)  hw_lck_ticket_lock_to(lck, to, cb)
181 #define hw_lck_ticket_lock_try(lck, grp)         hw_lck_ticket_lock_try(lck)
182 #define hw_lck_ticket_lock_allow_invalid(lck, to, cb, grp) \
183 	hw_lck_ticket_lock_allow_invalid(lck, to, cb)
184 #define hw_lck_ticket_reserve(lck, t, grp)       hw_lck_ticket_reserve(lck, t)
185 #define hw_lck_ticket_reserve_allow_invalid(lck, t, grp) \
186 	hw_lck_ticket_reserve_allow_invalid(lck, t)
187 #define hw_lck_ticket_wait(lck, ticket, to, cb, grp) \
188 	hw_lck_ticket_wait(lck, ticket, to, cb)
189 #endif /* !LOCK_STATS */
190 
191 #pragma GCC visibility pop
192 #else /* MACH_KERNEL_PRIVATE */
193 
/*
 * Opaque stand-in for the MACH_KERNEL_PRIVATE definition of lck_ticket_t:
 * same size and alignment (one pointer-sized word plus one 32-bit word) so
 * clients can embed it without seeing the internal layout.
 */
194 typedef struct {
195 	uintptr_t       opaque1 __kernel_data_semantics;
196 	uint32_t        opaque2;
197 } lck_ticket_t;
198 
199 #endif /* MACH_KERNEL_PRIVATE */
200 
/* Thread-owning ticket lock API (records ownership; see lck_ticket_t). */
201 void lck_ticket_init(lck_ticket_t *tlock, lck_grp_t *grp);
202 void lck_ticket_destroy(lck_ticket_t *tlock, lck_grp_t *grp);
203 void lck_ticket_lock(lck_ticket_t *tlock, lck_grp_t *grp);
204 void lck_ticket_unlock(lck_ticket_t *tlock);
205 void lck_ticket_assert_owned(lck_ticket_t *tlock);
/*
 * Ownership assertion that compiles away on !MACH_ASSERT builds, while the
 * (void) cast still "uses" the argument to avoid unused-variable warnings.
 */
206 #if MACH_ASSERT
207 #define LCK_TICKET_ASSERT_OWNED(tlock) lck_ticket_assert_owned(tlock)
208 #else
209 #define LCK_TICKET_ASSERT_OWNED(tlock) (void)(tlock)
210 #endif
211 
212 #if XNU_KERNEL_PRIVATE
213 bool lck_ticket_lock_try(lck_ticket_t *tlock, lck_grp_t *grp) __result_use_check;
/* NOTE(review): kdp_* prefix suggests a kernel-debugger probe; presumably
 * callable without holding the lock — confirm against the implementation. */
214 bool kdp_lck_ticket_is_acquired(lck_ticket_t *lck) __result_use_check;
215 #endif
216 
217 __END_DECLS
218 
219 #endif /* __ASSEMBLER__ */
220 
/*
 * Assembler-visible constants (defined outside the __ASSEMBLER__ guard)
 * mirroring the hw_lck_ticket_t layout. NOTE(review): assuming little-endian
 * byte order (lck_type byte 0, lck_valid byte 1, cticket 2, nticket 3):
 * adding LOCK_INCREMENT to lck_value bumps nticket (byte 3), and bit 8 of
 * lck_value is the low bit of lck_valid — confirm against the asm users.
 */
221 #define HW_LCK_TICKET_LOCK_INCREMENT  0x01000000
222 #define HW_LCK_TICKET_LOCK_VALID_BIT  8
223 
224 #else /* KERNEL_PRIVATE */
225 #error header not supported
226 #endif /* KERNEL_PRIVATE */
227 
228 #endif /* _KERN_TICKET_LOCK_H_ */
229