/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/machine_cpu.h>
#include <kern/locks.h>
#include <kern/mpsc_queue.h>
#include <kern/thread.h>

#pragma mark Single Consumer calls

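/*
 * Wait for a concurrent enqueuer to publish its element.
 *
 * An enqueuer first swings the queue tail to its element and only then
 * stores the previous tail's mpqc_next pointer (see
 * __mpsc_queue_append_update_tail() in <kern/mpsc_queue.h>).  A consumer
 * that observes the tail move before that second store sees a NULL next
 * pointer, and spins here until the link is published.
 */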
__attribute__((noinline))
static mpsc_queue_chain_t
_mpsc_queue_wait_for_enqueuer(struct mpsc_queue_chain *_Atomic *ptr)
{
	return hw_wait_while_equals((void **)ptr, NULL);
}

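/*
 * Re-prepend the batch [first, last] at the head of the queue.
 *
 * If the queue looks empty, the tail must also be swung back from
 * &q->mpqh_head to `last`.  The cmpxchg below can fail if an enqueuer
 * raced us, in which case the batch is simply linked in front of whatever
 * that enqueuer published.
 */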
void
mpsc_queue_restore_batch(mpsc_queue_head_t q, mpsc_queue_chain_t first,
    mpsc_queue_chain_t last)
{
	mpsc_queue_chain_t head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed);

	os_atomic_store(&last->mpqc_next, head, relaxed);

	if (head == NULL &&
	    !os_atomic_cmpxchg(&q->mpqh_tail, &q->mpqh_head, last, release)) {
		head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed);
		if (__improbable(head == NULL)) {
			head = _mpsc_queue_wait_for_enqueuer(&q->mpqh_head.mpqc_next);
		}
		os_atomic_store(&last->mpqc_next, head, relaxed);
	}

	os_atomic_store(&q->mpqh_head.mpqc_next, first, relaxed);
}

mpsc_queue_chain_t
mpsc_queue_dequeue_batch(mpsc_queue_head_t q, mpsc_queue_chain_t *tail_out,
    os_atomic_dependency_t dependency)
{
	mpsc_queue_chain_t head, tail;

	q = os_atomic_inject_dependency(q, dependency);

	tail = os_atomic_load(&q->mpqh_tail, relaxed);
	if (__improbable(tail == &q->mpqh_head)) {
		*tail_out = NULL;
		return NULL;
	}

	head = os_atomic_load(&q->mpqh_head.mpqc_next, relaxed);
	if (__improbable(head == NULL)) {
		head = _mpsc_queue_wait_for_enqueuer(&q->mpqh_head.mpqc_next);
	}
	os_atomic_store(&q->mpqh_head.mpqc_next, NULL, relaxed);
	/*
	 * 22708742: set tail to &q->mpqh_head with release, so that the NULL
	 * write to head above doesn't clobber the head set by a concurrent
	 * enqueuer.
	 *
	 * The other half of the seq_cst is required to pair with any enqueuer
	 * that contributed to an element in this list (pairs with the release
	 * fence in __mpsc_queue_append_update_tail()).
	 *
	 * Making this seq_cst instead of acq_rel makes mpsc_queue_append*()
	 * visibility transitive (when items hop from one queue to the next),
	 * which clients implicitly expect.
	 *
	 * Note that this is the same number of fences that a traditional lock
	 * would have, but as a once-per-batch cost.
	 */
	*tail_out = os_atomic_xchg(&q->mpqh_tail, &q->mpqh_head, seq_cst);

	return head;
}

mpsc_queue_chain_t
mpsc_queue_batch_next(mpsc_queue_chain_t cur, mpsc_queue_chain_t tail)
{
	mpsc_queue_chain_t elm = NULL;
	if (cur == tail || cur == NULL) {
		return elm;
	}

	elm = os_atomic_load(&cur->mpqc_next, relaxed);
	if (__improbable(elm == NULL)) {
		elm = _mpsc_queue_wait_for_enqueuer(&cur->mpqc_next);
	}
	return elm;
}

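/*
 * Illustrative sketch, not part of the build: how a single consumer might
 * drain a queue with the batch calls above.  The element type `my_elem`
 * and its `link` field are hypothetical; mpsc_queue_element() and
 * mpsc_queue_batch_foreach_safe() come from <kern/mpsc_queue.h>, and
 * OS_ATOMIC_DEPENDENCY_NONE is assumed to be the "no dependency" token
 * from <os/atomic_private.h>.
 */
#if 0
struct my_elem {
	struct mpsc_queue_chain link;
	int                     payload;
};

static void
my_drain(mpsc_queue_head_t q)
{
	mpsc_queue_chain_t head, cur, tail;

	/* grab everything currently enqueued in one atomic batch */
	head = mpsc_queue_dequeue_batch(q, &tail, OS_ATOMIC_DEPENDENCY_NONE);
	mpsc_queue_batch_foreach_safe(cur, head, tail) {
		struct my_elem *e = mpsc_queue_element(cur, struct my_elem, link);
		/* process e->payload; `cur` may be freed or re-enqueued here */
		(void)e;
	}
}
#endif
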
#pragma mark "GCD"-like facilities

static void _mpsc_daemon_queue_drain(mpsc_daemon_queue_t, thread_t);
static void _mpsc_daemon_queue_enqueue(mpsc_daemon_queue_t, mpsc_queue_chain_t);

/* thread based queues */

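/*
 * Body of a daemon queue's kernel thread: drain, then block with this very
 * function as the continuation, so that the thread loops forever without
 * keeping a kernel stack across blocks.
 */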
static void
_mpsc_queue_thread_continue(void *param, wait_result_t wr __unused)
{
	mpsc_daemon_queue_t dq = param;
	mpsc_daemon_queue_kind_t kind = dq->mpd_kind;
	thread_t self = dq->mpd_thread;

	__builtin_assume(self != THREAD_NULL);

	if (kind == MPSC_QUEUE_KIND_THREAD_CRITICAL) {
		self->options |= TH_OPT_SYSTEM_CRITICAL;
	}

	assert(dq->mpd_thread == current_thread());
	_mpsc_daemon_queue_drain(dq, self);

	if (kind == MPSC_QUEUE_KIND_THREAD_CRITICAL) {
		self->options &= ~TH_OPT_SYSTEM_CRITICAL;
	}

	thread_block_parameter(_mpsc_queue_thread_continue, dq);
}

static void
_mpsc_queue_thread_wakeup(mpsc_daemon_queue_t dq)
{
	thread_wakeup_thread((event_t)dq, dq->mpd_thread);
}

static kern_return_t
_mpsc_daemon_queue_init_with_thread(mpsc_daemon_queue_t dq,
    mpsc_daemon_invoke_fn_t invoke, int pri, const char *name,
    mpsc_daemon_queue_kind_t kind)
{
	kern_return_t kr;

	*dq = (struct mpsc_daemon_queue){
		.mpd_kind   = kind,
		.mpd_invoke = invoke,
		.mpd_queue  = MPSC_QUEUE_INITIALIZER(dq->mpd_queue),
		.mpd_chain  = { MPSC_QUEUE_NOTQUEUED_MARKER },
	};

	kr = kernel_thread_create(_mpsc_queue_thread_continue, dq, pri,
	    &dq->mpd_thread);
	if (kr == KERN_SUCCESS) {
		thread_set_thread_name(dq->mpd_thread, name);
		thread_start_in_assert_wait(dq->mpd_thread, (event_t)dq, THREAD_UNINT);
		thread_deallocate(dq->mpd_thread);
	}
	return kr;
}

kern_return_t
mpsc_daemon_queue_init_with_thread(mpsc_daemon_queue_t dq,
    mpsc_daemon_invoke_fn_t invoke, int pri, const char *name)
{
	return _mpsc_daemon_queue_init_with_thread(dq, invoke, pri, name,
	           MPSC_QUEUE_KIND_THREAD);
}
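
/*
 * Illustrative sketch, not part of the build: bringing up a thread-backed
 * daemon queue.  `my_queue`, `my_invoke`, `my_init`, `my_produce` and
 * `my_elem` (from the sketch above) are hypothetical; MPSC_QUEUE_NONE is
 * assumed to be the empty option set from <kern/mpsc_queue.h>.
 */
#if 0
static struct mpsc_daemon_queue my_queue;

static void
my_invoke(mpsc_queue_chain_t elm, mpsc_daemon_queue_t dq __unused)
{
	/* runs on the daemon thread, one element at a time */
	struct my_elem *e = mpsc_queue_element(elm, struct my_elem, link);
	(void)e;
}

static void
my_init(void)
{
	kern_return_t kr = mpsc_daemon_queue_init_with_thread(&my_queue,
	    my_invoke, MINPRI_KERNEL, "my.daemon");
	assert(kr == KERN_SUCCESS);
}

static void
my_produce(struct my_elem *e)
{
	/* safe from multiple producers concurrently */
	mpsc_daemon_enqueue(&my_queue, &e->link, MPSC_QUEUE_NONE);
}
#endif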

/* thread-call based queues */

static void
_mpsc_queue_thread_call_drain(thread_call_param_t arg0,
    thread_call_param_t arg1 __unused)
{
	_mpsc_daemon_queue_drain((mpsc_daemon_queue_t)arg0, NULL);
}

static void
_mpsc_queue_thread_call_wakeup(mpsc_daemon_queue_t dq)
{
	thread_call_enter(dq->mpd_call);
}

void
mpsc_daemon_queue_init_with_thread_call(mpsc_daemon_queue_t dq,
    mpsc_daemon_invoke_fn_t invoke, thread_call_priority_t pri)
{
	*dq = (struct mpsc_daemon_queue){
		.mpd_kind   = MPSC_QUEUE_KIND_THREAD_CALL,
		.mpd_invoke = invoke,
		.mpd_queue  = MPSC_QUEUE_INITIALIZER(dq->mpd_queue),
		.mpd_chain  = { MPSC_QUEUE_NOTQUEUED_MARKER },
	};
	dq->mpd_call = thread_call_allocate_with_options(
		_mpsc_queue_thread_call_drain, dq, pri, THREAD_CALL_OPTIONS_ONCE);
}

/* nested queues */
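
/*
 * A nested queue has no execution context of its own: waking it up
 * enqueues its mpd_chain onto the target queue, and the target's invoke
 * callback, mpsc_daemon_queue_nested_invoke(), drains the nested queue
 * from the target's context.
 */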

void
mpsc_daemon_queue_nested_invoke(mpsc_queue_chain_t elm,
    __unused mpsc_daemon_queue_t tq)
{
	mpsc_daemon_queue_t dq;
	dq = mpsc_queue_element(elm, struct mpsc_daemon_queue, mpd_chain);
	_mpsc_daemon_queue_drain(dq, NULL);
}

static void
_mpsc_daemon_queue_nested_wakeup(mpsc_daemon_queue_t dq)
{
	_mpsc_daemon_queue_enqueue(dq->mpd_target, &dq->mpd_chain);
}

void
mpsc_daemon_queue_init_with_target(mpsc_daemon_queue_t dq,
    mpsc_daemon_invoke_fn_t invoke, mpsc_daemon_queue_t target)
{
	*dq = (struct mpsc_daemon_queue){
		.mpd_kind   = MPSC_QUEUE_KIND_NESTED,
		.mpd_invoke = invoke,
		.mpd_target = target,
		.mpd_queue  = MPSC_QUEUE_INITIALIZER(dq->mpd_queue),
		.mpd_chain  = { MPSC_QUEUE_NOTQUEUED_MARKER },
	};
}

/* enqueue, drain & cancelation */

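/*
 * mpd_state protocol: an enqueuer that makes the queue non empty sets
 * WAKEUP (with release) and calls _mpsc_daemon_queue_wakeup() only when
 * neither WAKEUP nor DRAINING was already set.  The drainer atomically
 * sets DRAINING and clears WAKEUP before each pass, and loops if WAKEUP
 * reappeared while it was draining.  CANCELED is sticky: enqueuing after
 * cancelation is a fatal error.
 */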
static void
_mpsc_daemon_queue_drain(mpsc_daemon_queue_t dq, thread_t self)
{
	mpsc_daemon_invoke_fn_t invoke = dq->mpd_invoke;
	mpsc_queue_chain_t head, cur, tail;
	mpsc_daemon_queue_state_t st;

again:
	/*
	 * Most of the time we're woken up because we're dirty; in that case,
	 * this atomic xor sets DRAINING and clears WAKEUP in a single atomic
	 * operation.
	 *
	 * However, if we're woken up for cancelation, the state may be reduced
	 * to the CANCELED bit set only, and then the xor will actually set
	 * WAKEUP.  We need to correct this and clear the bit again to avoid
	 * looping below.  This is safe to do as no one is allowed to enqueue
	 * more work after cancelation has happened.
	 *
	 * We use `st` as a dependency token to pair with the release fence in
	 * _mpsc_daemon_queue_enqueue(), which gives us the guarantee that the
	 * update to the tail of the MPSC queue that made it non empty is
	 * visible to us.
	 */
	st = os_atomic_xor(&dq->mpd_state,
	    MPSC_QUEUE_STATE_DRAINING | MPSC_QUEUE_STATE_WAKEUP, dependency);
	assert(st & MPSC_QUEUE_STATE_DRAINING);
	if (__improbable(st & MPSC_QUEUE_STATE_WAKEUP)) {
		assert(st & MPSC_QUEUE_STATE_CANCELED);
		os_atomic_andnot(&dq->mpd_state, MPSC_QUEUE_STATE_WAKEUP, relaxed);
	}

	os_atomic_dependency_t dep = os_atomic_make_dependency((uintptr_t)st);
	if ((head = mpsc_queue_dequeue_batch(&dq->mpd_queue, &tail, dep))) {
		do {
			mpsc_queue_batch_foreach_safe(cur, head, tail) {
				os_atomic_store(&cur->mpqc_next,
				    MPSC_QUEUE_NOTQUEUED_MARKER, relaxed);
				invoke(cur, dq);
			}
		} while ((head = mpsc_queue_dequeue_batch(&dq->mpd_queue, &tail, dep)));

		if (dq->mpd_options & MPSC_QUEUE_OPTION_BATCH) {
			invoke(MPSC_QUEUE_BATCH_END, dq);
		}
	}

	if (self) {
		assert_wait((event_t)dq, THREAD_UNINT);
	}

	/*
	 * Unlike GCD, no fence is necessary here: there is no concept similar
	 * to "dispatch_sync()" that would require changes this thread made to
	 * be visible to other threads as part of the mpsc_daemon_queue
	 * machinery.
	 *
	 * Making updates that happened on the daemon queue visible to other
	 * threads is the responsibility of the client.
	 */
	st = os_atomic_andnot(&dq->mpd_state, MPSC_QUEUE_STATE_DRAINING, relaxed);

	/*
	 * A wakeup has happened while we were draining, which means that the
	 * queue did an [ empty -> non empty ] transition during our drain.
	 *
	 * Chances are we already observed and drained everything, but we need
	 * to be absolutely sure, so start a drain again, as the enqueuer
	 * observed the DRAINING bit and skipped calling
	 * _mpsc_daemon_queue_wakeup().
	 */
	if (__improbable(st & MPSC_QUEUE_STATE_WAKEUP)) {
		if (self) {
			clear_wait(self, THREAD_AWAKENED);
		}
		goto again;
	}

	/* dereferencing `dq` past this point is unsafe */

	if (__improbable(st & MPSC_QUEUE_STATE_CANCELED)) {
		thread_wakeup(&dq->mpd_state);
		if (self) {
			clear_wait(self, THREAD_AWAKENED);
			thread_terminate_self();
			__builtin_unreachable();
		}
	}
}

static void
_mpsc_daemon_queue_wakeup(mpsc_daemon_queue_t dq)
{
	switch (dq->mpd_kind) {
	case MPSC_QUEUE_KIND_NESTED:
		_mpsc_daemon_queue_nested_wakeup(dq);
		break;
	case MPSC_QUEUE_KIND_THREAD:
	case MPSC_QUEUE_KIND_THREAD_CRITICAL:
		_mpsc_queue_thread_wakeup(dq);
		break;
	case MPSC_QUEUE_KIND_THREAD_CALL:
		_mpsc_queue_thread_call_wakeup(dq);
		break;
	default:
		panic("mpsc_queue[%p]: invalid kind (%d)", dq, dq->mpd_kind);
	}
}

static void
_mpsc_daemon_queue_enqueue(mpsc_daemon_queue_t dq, mpsc_queue_chain_t elm)
{
	mpsc_daemon_queue_state_t st;

	if (mpsc_queue_append(&dq->mpd_queue, elm)) {
		/*
		 * Pairs with the acquire fence in _mpsc_daemon_queue_drain().
		 */
		st = os_atomic_or_orig(&dq->mpd_state, MPSC_QUEUE_STATE_WAKEUP, release);
		if (__improbable(st & MPSC_QUEUE_STATE_CANCELED)) {
			panic("mpsc_queue[%p]: use after cancelation", dq);
		}

		if ((st & (MPSC_QUEUE_STATE_DRAINING | MPSC_QUEUE_STATE_WAKEUP)) == 0) {
			_mpsc_daemon_queue_wakeup(dq);
		}
	}
}

void
mpsc_daemon_enqueue(mpsc_daemon_queue_t dq, mpsc_queue_chain_t elm,
    mpsc_queue_options_t options)
{
	if (options & MPSC_QUEUE_DISABLE_PREEMPTION) {
		disable_preemption();
	}

	_mpsc_daemon_queue_enqueue(dq, elm);

	if (options & MPSC_QUEUE_DISABLE_PREEMPTION) {
		enable_preemption();
	}
}

void
mpsc_daemon_queue_cancel_and_wait(mpsc_daemon_queue_t dq)
{
	mpsc_daemon_queue_state_t st;

	assert_wait((event_t)&dq->mpd_state, THREAD_UNINT);

	st = os_atomic_or_orig(&dq->mpd_state, MPSC_QUEUE_STATE_CANCELED, relaxed);
	if (__improbable(st & MPSC_QUEUE_STATE_CANCELED)) {
		panic("mpsc_queue[%p]: cancelled twice (%x)", dq, st);
	}

	if (dq->mpd_kind == MPSC_QUEUE_KIND_NESTED && st == 0) {
		clear_wait(current_thread(), THREAD_AWAKENED);
	} else {
		disable_preemption();
		_mpsc_daemon_queue_wakeup(dq);
		enable_preemption();
		thread_block(THREAD_CONTINUE_NULL);
	}

	switch (dq->mpd_kind) {
	case MPSC_QUEUE_KIND_NESTED:
		dq->mpd_target = NULL;
		break;
	case MPSC_QUEUE_KIND_THREAD:
	case MPSC_QUEUE_KIND_THREAD_CRITICAL:
		dq->mpd_thread = NULL;
		break;
	case MPSC_QUEUE_KIND_THREAD_CALL:
		thread_call_cancel_wait(dq->mpd_call);
		thread_call_free(dq->mpd_call);
		dq->mpd_call = NULL;
		break;
	default:
		panic("mpsc_queue[%p]: invalid kind (%d)", dq, dq->mpd_kind);
	}
	dq->mpd_kind = MPSC_QUEUE_KIND_UNKNOWN;
}
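
/*
 * Illustrative sketch, not part of the build: tearing down the
 * hypothetical `my_queue` from the sketch above.  Producers must be
 * quiesced first, since enqueuing onto a canceled queue panics.
 */
#if 0
static void
my_teardown(void)
{
	/* all producers of my_queue must have stopped by now */
	mpsc_daemon_queue_cancel_and_wait(&my_queue);
	/*
	 * on return the final drain is done; for thread-backed queues the
	 * backing thread has terminated itself
	 */
}
#endif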

#pragma mark deferred deallocation daemon

static struct mpsc_daemon_queue thread_deferred_deallocation_queue;

void
thread_deallocate_daemon_init(void)
{
	kern_return_t kr;

	kr = _mpsc_daemon_queue_init_with_thread(&thread_deferred_deallocation_queue,
	    mpsc_daemon_queue_nested_invoke, MINPRI_KERNEL,
	    "daemon.deferred-deallocation", MPSC_QUEUE_KIND_THREAD_CRITICAL);
	if (kr != KERN_SUCCESS) {
		panic("thread_deallocate_daemon_init: creating daemon failed (%d)", kr);
	}
}

void
thread_deallocate_daemon_register_queue(mpsc_daemon_queue_t dq,
    mpsc_daemon_invoke_fn_t invoke)
{
	mpsc_daemon_queue_init_with_target(dq, invoke,
	    &thread_deferred_deallocation_queue);
}
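
/*
 * Illustrative sketch, not part of the build: a subsystem registering its
 * own deferred-deallocation queue (nested under the daemon above).
 * `my_dealloc_queue`, `my_dealloc_invoke` and `my_dealloc_init` are
 * hypothetical.
 */
#if 0
static struct mpsc_daemon_queue my_dealloc_queue;

static void
my_dealloc_invoke(mpsc_queue_chain_t elm, mpsc_daemon_queue_t dq __unused)
{
	/* free the object owning `elm`; runs on the daemon's critical thread */
	(void)elm;
}

static void
my_dealloc_init(void)
{
	thread_deallocate_daemon_register_queue(&my_dealloc_queue,
	    my_dealloc_invoke);
}
#endif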
453