/*
 * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/OSAtomic.h>
#include <kern/debug.h>
#include <machine/atomic.h>

enum {
	false   = 0,
	true    = 1
};

#ifndef NULL
#define NULL ((void *)0)
#endif

#define ATOMIC_DEBUG DEBUG

#if ATOMIC_DEBUG
#define ALIGN_TEST(p, t) do{if((uintptr_t)p&(sizeof(t)-1)) panic("Unaligned atomic pointer %p",p);}while(0)
#else
#define ALIGN_TEST(p, t) do{}while(0)
#endif
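
/*
 * ALIGN_TEST checks natural alignment: a pointer aligned to sizeof(t)
 * has its low bits clear, so (uintptr_t)p & (sizeof(t)-1) is nonzero
 * exactly when p is misaligned.  For example, a UInt32 pointer at
 * address 0x1002 fails because (0x1002 & 3) != 0.
 */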

/*
 * atomic operations
 *	These are _the_ atomic operations, now implemented via compiler built-ins.
 *	It is expected that this C implementation is a candidate for Link-Time-
 *	Optimization inlining, whereas the assembler implementations they replace
 *	were not.
 */

#undef OSCompareAndSwap8
Boolean
OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address)
{
	return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel);
}

#undef OSCompareAndSwap16
Boolean
OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address)
{
	return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel);
}

#undef OSCompareAndSwap
Boolean
OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address)
{
	ALIGN_TEST(address, UInt32);
	return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel);
}
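
/*
 * Typical usage (illustrative sketch; "set_flag_bits" and "flags" are
 * hypothetical, not part of this file): the compare-and-swap is retried
 * until the update applies to an unchanged value.
 *
 *	static volatile UInt32 flags;
 *
 *	static void
 *	set_flag_bits(UInt32 bits)
 *	{
 *		UInt32 old;
 *		do {
 *			old = flags;
 *		} while (!OSCompareAndSwap(old, old | bits, &flags));
 *	}
 */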

#undef OSCompareAndSwap64
Boolean
OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address)
{
	/*
	 * _Atomic uint64 requires 8-byte alignment on all architectures.
	 * This silences the compiler cast warning.  ALIGN_TEST() verifies
	 * that the cast was legal, if defined.
	 */
	_Atomic UInt64 *aligned_addr = (_Atomic UInt64 *)(uintptr_t)address;

	ALIGN_TEST(address, UInt64);
	return (Boolean)os_atomic_cmpxchg(aligned_addr, oldValue, newValue, acq_rel);
}

#undef OSCompareAndSwapPtr
Boolean
OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address)
{
	return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel);
}

SInt8
OSAddAtomic8(SInt32 amount, volatile SInt8 *address)
{
	return os_atomic_add_orig(address, (SInt8)amount, relaxed);
}

SInt16
OSAddAtomic16(SInt32 amount, volatile SInt16 *address)
{
	return os_atomic_add_orig(address, (SInt16)amount, relaxed);
}

#undef OSAddAtomic
SInt32
OSAddAtomic(SInt32 amount, volatile SInt32 *address)
{
	ALIGN_TEST(address, UInt32);
	return os_atomic_add_orig(address, amount, relaxed);
}
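
/*
 * Note: the *_orig built-ins return the value the memory held *before*
 * the operation, so OSAddAtomic behaves like a fetch-then-add.
 * Illustrative sketch ("counter" is hypothetical):
 *
 *	static volatile SInt32 counter;
 *
 *	SInt32 prev = OSAddAtomic(1, &counter);	// prev == old count
 */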

#undef OSAddAtomic64
SInt64
OSAddAtomic64(SInt64 amount, volatile SInt64 *address)
{
	_Atomic SInt64* aligned_address = (_Atomic SInt64*)(uintptr_t)address;

	ALIGN_TEST(address, SInt64);
	return os_atomic_add_orig(aligned_address, amount, relaxed);
}

#undef OSAddAtomicLong
long
OSAddAtomicLong(long theAmount, volatile long *address)
{
	return os_atomic_add_orig(address, theAmount, relaxed);
}

#undef OSIncrementAtomic
SInt32
OSIncrementAtomic(volatile SInt32 * value)
{
	return os_atomic_inc_orig(value, relaxed);
}

#undef OSDecrementAtomic
SInt32
OSDecrementAtomic(volatile SInt32 * value)
{
	return os_atomic_dec_orig(value, relaxed);
}

#undef OSBitAndAtomic
UInt32
OSBitAndAtomic(UInt32 mask, volatile UInt32 * value)
{
	return os_atomic_and_orig(value, mask, relaxed);
}

#undef OSBitOrAtomic
UInt32
OSBitOrAtomic(UInt32 mask, volatile UInt32 * value)
{
	return os_atomic_or_orig(value, mask, relaxed);
}

#undef OSBitXorAtomic
UInt32
OSBitXorAtomic(UInt32 mask, volatile UInt32 * value)
{
	return os_atomic_xor_orig(value, mask, relaxed);
}

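/*
 * OSTestAndSetClear uses big-endian bit numbering within the byte
 * stream: bit 0 is the most-significant bit (mask 0x80) of the byte at
 * startAddress, bit 7 its least-significant bit, bit 8 the MSB of the
 * next byte, and so on.  It returns true when the bit already held the
 * wanted value (the rmw loop gives up without writing) and false when
 * the bit had to be flipped.
 */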
static Boolean
OSTestAndSetClear(UInt32 bit, Boolean wantSet, volatile UInt8 * startAddress)
{
	UInt8           mask = 1;
	UInt8           oldValue, newValue;
	UInt8           wantValue;
	UInt8           *address;

	address = (UInt8 *)(uintptr_t)(startAddress + (bit / 8));
	mask <<= (7 - (bit % 8));
	wantValue = wantSet ? mask : 0;

	return !os_atomic_rmw_loop(address, oldValue, newValue, relaxed, {
		if ((oldValue & mask) == wantValue) {
		        os_atomic_rmw_loop_give_up(break);
		}
		newValue = (oldValue & ~mask) | wantValue;
	});
}

Boolean
OSTestAndSet(UInt32 bit, volatile UInt8 * startAddress)
{
	return OSTestAndSetClear(bit, true, startAddress);
}

Boolean
OSTestAndClear(UInt32 bit, volatile UInt8 * startAddress)
{
	return OSTestAndSetClear(bit, false, startAddress);
}
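
/*
 * Illustrative sketch ("slot_bitmap" is hypothetical): claiming slot 3
 * in a byte bitmap.  OSTestAndSet returns true when the bit was already
 * set, so a false return means this caller performed the 0 -> 1
 * transition and owns the slot.
 *
 *	static volatile UInt8 slot_bitmap[4];
 *
 *	if (!OSTestAndSet(3, slot_bitmap)) {
 *		// we set bit 3 (mask 0x10 in byte 0) and own the slot
 *	}
 */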

/*
 * silly unaligned versions
 */

SInt8
OSIncrementAtomic8(volatile SInt8 * value)
{
	return os_atomic_inc_orig(value, relaxed);
}

SInt8
OSDecrementAtomic8(volatile SInt8 * value)
{
	return os_atomic_dec_orig(value, relaxed);
}

UInt8
OSBitAndAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return os_atomic_and_orig(value, (UInt8)mask, relaxed);
}

UInt8
OSBitOrAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return os_atomic_or_orig(value, (UInt8)mask, relaxed);
}

UInt8
OSBitXorAtomic8(UInt32 mask, volatile UInt8 * value)
{
	return os_atomic_xor_orig(value, (UInt8)mask, relaxed);
}

SInt16
OSIncrementAtomic16(volatile SInt16 * value)
{
	return OSAddAtomic16(1, value);
}

SInt16
OSDecrementAtomic16(volatile SInt16 * value)
{
	return OSAddAtomic16(-1, value);
}

UInt16
OSBitAndAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return os_atomic_and_orig(value, (UInt16)mask, relaxed);
}

UInt16
OSBitOrAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return os_atomic_or_orig(value, (UInt16)mask, relaxed);
}

UInt16
OSBitXorAtomic16(UInt32 mask, volatile UInt16 * value)
{
	return os_atomic_xor_orig(value, (UInt16)mask, relaxed);
}