xref: /xnu-8796.101.5/EXTERNAL_HEADERS/corecrypto/cc_priv.h (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /* Copyright (c) (2010-2012,2014-2021) Apple Inc. All rights reserved.
2  *
3  * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which
4  * is contained in the License.txt file distributed with corecrypto) and only to
5  * people who accept that license. IMPORTANT:  Any license rights granted to you by
6  * Apple Inc. (if any) are limited to internal use within your organization only on
7  * devices and computers you own or control, for the sole purpose of verifying the
8  * security characteristics and correct functioning of the Apple Software.  You may
9  * not, directly or indirectly, redistribute the Apple Software or any portions thereof.
10  */
11 
12 #ifndef _CORECRYPTO_CC_PRIV_H_
13 #define _CORECRYPTO_CC_PRIV_H_
14 
15 #include <corecrypto/cc.h>
16 #include <stdbool.h>
17 #include <stdint.h>
18 
19 CC_PTRCHECK_CAPABLE_HEADER()
20 
// Fork handlers for the stateful components of corecrypto.
// Names follow the pthread_atfork() convention: prepare runs before
// fork(), parent in the parent afterwards, child in the new child.
// NOTE(review): registration with pthread_atfork happens elsewhere — confirm.
void cc_atfork_prepare(void);
void cc_atfork_parent(void);
void cc_atfork_child(void);
25 
/* Portability shim: compilers without __has_builtin treat every builtin
 * as unavailable, so the fallback code paths below are selected. */
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

/* Strip const from a pointer without a compiler diagnostic (BSD idiom). */
#ifndef __DECONST
#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
33 
34 /* defines the following macros :
35 
36  CC_ARRAY_LEN: returns the number of elements in an array
37 
38  CC_ROR  : Rotate Right 32 bits. Rotate count can be a variable.
39  CC_ROL  : Rotate Left 32 bits. Rotate count can be a variable.
40  CC_RORc : Rotate Right 32 bits. Rotate count must be a constant.
41  CC_ROLc : Rotate Left 32 bits. Rotate count must be a constant.
42 
43  CC_ROR64  : Rotate Right 64 bits. Rotate count can be a variable.
44  CC_ROL64  : Rotate Left 64 bits. Rotate count can be a variable.
45  CC_ROR64c : Rotate Right 64 bits. Rotate count must be a constant.
46  CC_ROL64c : Rotate Left 64 bits. Rotate count must be a constant.
47 
48  CC_BSWAP  : byte swap a 32 bits variable.
49 
50  CC_H2BE32 : convert a 32 bits value between host and big endian order.
51  CC_H2LE32 : convert a 32 bits value between host and little endian order.
52 
53  CC_BSWAP64  : byte swap a 64 bits variable
54 
55  CC_H2BE64 : convert a 64 bits value between host and big endian order
56  CC_H2LE64 : convert a 64 bits value between host and little endian order
57 
58 */
59 
// RTKitOSPlatform should replace CC_MEMCPY with memcpy
#define CC_MEMCPY(D,S,L) cc_memcpy((D),(S),(L))
#define CC_MEMMOVE(D,S,L) cc_memmove((D),(S),(L))
#define CC_MEMSET(D,V,L) cc_memset((D),(V),(L))

/* Prefer the compiler's fortified (_chk) memory intrinsics when available;
 * they bound the write by __builtin_object_size. Mode 1 bounds by the
 * closest enclosing sub-object, mode 0 ("nochk") by the whole allocation. */
#if __has_builtin(__builtin___memcpy_chk) && !defined(_MSC_VER) && !CC_SGX && !CC_EFI
#define cc_memcpy(dst, src, len) __builtin___memcpy_chk((dst), (src), (len), __builtin_object_size((dst), 1))
#define cc_memcpy_nochk(dst, src, len) __builtin___memcpy_chk((dst), (src), (len), __builtin_object_size((dst), 0))
#else
#define cc_memcpy(dst, src, len) memcpy((dst), (src), (len))
#define cc_memcpy_nochk(dst, src, len) memcpy((dst), (src), (len))
#endif

#if __has_builtin(__builtin___memmove_chk) && !defined(_MSC_VER) && !CC_SGX && !CC_EFI
#define cc_memmove(dst, src, len) __builtin___memmove_chk((dst), (src), (len), __builtin_object_size((dst), 1))
#else
#define cc_memmove(dst, src, len) memmove((dst), (src), (len))
#endif

#if __has_builtin(__builtin___memset_chk) && !defined(_MSC_VER) && !CC_SGX && !CC_EFI
#define cc_memset(dst, val, len) __builtin___memset_chk((dst), (val), (len), __builtin_object_size((dst), 1))
#else
#define cc_memset(dst, val, len) memset((dst), (val), (len))
#endif

/* Element count of a true array. Do NOT use on a pointer or a decayed
 * array parameter: sizeof would yield the pointer size, not the array's. */
#define CC_ARRAY_LEN(x) (sizeof((x))/sizeof((x)[0]))
86 
87 // MARK: - Loads and Store
88 
89 // 64 bit load & store big endian
90 #if defined(__x86_64__) && !defined(_MSC_VER)
/* Store the 64-bit value x into the (possibly unaligned) 8-byte buffer y
 * in big-endian order. x86_64 fast path: byte-swap the register holding
 * x, store it, then swap back so the input register is left unchanged.
 * NOTE(review): the asm mutates input operand %1 and restores it; an
 * in/out operand declaration would be stricter — confirm. */
CC_INLINE void cc_store64_be(uint64_t x, uint8_t cc_sized_by(8) * y)
{
    __asm__("bswapq %1     \n\t"
            "movq   %1, %0 \n\t"
            "bswapq %1     \n\t"
            : "=m"(*(y))
            : "r"(x));
}
/* Load a 64-bit big-endian value from the 8-byte buffer y: read, then
 * byte-swap into host order. */
CC_INLINE uint64_t cc_load64_be(const uint8_t cc_sized_by(8) * y)
{
    uint64_t x;
    __asm__("movq %1, %0 \n\t"
            "bswapq %0   \n\t"
            : "=r"(x)
            : "m"(*(y)));
    return x;
}
108 #else
109 CC_INLINE void cc_store64_be(uint64_t x, uint8_t cc_sized_by(8) * y)
110 {
111     y[0] = (uint8_t)(x >> 56);
112     y[1] = (uint8_t)(x >> 48);
113     y[2] = (uint8_t)(x >> 40);
114     y[3] = (uint8_t)(x >> 32);
115     y[4] = (uint8_t)(x >> 24);
116     y[5] = (uint8_t)(x >> 16);
117     y[6] = (uint8_t)(x >> 8);
118     y[7] = (uint8_t)(x);
119 }
120 CC_INLINE uint64_t cc_load64_be(const uint8_t cc_sized_by(8) * y)
121 {
122     return (((uint64_t)(y[0])) << 56) | (((uint64_t)(y[1])) << 48) | (((uint64_t)(y[2])) << 40) | (((uint64_t)(y[3])) << 32) |
123            (((uint64_t)(y[4])) << 24) | (((uint64_t)(y[5])) << 16) | (((uint64_t)(y[6])) << 8) | ((uint64_t)(y[7]));
124 }
125 #endif
126 
127 // 32 bit load & store big endian
128 #if (defined(__i386__) || defined(__x86_64__)) && !defined(_MSC_VER)
/* Store the 32-bit value x into the (possibly unaligned) 4-byte buffer y
 * in big-endian order. i386/x86_64 fast path: swap, store, swap back so
 * the register holding x is restored. */
CC_INLINE void cc_store32_be(uint32_t x, uint8_t cc_sized_by(4) * y)
{
    __asm__("bswapl %1     \n\t"
            "movl   %1, %0 \n\t"
            "bswapl %1     \n\t"
            : "=m"(*(y))
            : "r"(x));
}
/* Load a 32-bit big-endian value from the 4-byte buffer y: read, then
 * byte-swap into host order. */
CC_INLINE uint32_t cc_load32_be(const uint8_t cc_sized_by(4) * y)
{
    uint32_t x;
    __asm__("movl %1, %0 \n\t"
            "bswapl %0   \n\t"
            : "=r"(x)
            : "m"(*(y)));
    return x;
}
146 #else
147 CC_INLINE void cc_store32_be(uint32_t x, uint8_t cc_sized_by(4) * y)
148 {
149     y[0] = (uint8_t)(x >> 24);
150     y[1] = (uint8_t)(x >> 16);
151     y[2] = (uint8_t)(x >> 8);
152     y[3] = (uint8_t)(x);
153 }
154 CC_INLINE uint32_t cc_load32_be(const uint8_t cc_sized_by(4) * y)
155 {
156     return (((uint32_t)(y[0])) << 24) | (((uint32_t)(y[1])) << 16) | (((uint32_t)(y[2])) << 8) | ((uint32_t)(y[3]));
157 }
158 #endif
159 
160 CC_INLINE void cc_store16_be(uint16_t x, uint8_t cc_sized_by(2) * y)
161 {
162     y[0] = (uint8_t)(x >> 8);
163     y[1] = (uint8_t)(x);
164 }
165 CC_INLINE uint16_t cc_load16_be(const uint8_t cc_sized_by(2) * y)
166 {
167     return (uint16_t) (((uint16_t)(y[0])) << 8) | ((uint16_t)(y[1]));
168 }
169 
170 // 64 bit load & store little endian
171 CC_INLINE void cc_store64_le(uint64_t x, uint8_t cc_sized_by(8) * y)
172 {
173     y[7] = (uint8_t)(x >> 56);
174     y[6] = (uint8_t)(x >> 48);
175     y[5] = (uint8_t)(x >> 40);
176     y[4] = (uint8_t)(x >> 32);
177     y[3] = (uint8_t)(x >> 24);
178     y[2] = (uint8_t)(x >> 16);
179     y[1] = (uint8_t)(x >> 8);
180     y[0] = (uint8_t)(x);
181 }
182 CC_INLINE uint64_t cc_load64_le(const uint8_t cc_sized_by(8) * y)
183 {
184     return (((uint64_t)(y[7])) << 56) | (((uint64_t)(y[6])) << 48) | (((uint64_t)(y[5])) << 40) | (((uint64_t)(y[4])) << 32) |
185            (((uint64_t)(y[3])) << 24) | (((uint64_t)(y[2])) << 16) | (((uint64_t)(y[1])) << 8) | ((uint64_t)(y[0]));
186 }
187 
188 // 32 bit load & store little endian
189 CC_INLINE void cc_store32_le(uint32_t x, uint8_t cc_sized_by(4) * y)
190 {
191     y[3] = (uint8_t)(x >> 24);
192     y[2] = (uint8_t)(x >> 16);
193     y[1] = (uint8_t)(x >> 8);
194     y[0] = (uint8_t)(x);
195 }
196 CC_INLINE uint32_t cc_load32_le(const uint8_t cc_sized_by(4) * y)
197 {
198     return (((uint32_t)(y[3])) << 24) | (((uint32_t)(y[2])) << 16) | (((uint32_t)(y[1])) << 8) | ((uint32_t)(y[0]));
199 }
200 
201 // MARK: - 32-bit Rotates
202 
#if defined(_MSC_VER)
// MARK: -- MSVC version

#include <stdlib.h>
#if !defined(__clang__)
 #pragma intrinsic(_lrotr,_lrotl)
#endif
/* MSVC: map the rotate macros onto the _lrotl/_lrotr intrinsics. They
 * accept a variable count, so the "constant count" variants use the
 * same intrinsics. */
#define	CC_ROR(x,n) _lrotr(x,n)
#define	CC_ROL(x,n) _lrotl(x,n)
#define	CC_RORc(x,n) _lrotr(x,n)
#define	CC_ROLc(x,n) _lrotl(x,n)
214 
215 #elif (defined(__i386__) || defined(__x86_64__))
216 // MARK: -- intel asm version
217 
/* 32-bit rotate left by a variable count; the count is placed in %cl as
 * required by the roll instruction. */
CC_INLINE uint32_t CC_ROL(uint32_t word, int i)
{
    __asm__ ("roll %%cl,%0"
         :"=r" (word)
         :"0" (word),"c" (i));
    return word;
}

/* 32-bit rotate right by a variable count held in %cl. */
CC_INLINE uint32_t CC_ROR(uint32_t word, int i)
{
    __asm__ ("rorl %%cl,%0"
         :"=r" (word)
         :"0" (word),"c" (i));
    return word;
}

/* Need to be a macro here, because 'i' is an immediate (constant) */
/* The "I" constraint restricts i to an x86 shift immediate (0..31), so
 * it must be a compile-time constant. */
#define CC_ROLc(word, i)                \
({  uint32_t _word=(word);              \
    __asm__ __volatile__ ("roll %2,%0"  \
        :"=r" (_word)                   \
        :"0" (_word),"I" (i));          \
    _word;                              \
})


#define CC_RORc(word, i)                \
({  uint32_t _word=(word);              \
    __asm__ __volatile__ ("rorl %2,%0"  \
        :"=r" (_word)                   \
        :"0" (_word),"I" (i));          \
    _word;                              \
})
251 
252 #else
253 
254 // MARK: -- default version
255 
CC_ROL(uint32_t word,int i)256 CC_INLINE uint32_t CC_ROL(uint32_t word, int i)
257 {
258     return ( (word<<(i&31)) | (word >> ( (32-(i&31)) & 31 )) );
259 }
260 
CC_ROR(uint32_t word,int i)261 CC_INLINE uint32_t CC_ROR(uint32_t word, int i)
262 {
263     return ( (word>>(i&31)) | (word << ( (32-(i&31)) & 31 )) );
264 }
265 
266 #define	CC_ROLc(x, y) CC_ROL(x, y)
267 #define	CC_RORc(x, y) CC_ROR(x, y)
268 
269 #endif
270 
271 // MARK: - 64 bits rotates
272 
273 #if defined(__x86_64__) && !defined(_MSC_VER) //clang _MSVC doesn't support GNU-style inline assembly
274 // MARK: -- intel 64 asm version
275 
/* 64-bit rotate left by a variable count; the count goes in %cl as
 * required by rolq. */
CC_INLINE uint64_t CC_ROL64(uint64_t word, int i)
{
    __asm__("rolq %%cl,%0"
        :"=r" (word)
        :"0" (word),"c" (i));
    return word;
}

/* 64-bit rotate right by a variable count held in %cl. */
CC_INLINE uint64_t CC_ROR64(uint64_t word, int i)
{
    __asm__("rorq %%cl,%0"
        :"=r" (word)
        :"0" (word),"c" (i));
    return word;
}

/* Need to be a macro here, because 'i' is an immediate (constant) */
/* The "J" constraint restricts i to a 64-bit shift immediate (0..63),
 * so it must be a compile-time constant. */
#define CC_ROL64c(word, i)      \
({                              \
    uint64_t _word=(word);      \
    __asm__("rolq %2,%0"        \
        :"=r" (_word)           \
        :"0" (_word),"J" (i));  \
    _word;                      \
})

#define CC_ROR64c(word, i)      \
({                              \
    uint64_t _word=(word);      \
    __asm__("rorq %2,%0"        \
        :"=r" (_word)           \
        :"0" (_word),"J" (i));  \
    _word;                      \
})
310 
311 
312 #else /* Not x86_64  */
313 
314 // MARK: -- default C version
315 
CC_ROL64(uint64_t word,int i)316 CC_INLINE uint64_t CC_ROL64(uint64_t word, int i)
317 {
318     return ( (word<<(i&63)) | (word >> ((64-(i&63)) & 63) ) );
319 }
320 
CC_ROR64(uint64_t word,int i)321 CC_INLINE uint64_t CC_ROR64(uint64_t word, int i)
322 {
323     return ( (word>>(i&63)) | (word << ((64-(i&63)) & 63) ) );
324 }
325 
326 #define	CC_ROL64c(x, y) CC_ROL64(x, y)
327 #define	CC_ROR64c(x, y) CC_ROR64(x, y)
328 
329 #endif
330 
331 
332 // MARK: - Byte Swaps
333 
334 #if __has_builtin(__builtin_bswap32)
335 #define CC_BSWAP32(x) __builtin_bswap32(x)
336 #else
CC_BSWAP32(uint32_t x)337 CC_INLINE uint32_t CC_BSWAP32(uint32_t x)
338 {
339     return
340         ((x & 0xff000000) >> 24) |
341         ((x & 0x00ff0000) >>  8) |
342         ((x & 0x0000ff00) <<  8) |
343         ((x & 0x000000ff) << 24);
344 }
345 #endif
346 
347 #if __has_builtin(__builtin_bswap64)
348 #define CC_BSWAP64(x) __builtin_bswap64(x)
349 #else
CC_BSWAP64(uint64_t x)350 CC_INLINE uint64_t CC_BSWAP64(uint64_t x)
351 {
352     return
353         ((x & 0xff00000000000000ULL) >> 56) |
354         ((x & 0x00ff000000000000ULL) >> 40) |
355         ((x & 0x0000ff0000000000ULL) >> 24) |
356         ((x & 0x000000ff00000000ULL) >>  8) |
357         ((x & 0x00000000ff000000ULL) <<  8) |
358         ((x & 0x0000000000ff0000ULL) << 24) |
359         ((x & 0x000000000000ff00ULL) << 40) |
360         ((x & 0x00000000000000ffULL) << 56);
361 }
362 #endif
363 
/* Host <-> big/little endian conversions: on a little-endian host the
 * big-endian conversions byte-swap and the little-endian ones are the
 * identity; on a big-endian host it is the reverse. */
#ifdef __LITTLE_ENDIAN__
#define CC_H2BE32(x) CC_BSWAP32(x)
#define CC_H2LE32(x) (x)
#define CC_H2BE64(x) CC_BSWAP64(x)
#define CC_H2LE64(x) (x)
#else
#define CC_H2BE32(x) (x)
#define CC_H2LE32(x) CC_BSWAP32(x)
#define CC_H2BE64(x) (x)
#define CC_H2LE64(x) CC_BSWAP64(x)
#endif

/* extract a byte portably */
/* cc_byte(x, n): byte n of x, with n = 0 the least-significant byte. */
#ifdef _MSC_VER
#define cc_byte(x, n) ((unsigned char)((x) >> (8 * (n))))
#else
#define cc_byte(x, n) (((x) >> (8 * (n))) & 255)
#endif
382 
383 /* Count leading zeros (for nonzero inputs) */
384 
385 /*
386  *  On i386 and x86_64, we know clang and GCC will generate BSR for
387  *  __builtin_clzl.  This instruction IS NOT constant time on all micro-
388  *  architectures, but it *is* constant time on all micro-architectures that
389  *  have been used by Apple, and we expect that to continue to be the case.
390  *
391  *  When building for x86_64h with clang, this produces LZCNT, which is exactly
392  *  what we want.
393  *
394  *  On arm and arm64, we know that clang and GCC generate the constant-time CLZ
395  *  instruction from __builtin_clzl( ).
396  */
397 
398 #if defined(_WIN32)
399 /* We use the Windows implementations below. */
400 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__)
401 /* We use a thought-to-be-good version of __builtin_clz. */
402 #elif defined __GNUC__
403 #warning Using __builtin_clz() on an unknown architecture; it may not be constant-time.
404 /* If you find yourself seeing this warning, file a radar for someone to
405  * check whether or not __builtin_clz() generates a constant-time
406  * implementation on the architecture you are targeting.  If it does, append
407  * the name of that architecture to the list of "safe" architectures above.  */
408 #endif
409 
/*!
  @function cc_clz32_fallback
  @abstract Count leading zeros of a 32-bit value in constant time

  @param data A 32-bit value

  @result Count of leading (most-significant) zero bits of @p data

  @discussion Examines every bit so the running time is independent of
  the value; used when __builtin_clz is unavailable.
*/
CC_INLINE CC_CONST unsigned cc_clz32_fallback(uint32_t data)
{
    unsigned int b = 0;
    unsigned int bit = 0;
    // Work from LSB to MSB
    for (int i = 0; i < 32; i++) {
        bit = (data >> i) & 1;
        // If the bit is 0, update the "leading bits are zero" counter "b".
        b += (1 - bit);
        /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
         * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
         */
        b &= (bit - 1);
    }
    return b;
}
426 
/*!
  @function cc_clz64_fallback
  @abstract Count leading zeros of a 64-bit value in constant time

  @param data A 64-bit value

  @result Count of leading (most-significant) zero bits of @p data

  @discussion Examines every bit so the running time is independent of
  the value; used when __builtin_clzll is unavailable.
*/
CC_INLINE CC_CONST unsigned cc_clz64_fallback(uint64_t data)
{
    unsigned int b = 0;
    unsigned int bit = 0;
    // Work from LSB to MSB
    for (int i = 0; i < 64; i++) {
        bit = (data >> i) & 1;
        // If the bit is 0, update the "leading bits are zero" counter.
        b += (1 - bit);
        /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
         * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
         */
        b &= (bit - 1);
    }
    return b;
}
443 
/*!
  @function cc_ctz32_fallback
  @abstract Count trailing zeros of a 32-bit value in constant time

  @param data A 32-bit value

  @result Count of trailing (least-significant) zero bits of @p data

  @discussion Examines every bit so the running time is independent of
  the value; used when __builtin_ctz is unavailable.
*/
CC_INLINE CC_CONST unsigned cc_ctz32_fallback(uint32_t data)
{
    unsigned int b = 0;
    unsigned int bit = 0;
    // Work from MSB to LSB
    for (int i = 31; i >= 0; i--) {
        bit = (data >> i) & 1;
        // If the bit is 0, update the "trailing zero bits" counter.
        b += (1 - bit);
        /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
         * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
         */
        b &= (bit - 1);
    }
    return b;
}
460 
/*!
  @function cc_ctz64_fallback
  @abstract Count trailing zeros of a 64-bit value in constant time

  @param data A 64-bit value

  @result Count of trailing (least-significant) zero bits of @p data

  @discussion Examines every bit so the running time is independent of
  the value; used when __builtin_ctzll is unavailable.
*/
CC_INLINE CC_CONST unsigned cc_ctz64_fallback(uint64_t data)
{
    unsigned int b = 0;
    unsigned int bit = 0;
    // Work from MSB to LSB
    for (int i = 63; i >= 0; i--) {
        bit = (data >> i) & 1;
        // If the bit is 0, update the "trailing zero bits" counter.
        b += (1 - bit);
        /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
         * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
         */
        b &= (bit - 1);
    }
    return b;
}
477 
478 /*!
479   @function cc_clz32
480   @abstract Count leading zeros of a nonzero 32-bit value
481 
482   @param data A nonzero 32-bit value
483 
484   @result Count of leading zeros of @p data
485 
486   @discussion @p data is assumed to be nonzero.
487 */
CC_INLINE CC_CONST unsigned cc_clz32(uint32_t data) {
    // __builtin_clz(0) is undefined, hence the nonzero precondition.
    cc_assert(data != 0);
#if __has_builtin(__builtin_clz)
    cc_static_assert(sizeof(unsigned) == 4, "clz relies on an unsigned int being 4 bytes");
    return (unsigned)__builtin_clz(data);
#else
    // Constant-time bitwise fallback when the builtin is unavailable.
    return cc_clz32_fallback(data);
#endif
}
497 
498 /*!
499   @function cc_clz64
500   @abstract Count leading zeros of a nonzero 64-bit value
501 
502   @param data A nonzero 64-bit value
503 
504   @result Count of leading zeros of @p data
505 
506   @discussion @p data is assumed to be nonzero.
507 */
CC_INLINE CC_CONST unsigned cc_clz64(uint64_t data) {
    // __builtin_clzll(0) is undefined, hence the nonzero precondition.
    cc_assert(data != 0);
#if __has_builtin(__builtin_clzll)
    return (unsigned)__builtin_clzll(data);
#else
    // Constant-time bitwise fallback when the builtin is unavailable.
    return cc_clz64_fallback(data);
#endif
}
516 
517 /*!
518   @function cc_ctz32
519   @abstract Count trailing zeros of a nonzero 32-bit value
520 
521   @param data A nonzero 32-bit value
522 
523   @result Count of trailing zeros of @p data
524 
525   @discussion @p data is assumed to be nonzero.
526 */
CC_INLINE CC_CONST unsigned cc_ctz32(uint32_t data) {
    // __builtin_ctz(0) is undefined, hence the nonzero precondition.
    cc_assert(data != 0);
#if __has_builtin(__builtin_ctz)
    cc_static_assert(sizeof(unsigned) == 4, "ctz relies on an unsigned int being 4 bytes");
    return (unsigned)__builtin_ctz(data);
#else
    // Constant-time bitwise fallback when the builtin is unavailable.
    return cc_ctz32_fallback(data);
#endif
}
536 
537 /*!
538   @function cc_ctz64
539   @abstract Count trailing zeros of a nonzero 64-bit value
540 
541   @param data A nonzero 64-bit value
542 
543   @result Count of trailing zeros of @p data
544 
545   @discussion @p data is assumed to be nonzero.
546 */
CC_INLINE CC_CONST unsigned cc_ctz64(uint64_t data) {
    // __builtin_ctzll(0) is undefined, hence the nonzero precondition.
    cc_assert(data != 0);
#if __has_builtin(__builtin_ctzll)
    return (unsigned)__builtin_ctzll(data);
#else
    // Constant-time bitwise fallback when the builtin is unavailable.
    return cc_ctz64_fallback(data);
#endif
}
555 
556 /*!
557   @function cc_ffs32_fallback
558   @abstract Find first bit set in a 32-bit value
559 
560   @param data A 32-bit value
561 
562   @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
563  */
CC_INLINE CC_CONST unsigned cc_ffs32_fallback(int32_t data)
{
    // Constant time: all 32 bits are examined regardless of the value.
    unsigned b = 0;
    unsigned bit = 0;
    unsigned seen = 0;

    // Work from LSB to MSB
    for (int i = 0; i < 32; i++) {
        bit = ((uint32_t)data >> i) & 1;

        // Track whether we've seen a 1 bit.
        seen |= bit;

        // If the bit is 0 and we haven't seen a 1 yet, increment b.
        // ((seen - 1) is all-ones until the first 1 bit, then 0.)
        b += (1 - bit) & (seen - 1);
    }

    // If we saw a 1, return b + 1, else 0.
    return (~(seen - 1)) & (b + 1);
}
584 
585 /*!
586   @function cc_ffs64_fallback
587   @abstract Find first bit set in a 64-bit value
588 
589   @param data A 64-bit value
590 
591   @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
592  */
CC_INLINE CC_CONST unsigned cc_ffs64_fallback(int64_t data)
{
    // Constant time: all 64 bits are examined regardless of the value.
    unsigned b = 0;
    unsigned bit = 0;
    unsigned seen = 0;

    // Work from LSB to MSB
    for (int i = 0; i < 64; i++) {
        bit = ((uint64_t)data >> i) & 1;

        // Track whether we've seen a 1 bit.
        seen |= bit;

        // If the bit is 0 and we haven't seen a 1 yet, increment b.
        // ((seen - 1) is all-ones until the first 1 bit, then 0.)
        b += (1 - bit) & (seen - 1);
    }

    // If we saw a 1, return b + 1, else 0.
    return (~(seen - 1)) & (b + 1);
}
613 
614 /*!
615   @function cc_ffs32
616   @abstract Find first bit set in a 32-bit value
617 
618   @param data A 32-bit value
619 
620   @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
621  */
CC_INLINE CC_CONST unsigned cc_ffs32(int32_t data)
{
    cc_static_assert(sizeof(int) == 4, "ffs relies on an int being 4 bytes");
#if __has_builtin(__builtin_ffs)
    // Builtin matches the contract above: 0 maps to 0, otherwise
    // 1 + index of the least-significant set bit.
    return (unsigned)__builtin_ffs(data);
#else
    // Constant-time bitwise fallback when the builtin is unavailable.
    return cc_ffs32_fallback(data);
#endif
}
631 
632 /*!
633   @function cc_ffs64
634   @abstract Find first bit set in a 64-bit value
635 
636   @param data A 64-bit value
637 
638   @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
639  */
CC_INLINE CC_CONST unsigned cc_ffs64(int64_t data)
{
#if __has_builtin(__builtin_ffsll)
    // Builtin matches the contract above: 0 maps to 0, otherwise
    // 1 + index of the least-significant set bit.
    return (unsigned)__builtin_ffsll(data);
#else
    // Constant-time bitwise fallback when the builtin is unavailable.
    return cc_ffs64_fallback(data);
#endif
}
648 
/* Checked arithmetic: cc_add_overflow(a, b, &r) / cc_mul_overflow(a, b, &r)
 * store the (wrapped) result in r and return true iff it overflowed. */
#define cc_add_overflow __builtin_add_overflow
#define cc_mul_overflow __builtin_mul_overflow
651 
652 /* HEAVISIDE_STEP (shifted by one)
653    function f(x): x->0, when x=0
654                   x->1, when x>0
655    Can also be seen as a bitwise operation:
656       f(x): x -> y
657         y[0]=(OR x[i]) for all i (all bits)
658         y[i]=0 for all i>0
659    Run in constant time (log2(<bitsize of x>))
660    Useful to run constant time checks
661 */
/* CC_HEAVISIDE_STEP(r, s): constant-time r = (s != 0) ? 1 : 0.
 * The high 32 bits are folded onto the low 32, then the carry out of a
 * 32-bit add produces the 0/1 result.
 * Fix: parenthesize the macro argument at the cast site — the original
 * `(uint64_t)s` bound the cast only to the first operand of an
 * expression argument. */
#define CC_HEAVISIDE_STEP(r, s) do {                                         \
    cc_static_assert(sizeof(uint64_t) >= sizeof(s), "max type is uint64_t"); \
    const uint64_t _s = (uint64_t)(s);                                       \
    const uint64_t _t = (_s & 0xffffffff) | (_s >> 32);                      \
    r = (uint8_t)((_t + 0xffffffff) >> 32);                                  \
} while (0)
668 
/* CC_CARRY_2BITS(x): 1 if (x mod 4) is nonzero, else 0.
 * CC_CARRY_3BITS(x): 1 if (x mod 8) is nonzero, else 0.
 * Fix: parenthesize the macro argument so expression arguments expand
 * with the intended precedence. */
#define CC_CARRY_2BITS(x) ((((x) >> 1) | (x)) & 0x1)
#define CC_CARRY_3BITS(x) ((((x) >> 2) | ((x) >> 1) | (x)) & 0x1)
672 
/* Ceiling of a/b for b > 0. NOTE(review): (a)+((b)-1) can wrap when a
 * is near the type's maximum — callers keep a well below that. */
#define cc_ceiling(a,b)  (((a)+((b)-1))/(b))
/* Bytes needed to hold x bits. */
#define CC_BITLEN_TO_BYTELEN(x) cc_ceiling((x), 8)
675 
676 /*!
677  @brief     CC_MUXU(r, s, a, b) is equivalent to r = s ? a : b, but executes in constant time
678  @param a   Input a
679  @param b   Input b
680  @param s   Selection parameter s. Must be 0 or 1.
681  @param r   Output, set to a if s=1, or b if s=0.
682  */
#define CC_MUXU(r, s, a, b) do {                                     \
    cc_assert((s) == 0 || (s) == 1);                                 \
    /* (s)-1 is all-ones when s==0 and all-zeros when s==1, so       \
     * exactly one of a/b survives the masks — no data-dependent     \
     * branch. */                                                    \
    r = (~((s)-1) & (a)) | (((s)-1) & (b));                          \
} while (0)
687 
/* True when the platform can actually abort execution (see cc_try_abort). */
#define CC_PROVIDES_ABORT (!(CC_BASEBAND || CC_EFI || CC_RTKITROM || CC_USE_SEPROM))
689 
690 /*!
691  @function cc_abort
692  @abstract Abort execution unconditionally
693  */
694 CC_NORETURN
695 void cc_abort(const char *msg);
696 
697 /*!
698   @function cc_try_abort
699   @abstract Abort execution iff the platform provides a function like @p abort() or @p panic()
700 
701   @discussion If the platform does not provide a means to abort execution, this function does nothing; therefore, callers should return an error code after calling this function.
702 */
703 void cc_try_abort(const char *msg);
704 
/* Branch-prediction hints: CC_LIKELY(cond) marks cond as expected true,
 * CC_UNLIKELY(cond) as expected false. Both degrade to the plain
 * expression when __builtin_expect is unavailable.
 * Fix: parenthesize the fallback expansion — the bare `cond` could
 * change precedence when the macro result is used in a larger
 * expression (e.g. CC_UNLIKELY(x & 1) == 0). */
#if __has_builtin(__builtin_expect)
 #define CC_LIKELY(cond) __builtin_expect(!!(cond), 1)
 #define CC_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
#else
 #define CC_LIKELY(cond) (cond)
 #define CC_UNLIKELY(cond) (cond)
#endif
712 
/* Call cc_abort(msg) when cond is true; the condition is marked
 * unlikely so the non-aborting path stays the fast path. */
#define cc_abort_if(cond, msg)                  \
    do {                                        \
        if (CC_UNLIKELY(cond)) {                \
            cc_abort(msg);                      \
        }                                       \
    } while (0)
719 
720 void cc_try_abort_if(bool condition, const char *msg);
721 
722 /*
723   Unfortunately, since we export this symbol, this declaration needs
724   to be in a public header to satisfy TAPI.
725 
726   See fipspost_trace_priv.h for more details.
727 */
728 extern const void *fipspost_trace_vtable;
729 
730 
731 // MARK: -- Deprecated macros
732 /*
733  Use `cc_store32_be`, `cc_store32_le`, `cc_store64_be`, `cc_store64_le`, and
734  `cc_load32_be`, `cc_load32_le`, `cc_load64_be`, `cc_load64_le` instead.
735 
736  CC_STORE32_BE : store 32 bit value in big endian in unaligned buffer.
737  CC_STORE32_LE : store 32 bit value in little endian in unaligned buffer.
738  CC_STORE64_BE : store 64 bit value in big endian in unaligned buffer.
739  CC_STORE64_LE : store 64 bit value in little endian in unaligned buffer.
740  CC_LOAD32_BE : load 32 bit value in big endian from unaligned buffer.
741  CC_LOAD32_LE : load 32 bit value in little endian from unaligned buffer.
742  CC_LOAD64_BE : load 64 bit value in big endian from unaligned buffer.
743  CC_LOAD64_LE : load 64 bit value in little endian from unaligned buffer.
744  CC_READ_LE32 : read a 32 bits little endian value
745  CC_WRITE_LE32 : write a 32 bits little endian value
746  CC_WRITE_LE64 : write a 64 bits little endian value
747 */
748 
/* Deprecated wrappers (see comment above): thin shims over the typed
 * cc_load / cc_store helpers; the casts preserve source compatibility
 * with older callers. New code should call the helpers directly. */
#define CC_STORE32_BE(x, y) cc_store32_be((uint32_t)(x), (uint8_t *)(y))
#define CC_STORE32_LE(x, y) cc_store32_le((uint32_t)(x), (uint8_t *)(y))
#define CC_STORE64_BE(x, y) cc_store64_be((uint64_t)(x), (uint8_t *)(y))
#define CC_STORE64_LE(x, y) cc_store64_le((uint64_t)(x), (uint8_t *)(y))

#define CC_LOAD32_BE(x, y) ((x) = cc_load32_be((uint8_t *)(y)))
#define CC_LOAD32_LE(x, y) ((x) = cc_load32_le((uint8_t *)(y)))
#define CC_LOAD64_BE(x, y) ((x) = cc_load64_be((uint8_t *)(y)))
#define CC_LOAD64_LE(x, y) ((x) = cc_load64_le((uint8_t *)(y)))

#define CC_READ_LE32(ptr) cc_load32_le((uint8_t *)(ptr))

#define CC_WRITE_LE32(ptr, x) cc_store32_le((uint32_t)(x), (uint8_t *)(ptr))
#define CC_WRITE_LE64(ptr, x) cc_store64_le((uint64_t)(x), (uint8_t *)(ptr))
763 
764 #endif /* _CORECRYPTO_CC_PRIV_H_ */
765