/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019,2020) Apple Inc. All rights reserved.
 *
 * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which
 * is contained in the License.txt file distributed with corecrypto) and only to
 * people who accept that license. IMPORTANT:  Any license rights granted to you by
 * Apple Inc. (if any) are limited to internal use within your organization only on
 * devices and computers you own or control, for the sole purpose of verifying the
 * security characteristics and correct functioning of the Apple Software.  You may
 * not, directly or indirectly, redistribute the Apple Software or any portions thereof.
 */

12 #ifndef _CORECRYPTO_CC_PRIV_H_
13 #define _CORECRYPTO_CC_PRIV_H_
14 
15 #include <corecrypto/cc.h>
16 #include <stdbool.h>
17 #include <stdint.h>
18 
19 // Fork handlers for the stateful components of corecrypto.
20 void cc_atfork_prepare(void);
21 void cc_atfork_parent(void);
22 void cc_atfork_child(void);
23 
24 #ifndef __has_builtin
25 #define __has_builtin(x) 0
26 #endif
27 
28 #ifndef __DECONST
29 #define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
30 #endif
31 
/* defines the following macros:

 CC_ARRAY_LEN: returns the number of elements in an array

 CC_STORE32_BE : store 32 bit value in big endian in unaligned buffer.
 CC_STORE32_LE : store 32 bit value in little endian in unaligned buffer.
 CC_STORE64_BE : store 64 bit value in big endian in unaligned buffer.
 CC_STORE64_LE : store 64 bit value in little endian in unaligned buffer.

 CC_LOAD32_BE : load 32 bit value in big endian from unaligned buffer.
 CC_LOAD32_LE : load 32 bit value in little endian from unaligned buffer.
 CC_LOAD64_BE : load 64 bit value in big endian from unaligned buffer.
 CC_LOAD64_LE : load 64 bit value in little endian from unaligned buffer.

 CC_ROR  : Rotate Right 32 bits. Rotate count can be a variable.
 CC_ROL  : Rotate Left 32 bits. Rotate count can be a variable.
 CC_RORc : Rotate Right 32 bits. Rotate count must be a constant.
 CC_ROLc : Rotate Left 32 bits. Rotate count must be a constant.

 CC_ROR64  : Rotate Right 64 bits. Rotate count can be a variable.
 CC_ROL64  : Rotate Left 64 bits. Rotate count can be a variable.
 CC_ROR64c : Rotate Right 64 bits. Rotate count must be a constant.
 CC_ROL64c : Rotate Left 64 bits. Rotate count must be a constant.

 CC_BSWAP  : byte swap a 32 bits variable.

 CC_H2BE32 : convert a 32 bits value between host and big endian order.
 CC_H2LE32 : convert a 32 bits value between host and little endian order.

 CC_BSWAP64  : byte swap a 64 bits variable

 CC_READ_LE32 : read a 32 bits little endian value

 CC_WRITE_LE32 : write a 32 bits little endian value
 CC_WRITE_LE64 : write a 64 bits little endian value

 CC_H2BE64 : convert a 64 bits value between host and big endian order
 CC_H2LE64 : convert a 64 bits value between host and little endian order

*/

// RTKitOSPlatform should replace CC_MEMCPY with memcpy
#define CC_MEMCPY(D,S,L) cc_memcpy((D),(S),(L))
#define CC_MEMMOVE(D,S,L) cc_memmove((D),(S),(L))
#define CC_MEMSET(D,V,L) cc_memset((D),(V),(L))

/* Prefer the fortified _chk builtins where available so writes past the
 * destination's statically-known size trap at runtime; otherwise fall back
 * to the plain libc routines. The _nochk variant checks against object-size
 * type 0 (the whole enclosing object) instead of 1 (the closest subobject),
 * i.e. it is deliberately laxer for callers that write across subobjects. */
#if __has_builtin(__builtin___memcpy_chk) && !defined(_MSC_VER)
#define cc_memcpy(dst, src, len) __builtin___memcpy_chk((dst), (src), (len), __builtin_object_size((dst), 1))
#define cc_memcpy_nochk(dst, src, len) __builtin___memcpy_chk((dst), (src), (len), __builtin_object_size((dst), 0))
#else
#define cc_memcpy(dst, src, len) memcpy((dst), (src), (len))
#define cc_memcpy_nochk(dst, src, len) memcpy((dst), (src), (len))
#endif

#if __has_builtin(__builtin___memmove_chk) && !defined(_MSC_VER)
#define cc_memmove(dst, src, len) __builtin___memmove_chk((dst), (src), (len), __builtin_object_size((dst), 1))
#else
#define cc_memmove(dst, src, len) memmove((dst), (src), (len))
#endif

#if __has_builtin(__builtin___memset_chk) && !defined(_MSC_VER)
#define cc_memset(dst, val, len) __builtin___memset_chk((dst), (val), (len), __builtin_object_size((dst), 1))
#else
#define cc_memset(dst, val, len) memset((dst), (val), (len))
#endif

// Element count of a true array (invalid on a pointer/decayed parameter).
#define CC_ARRAY_LEN(x) (sizeof((x))/sizeof((x)[0]))

// MARK: - Loads and Stores
// All accessors below take an arbitrary (possibly unaligned) byte pointer
// and move values one byte at a time, so they are safe on any architecture.

// MARK: -- 32 bits - little endian

// MARK: --- Default version

#define CC_STORE32_LE(x, y) do {                                    \
    ((unsigned char *)(y))[3] = (unsigned char)(((x)>>24)&255);     \
    ((unsigned char *)(y))[2] = (unsigned char)(((x)>>16)&255);     \
    ((unsigned char *)(y))[1] = (unsigned char)(((x)>>8)&255);      \
    ((unsigned char *)(y))[0] = (unsigned char)((x)&255);           \
} while(0)

#define CC_LOAD32_LE(x, y) do {                                     \
x = ((uint32_t)(((const unsigned char *)(y))[3] & 255)<<24) |       \
    ((uint32_t)(((const unsigned char *)(y))[2] & 255)<<16) |       \
    ((uint32_t)(((const unsigned char *)(y))[1] & 255)<<8)  |       \
    ((uint32_t)(((const unsigned char *)(y))[0] & 255));            \
} while(0)

// MARK: -- 64 bits - little endian

#define CC_STORE64_LE(x, y) do {                                    \
    ((unsigned char *)(y))[7] = (unsigned char)(((x)>>56)&255);     \
    ((unsigned char *)(y))[6] = (unsigned char)(((x)>>48)&255);     \
    ((unsigned char *)(y))[5] = (unsigned char)(((x)>>40)&255);     \
    ((unsigned char *)(y))[4] = (unsigned char)(((x)>>32)&255);     \
    ((unsigned char *)(y))[3] = (unsigned char)(((x)>>24)&255);     \
    ((unsigned char *)(y))[2] = (unsigned char)(((x)>>16)&255);     \
    ((unsigned char *)(y))[1] = (unsigned char)(((x)>>8)&255);      \
    ((unsigned char *)(y))[0] = (unsigned char)((x)&255);           \
} while(0)

#define CC_LOAD64_LE(x, y) do {                                     \
x = (((uint64_t)(((const unsigned char *)(y))[7] & 255))<<56) |     \
    (((uint64_t)(((const unsigned char *)(y))[6] & 255))<<48) |     \
    (((uint64_t)(((const unsigned char *)(y))[5] & 255))<<40) |     \
    (((uint64_t)(((const unsigned char *)(y))[4] & 255))<<32) |     \
    (((uint64_t)(((const unsigned char *)(y))[3] & 255))<<24) |     \
    (((uint64_t)(((const unsigned char *)(y))[2] & 255))<<16) |     \
    (((uint64_t)(((const unsigned char *)(y))[1] & 255))<<8)  |     \
    (((uint64_t)(((const unsigned char *)(y))[0] & 255)));          \
} while(0)

// MARK: -- 32 bits - big endian
// MARK: --- intel version

#if (defined(__i386__) || defined(__x86_64__)) && !defined(_MSC_VER)

// bswap the source register, store it, and bswap again so the caller's
// register value is left intact.
#define CC_STORE32_BE(x, y)     \
    __asm__ __volatile__ (      \
    "bswapl %0     \n\t"        \
    "movl   %0,(%1)\n\t"        \
    "bswapl %0     \n\t"        \
    ::"r"(x), "r"(y))

#define CC_LOAD32_BE(x, y)      \
    __asm__ __volatile__ (      \
    "movl (%1),%0\n\t"          \
    "bswapl %0\n\t"             \
    :"=r"(x): "r"(y))

#else
// MARK: --- default version
#define CC_STORE32_BE(x, y) do {                                \
    ((unsigned char *)(y))[0] = (unsigned char)(((x)>>24)&255); \
    ((unsigned char *)(y))[1] = (unsigned char)(((x)>>16)&255); \
    ((unsigned char *)(y))[2] = (unsigned char)(((x)>>8)&255);  \
    ((unsigned char *)(y))[3] = (unsigned char)((x)&255);       \
} while(0)

#define CC_LOAD32_BE(x, y) do {                                 \
x = ((uint32_t)(((const unsigned char *)(y))[0] & 255)<<24) |   \
    ((uint32_t)(((const unsigned char *)(y))[1] & 255)<<16) |   \
    ((uint32_t)(((const unsigned char *)(y))[2] & 255)<<8)  |   \
    ((uint32_t)(((const unsigned char *)(y))[3] & 255));        \
} while(0)

#endif

// MARK: -- 64 bits - big endian

// MARK: --- intel 64 bits version

#if defined(__x86_64__) && !defined (_MSC_VER)

// bswap the source register, store it, and bswap again so the caller's
// register value is left intact.
#define CC_STORE64_BE(x, y)   \
__asm__ __volatile__ (        \
"bswapq %0     \n\t"          \
"movq   %0,(%1)\n\t"          \
"bswapq %0     \n\t"          \
::"r"(x), "r"(y))

#define CC_LOAD64_BE(x, y)    \
__asm__ __volatile__ (        \
"movq (%1),%0\n\t"            \
"bswapq %0\n\t"               \
:"=r"(x): "r"(y))

#else

// MARK: --- default version

#define CC_STORE64_BE(x, y) do {                                    \
    ((unsigned char *)(y))[0] = (unsigned char)(((x)>>56)&255);     \
    ((unsigned char *)(y))[1] = (unsigned char)(((x)>>48)&255);     \
    ((unsigned char *)(y))[2] = (unsigned char)(((x)>>40)&255);     \
    ((unsigned char *)(y))[3] = (unsigned char)(((x)>>32)&255);     \
    ((unsigned char *)(y))[4] = (unsigned char)(((x)>>24)&255);     \
    ((unsigned char *)(y))[5] = (unsigned char)(((x)>>16)&255);     \
    ((unsigned char *)(y))[6] = (unsigned char)(((x)>>8)&255);      \
    ((unsigned char *)(y))[7] = (unsigned char)((x)&255);           \
} while(0)

#define CC_LOAD64_BE(x, y) do {                                     \
x = (((uint64_t)(((const unsigned char *)(y))[0] & 255))<<56) |     \
    (((uint64_t)(((const unsigned char *)(y))[1] & 255))<<48) |     \
    (((uint64_t)(((const unsigned char *)(y))[2] & 255))<<40) |     \
    (((uint64_t)(((const unsigned char *)(y))[3] & 255))<<32) |     \
    (((uint64_t)(((const unsigned char *)(y))[4] & 255))<<24) |     \
    (((uint64_t)(((const unsigned char *)(y))[5] & 255))<<16) |     \
    (((uint64_t)(((const unsigned char *)(y))[6] & 255))<<8)  |     \
    (((uint64_t)(((const unsigned char *)(y))[7] & 255)));          \
} while(0)

#endif

227 // MARK: - 32-bit Rotates
228 
229 #if defined(_MSC_VER)
230 // MARK: -- MSVC version
231 
232 #include <stdlib.h>
233 #if !defined(__clang__)
234  #pragma intrinsic(_lrotr,_lrotl)
235 #endif
236 #define	CC_ROR(x,n) _lrotr(x,n)
237 #define	CC_ROL(x,n) _lrotl(x,n)
238 #define	CC_RORc(x,n) _lrotr(x,n)
239 #define	CC_ROLc(x,n) _lrotl(x,n)
240 
241 #elif (defined(__i386__) || defined(__x86_64__))
242 // MARK: -- intel asm version
243 
CC_ROL(uint32_t word,int i)244 CC_INLINE uint32_t CC_ROL(uint32_t word, int i)
245 {
246     __asm__ ("roll %%cl,%0"
247          :"=r" (word)
248          :"0" (word),"c" (i));
249     return word;
250 }
251 
CC_ROR(uint32_t word,int i)252 CC_INLINE uint32_t CC_ROR(uint32_t word, int i)
253 {
254     __asm__ ("rorl %%cl,%0"
255          :"=r" (word)
256          :"0" (word),"c" (i));
257     return word;
258 }
259 
260 /* Need to be a macro here, because 'i' is an immediate (constant) */
261 #define CC_ROLc(word, i)                \
262 ({  uint32_t _word=(word);              \
263     __asm__ __volatile__ ("roll %2,%0"  \
264         :"=r" (_word)                   \
265         :"0" (_word),"I" (i));          \
266     _word;                              \
267 })
268 
269 
270 #define CC_RORc(word, i)                \
271 ({  uint32_t _word=(word);              \
272     __asm__ __volatile__ ("rorl %2,%0"  \
273         :"=r" (_word)                   \
274         :"0" (_word),"I" (i));          \
275     _word;                              \
276 })
277 
278 #else
279 
280 // MARK: -- default version
281 
CC_ROL(uint32_t word,int i)282 CC_INLINE uint32_t CC_ROL(uint32_t word, int i)
283 {
284     return ( (word<<(i&31)) | (word>>(32-(i&31))) );
285 }
286 
CC_ROR(uint32_t word,int i)287 CC_INLINE uint32_t CC_ROR(uint32_t word, int i)
288 {
289     return ( (word>>(i&31)) | (word<<(32-(i&31))) );
290 }
291 
292 #define	CC_ROLc(x, y) CC_ROL(x, y)
293 #define	CC_RORc(x, y) CC_ROR(x, y)
294 
295 #endif
296 
297 // MARK: - 64 bits rotates
298 
299 #if defined(__x86_64__) && !defined(_MSC_VER) //clang _MSVC doesn't support GNU-style inline assembly
300 // MARK: -- intel 64 asm version
301 
CC_ROL64(uint64_t word,int i)302 CC_INLINE uint64_t CC_ROL64(uint64_t word, int i)
303 {
304     __asm__("rolq %%cl,%0"
305         :"=r" (word)
306         :"0" (word),"c" (i));
307     return word;
308 }
309 
CC_ROR64(uint64_t word,int i)310 CC_INLINE uint64_t CC_ROR64(uint64_t word, int i)
311 {
312     __asm__("rorq %%cl,%0"
313         :"=r" (word)
314         :"0" (word),"c" (i));
315     return word;
316 }
317 
318 /* Need to be a macro here, because 'i' is an immediate (constant) */
319 #define CC_ROL64c(word, i)      \
320 ({                              \
321     uint64_t _word=(word);      \
322     __asm__("rolq %2,%0"        \
323         :"=r" (_word)           \
324         :"0" (_word),"J" (i));  \
325     _word;                      \
326 })
327 
328 #define CC_ROR64c(word, i)      \
329 ({                              \
330     uint64_t _word=(word);      \
331     __asm__("rorq %2,%0"        \
332         :"=r" (_word)           \
333         :"0" (_word),"J" (i));  \
334     _word;                      \
335 })
336 
337 
338 #else /* Not x86_64  */
339 
340 // MARK: -- default C version
341 
CC_ROL64(uint64_t word,int i)342 CC_INLINE uint64_t CC_ROL64(uint64_t word, int i)
343 {
344     return ( (word<<(i&63)) | (word>>(64-(i&63))) );
345 }
346 
CC_ROR64(uint64_t word,int i)347 CC_INLINE uint64_t CC_ROR64(uint64_t word, int i)
348 {
349     return ( (word>>(i&63)) | (word<<(64-(i&63))) );
350 }
351 
352 #define	CC_ROL64c(x, y) CC_ROL64(x, y)
353 #define	CC_ROR64c(x, y) CC_ROR64(x, y)
354 
355 #endif
356 
357 
// MARK: - Byte Swaps

#if __has_builtin(__builtin_bswap32)
#define CC_BSWAP32(x) __builtin_bswap32(x)
#else
// Portable fallback: move each byte to its mirrored position.
CC_INLINE uint32_t CC_BSWAP32(uint32_t x)
{
    return
        ((x & 0xff000000) >> 24) |
        ((x & 0x00ff0000) >>  8) |
        ((x & 0x0000ff00) <<  8) |
        ((x & 0x000000ff) << 24);
}
#endif

#if __has_builtin(__builtin_bswap64)
#define CC_BSWAP64(x) __builtin_bswap64(x)
#else
CC_INLINE uint64_t CC_BSWAP64(uint64_t x)
{
    return
        ((x & 0xff00000000000000ULL) >> 56) |
        ((x & 0x00ff000000000000ULL) >> 40) |
        ((x & 0x0000ff0000000000ULL) >> 24) |
        ((x & 0x000000ff00000000ULL) >>  8) |
        ((x & 0x00000000ff000000ULL) <<  8) |
        ((x & 0x0000000000ff0000ULL) << 24) |
        ((x & 0x000000000000ff00ULL) << 40) |
        ((x & 0x00000000000000ffULL) << 56);
}
#endif

// Host <-> big/little endian conversions; a swap on one byte order, the
// identity on the other.
#ifdef __LITTLE_ENDIAN__
#define CC_H2BE32(x) CC_BSWAP32(x)
#define CC_H2LE32(x) (x)
#define CC_H2BE64(x) CC_BSWAP64(x)
#define CC_H2LE64(x) (x)
#else
#define CC_H2BE32(x) (x)
#define CC_H2LE32(x) CC_BSWAP32(x)
#define CC_H2BE64(x) (x)
#define CC_H2LE64(x) CC_BSWAP64(x)
#endif

// Byte-by-byte little-endian accessors; safe for unaligned pointers.
#define CC_READ_LE32(ptr) \
( (uint32_t)( \
((uint32_t)((const uint8_t *)(ptr))[0]) | \
(((uint32_t)((const uint8_t *)(ptr))[1]) <<  8) | \
(((uint32_t)((const uint8_t *)(ptr))[2]) << 16) | \
(((uint32_t)((const uint8_t *)(ptr))[3]) << 24)))

#define CC_WRITE_LE32(ptr, x) \
do { \
((uint8_t *)(ptr))[0] = (uint8_t)( (x)        & 0xFF); \
((uint8_t *)(ptr))[1] = (uint8_t)(((x) >>  8) & 0xFF); \
((uint8_t *)(ptr))[2] = (uint8_t)(((x) >> 16) & 0xFF); \
((uint8_t *)(ptr))[3] = (uint8_t)(((x) >> 24) & 0xFF); \
} while(0)

#define CC_WRITE_LE64(ptr, x) \
do { \
((uint8_t *)(ptr))[0] = (uint8_t)( (x)        & 0xFF); \
((uint8_t *)(ptr))[1] = (uint8_t)(((x) >>  8) & 0xFF); \
((uint8_t *)(ptr))[2] = (uint8_t)(((x) >> 16) & 0xFF); \
((uint8_t *)(ptr))[3] = (uint8_t)(((x) >> 24) & 0xFF); \
((uint8_t *)(ptr))[4] = (uint8_t)(((x) >> 32) & 0xFF); \
((uint8_t *)(ptr))[5] = (uint8_t)(((x) >> 40) & 0xFF); \
((uint8_t *)(ptr))[6] = (uint8_t)(((x) >> 48) & 0xFF); \
((uint8_t *)(ptr))[7] = (uint8_t)(((x) >> 56) & 0xFF); \
} while(0)

/* extract byte n (0 = least significant) portably */
#ifdef _MSC_VER
#define cc_byte(x, n) ((unsigned char)((x) >> (8 * (n))))
#else
#define cc_byte(x, n) (((x) >> (8 * (n))) & 255)
#endif

/* Count leading zeros (for nonzero inputs) */

/*
 *  On i386 and x86_64, we know clang and GCC will generate BSR for
 *  __builtin_clzl.  This instruction IS NOT constant time on all micro-
 *  architectures, but it *is* constant time on all micro-architectures that
 *  have been used by Apple, and we expect that to continue to be the case.
 *
 *  When building for x86_64h with clang, this produces LZCNT, which is exactly
 *  what we want.
 *
 *  On arm and arm64, we know that clang and GCC generate the constant-time CLZ
 *  instruction from __builtin_clzl( ).
 */

#if defined(_WIN32)
/* We use the Windows implementations below. */
#elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__)
/* We use a thought-to-be-good version of __builtin_clz. */
#elif defined __GNUC__
#warning Using __builtin_clz() on an unknown architecture; it may not be constant-time.
/* If you find yourself seeing this warning, file a radar for someone to
 * check whether or not __builtin_clz() generates a constant-time
 * implementation on the architecture you are targeting.  If it does, append
 * the name of that architecture to the list of "safe" architectures above.  */
#endif

cc_clz32_fallback(uint32_t data)463 CC_INLINE CC_CONST unsigned cc_clz32_fallback(uint32_t data)
464 {
465     unsigned int b = 0;
466     unsigned int bit = 0;
467     // Work from LSB to MSB
468     for (int i = 0; i < 32; i++) {
469         bit = (data >> i) & 1;
470         // If the bit is 0, update the "leading bits are zero" counter "b".
471         b += (1 - bit);
472         /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
473          * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
474          */
475         b &= (bit - 1);
476     }
477     return b;
478 }
479 
cc_clz64_fallback(uint64_t data)480 CC_INLINE CC_CONST unsigned cc_clz64_fallback(uint64_t data)
481 {
482     unsigned int b = 0;
483     unsigned int bit = 0;
484     // Work from LSB to MSB
485     for (int i = 0; i < 64; i++) {
486         bit = (data >> i) & 1;
487         // If the bit is 0, update the "leading bits are zero" counter.
488         b += (1 - bit);
489         /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
490          * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
491          */
492         b &= (bit - 1);
493     }
494     return b;
495 }
496 
cc_ctz32_fallback(uint32_t data)497 CC_INLINE CC_CONST unsigned cc_ctz32_fallback(uint32_t data)
498 {
499     unsigned int b = 0;
500     unsigned int bit = 0;
501     // Work from MSB to LSB
502     for (int i = 31; i >= 0; i--) {
503         bit = (data >> i) & 1;
504         // If the bit is 0, update the "trailing zero bits" counter.
505         b += (1 - bit);
506         /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
507          * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
508          */
509         b &= (bit - 1);
510     }
511     return b;
512 }
513 
cc_ctz64_fallback(uint64_t data)514 CC_INLINE CC_CONST unsigned cc_ctz64_fallback(uint64_t data)
515 {
516     unsigned int b = 0;
517     unsigned int bit = 0;
518     // Work from MSB to LSB
519     for (int i = 63; i >= 0; i--) {
520         bit = (data >> i) & 1;
521         // If the bit is 0, update the "trailing zero bits" counter.
522         b += (1 - bit);
523         /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
524          * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
525          */
526         b &= (bit - 1);
527     }
528     return b;
529 }
530 
531 /*!
532   @function cc_clz32
533   @abstract Count leading zeros of a nonzero 32-bit value
534 
535   @param data A nonzero 32-bit value
536 
537   @result Count of leading zeros of @p data
538 
539   @discussion @p data is assumed to be nonzero.
540 */
cc_clz32(uint32_t data)541 CC_INLINE CC_CONST unsigned cc_clz32(uint32_t data) {
542     cc_assert(data != 0);
543 #if defined(_WIN32)
544     return cc_clz32_fallback(data);
545 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__)
546     cc_static_assert(sizeof(unsigned) == 4, "clz relies on an unsigned int being 4 bytes");
547     return (unsigned)__builtin_clz(data);
548 #else
549     return cc_clz32_fallback(data);
550 #endif
551 }
552 
553 /*!
554   @function cc_clz64
555   @abstract Count leading zeros of a nonzero 64-bit value
556 
557   @param data A nonzero 64-bit value
558 
559   @result Count of leading zeros of @p data
560 
561   @discussion @p data is assumed to be nonzero.
562 */
cc_clz64(uint64_t data)563 CC_INLINE CC_CONST unsigned cc_clz64(uint64_t data) {
564     cc_assert(data != 0);
565 #if defined(_WIN32)
566     return cc_clz64_fallback(data);
567 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__)
568     return (unsigned)__builtin_clzll(data);
569 #else
570     return cc_clz64_fallback(data);
571 #endif
572 }
573 
574 /*!
575   @function cc_ctz32
576   @abstract Count trailing zeros of a nonzero 32-bit value
577 
578   @param data A nonzero 32-bit value
579 
580   @result Count of trailing zeros of @p data
581 
582   @discussion @p data is assumed to be nonzero.
583 */
cc_ctz32(uint32_t data)584 CC_INLINE CC_CONST unsigned cc_ctz32(uint32_t data) {
585     cc_assert(data != 0);
586 #if defined(_WIN32)
587     return cc_ctz32_fallback(data);
588 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__)
589     cc_static_assert(sizeof(unsigned) == 4, "ctz relies on an unsigned int being 4 bytes");
590     return (unsigned)__builtin_ctz(data);
591 #else
592     return cc_ctz32_fallback(data);
593 #endif
594 }
595 
596 /*!
597   @function cc_ctz64
598   @abstract Count trailing zeros of a nonzero 64-bit value
599 
600   @param data A nonzero 64-bit value
601 
602   @result Count of trailing zeros of @p data
603 
604   @discussion @p data is assumed to be nonzero.
605 */
cc_ctz64(uint64_t data)606 CC_INLINE CC_CONST unsigned cc_ctz64(uint64_t data) {
607     cc_assert(data != 0);
608 #if defined(_WIN32)
609     return cc_ctz64_fallback(data);
610 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__)
611     return (unsigned)__builtin_ctzll(data);
612 #else
613     return cc_ctz64_fallback(data);
614 #endif
615 }
616 
617 /*!
618   @function cc_ffs32_fallback
619   @abstract Find first bit set in a 32-bit value
620 
621   @param data A 32-bit value
622 
623   @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
624  */
cc_ffs32_fallback(int32_t data)625 CC_INLINE CC_CONST unsigned cc_ffs32_fallback(int32_t data)
626 {
627     unsigned b = 0;
628     unsigned bit = 0;
629     unsigned seen = 0;
630 
631     // Work from LSB to MSB
632     for (int i = 0; i < 32; i++) {
633         bit = ((uint32_t)data >> i) & 1;
634 
635         // Track whether we've seen a 1 bit.
636         seen |= bit;
637 
638         // If the bit is 0 and we haven't seen a 1 yet, increment b.
639         b += (1 - bit) & (seen - 1);
640     }
641 
642     // If we saw a 1, return b + 1, else 0.
643     return (~(seen - 1)) & (b + 1);
644 }
645 
646 /*!
647   @function cc_ffs64_fallback
648   @abstract Find first bit set in a 64-bit value
649 
650   @param data A 64-bit value
651 
652   @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
653  */
cc_ffs64_fallback(int64_t data)654 CC_INLINE CC_CONST unsigned cc_ffs64_fallback(int64_t data)
655 {
656     unsigned b = 0;
657     unsigned bit = 0;
658     unsigned seen = 0;
659 
660     // Work from LSB to MSB
661     for (int i = 0; i < 64; i++) {
662         bit = ((uint64_t)data >> i) & 1;
663 
664         // Track whether we've seen a 1 bit.
665         seen |= bit;
666 
667         // If the bit is 0 and we haven't seen a 1 yet, increment b.
668         b += (1 - bit) & (seen - 1);
669     }
670 
671     // If we saw a 1, return b + 1, else 0.
672     return (~(seen - 1)) & (b + 1);
673 }
674 
675 /*!
676   @function cc_ffs32
677   @abstract Find first bit set in a 32-bit value
678 
679   @param data A 32-bit value
680 
681   @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
682  */
cc_ffs32(int32_t data)683 CC_INLINE CC_CONST unsigned cc_ffs32(int32_t data)
684 {
685     cc_static_assert(sizeof(int) == 4, "ffs relies on an int being 4 bytes");
686 #ifdef _WIN32
687     return cc_ffs32_fallback(data);
688 #else
689     return (unsigned)__builtin_ffs(data);
690 #endif
691 }
692 
693 /*!
694   @function cc_ffs64
695   @abstract Find first bit set in a 64-bit value
696 
697   @param data A 64-bit value
698 
699   @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
700  */
cc_ffs64(int64_t data)701 CC_INLINE CC_CONST unsigned cc_ffs64(int64_t data)
702 {
703 #ifdef _WIN32
704     return cc_ffs64_fallback(data);
705 #else
706     return (unsigned)__builtin_ffsll(data);
707 #endif
708 }
709 
// Overflow-checked arithmetic: store a op b into *res, return true on overflow.
#define cc_add_overflow __builtin_add_overflow
#define cc_mul_overflow __builtin_mul_overflow

/* HEAVISIDE_STEP (shifted by one)
   function f(x): x->0, when x=0
                  x->1, when x>0
   Can also be seen as a bitwise operation:
      f(x): x -> y
        y[0]=(OR x[i]) for all i (all bits)
        y[i]=0 for all i>0
   Run in constant time (log2(<bitsize of x>))
   Useful to run constant time checks
*/
#define CC_HEAVISIDE_STEP(r, s) {                       \
    const uint64_t _s = (uint64_t)(s);                  \
    const uint64_t _t = (_s & 0xffffffff) | (_s >> 32); \
    r = (__typeof__(r))((0xffffffff + _t) >> 32);       \
}

/* Return 1 if x mod 4 =1,2,3, 0 otherwise (i.e. OR of the low 2 / 3 bits) */
#define CC_CARRY_2BITS(x) ((((x) >> 1) | (x)) & 0x1)
#define CC_CARRY_3BITS(x) ((((x) >> 2) | ((x) >> 1) | (x)) & 0x1)

// Rounded-up integer division, and bit-length to byte-length conversion.
#define cc_ceiling(a,b)  (((a)+((b)-1))/(b))
#define CC_BITLEN_TO_BYTELEN(x) cc_ceiling((x), 8)

/*!
 @brief     cc_muxp(s, a, b) is equivalent to z = s ? a : b, but it executes in constant time
 @param a	input pointer
 @param b	input pointer
 @param s	The selection parameter s must be 0 or 1. if s is integer 1 a is returned. If s is integer 0, b is returned. Otherwise, the output is undefined.
 @return    Returns a, if s is 1 and b if s is 0
 */
void *cc_muxp(int s, const void *a, const void *b);

/*!
 @brief     CC_MUXU(r, s, a, b) is equivalent to r = s ? a : b, but executes in constant time
 @param a   Input a
 @param b   Input b
 @param s   Selection parameter s. Must be 0 or 1.
 @param r   Output, set to a if s=1, or b if s=0.
 */
#define CC_MUXU(r, s, a, b)                           \
    {                                                 \
        __typeof__(r) _cond = (__typeof__(r))((s)-1); \
        r = (~_cond & (a)) | (_cond & (b));           \
    }

758 #define CC_PROVIDES_ABORT (!(CC_USE_SEPROM || CC_USE_S3 || CC_BASEBAND || CC_EFI || CC_IBOOT || CC_RTKITROM))
759 
760 /*!
761  @function cc_abort
762  @abstract Abort execution unconditionally
763  */
764 CC_NORETURN
765 void cc_abort(const char *msg);
766 
767 /*!
768   @function cc_try_abort
769   @abstract Abort execution iff the platform provides a function like @p abort() or @p panic()
770 
771   @discussion If the platform does not provide a means to abort execution, this function does nothing; therefore, callers should return an error code after calling this function.
772 */
773 #if CC_PROVIDES_ABORT
774 
775 #pragma clang diagnostic push
776 #pragma clang diagnostic ignored "-Wmissing-noreturn"
777 
778 CC_INLINE
cc_try_abort(const char * msg)779 void cc_try_abort(const char *msg)
780 {
781     cc_abort(msg);
782 }
783 
784 #pragma clang diagnostic pop
785 
786 #else
787 
788 CC_INLINE
cc_try_abort(CC_UNUSED const char * msg)789 void cc_try_abort(CC_UNUSED const char *msg)
790 {
791 
792 }
793 
794 #endif
795 
796 #if __has_builtin(__builtin_expect)
797  #define CC_UNLIKELY(cond) __builtin_expect(cond, 0)
798 #else
799  #define CC_UNLIKELY(cond) cond
800 #endif
801 
802 CC_INLINE
cc_try_abort_if(bool condition,const char * msg)803 void cc_try_abort_if(bool condition, const char *msg)
804 {
805     if (CC_UNLIKELY(condition)) {
806         cc_try_abort(msg);
807     }
808 }
809 
810 /*
811   Unfortunately, since we export this symbol, this declaration needs
812   to be in a public header to satisfy TAPI.
813 
814   See fipspost_trace_priv.h for more details.
815 */
816 extern const void *fipspost_trace_vtable;
817 
818 #endif /* _CORECRYPTO_CC_PRIV_H_ */
819