xref: /xnu-11215.81.4/osfmk/corecrypto/cc_internal.h (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /* Copyright (c) (2019,2021-2023) Apple Inc. All rights reserved.
2  *
3  * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which
4  * is contained in the License.txt file distributed with corecrypto) and only to
5  * people who accept that license. IMPORTANT:  Any license rights granted to you by
6  * Apple Inc. (if any) are limited to internal use within your organization only on
7  * devices and computers you own or control, for the sole purpose of verifying the
8  * security characteristics and correct functioning of the Apple Software.  You may
9  * not, directly or indirectly, redistribute the Apple Software or any portions thereof.
10  *
11  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
12  *
13  * This file contains Original Code and/or Modifications of Original Code
14  * as defined in and that are subject to the Apple Public Source License
15  * Version 2.0 (the 'License'). You may not use this file except in
16  * compliance with the License. The rights granted to you under the License
17  * may not be used to create, or enable the creation or redistribution of,
18  * unlawful or unlicensed copies of an Apple operating system, or to
19  * circumvent, violate, or enable the circumvention or violation of, any
20  * terms of an Apple operating system software license agreement.
21  *
22  * Please obtain a copy of the License at
23  * http://www.opensource.apple.com/apsl/ and read it before using this file.
24  *
25  * The Original Code and all software distributed under the License are
26  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
27  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
28  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
30  * Please see the License for the specific language governing rights and
31  * limitations under the License.
32  *
33  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
34  */
35 
36 #ifndef _CORECRYPTO_CC_INTERNAL_H_
37 #define _CORECRYPTO_CC_INTERNAL_H_
38 
39 #include <corecrypto/cc_priv.h>
40 #include "cc_runtime_config.h"
41 
42 #if CC_XNU_KERNEL_PRIVATE
43 #elif CC_EFI
44 #elif CC_KERNEL
45 #include <libkern/libkern.h>
46 #else
47 #include <stdlib.h>
48 #include <stdio.h>
49 #endif
50 
51 #include <stdarg.h>
52 
53 #include "cc_macros.h"
54 
55 #if CC_EFI
56 #include "cc_efi_shim.h"
57 int cc_memcmp(const void *buf1, const void *buf2, size_t len);
58 #else
59     #define cc_memcmp(buf1, buf2, len) memcmp(buf1, buf2, len)
60 #endif
61 
62 extern bool cc_rdrand(uint64_t *rand);
63 
64 #if CC_BUILT_FOR_TESTING
65 extern bool (*cc_rdrand_mock)(uint64_t *rand);
66 
67 extern void (*cc_abort_mock)(const char *msg);
68 #endif
69 
70 
71 #if CC_DIT_MAYBE_SUPPORTED
72 
73 // Use the DIT register's encoded name to avoid assembler
74 // complaints when compiling for ARM64 before v8.4.
75 #define CC_DIT_REGISTER "s3_3_c4_c2_5"
76 
77 #define CC_DIT_BIT (1U << 24)
78 
79 CC_INLINE bool
cc_is_dit_enabled(void)80 cc_is_dit_enabled(void)
81 {
82 	return __builtin_arm_rsr64(CC_DIT_REGISTER) & CC_DIT_BIT;
83 }
84 
/*
 * Enable the ARM DIT (Data-Independent Timing) feature, if supported.
 *
 * Returns true only when this call itself turned DIT on — i.e. the caller
 * (via cc_disable_dit) is responsible for turning it back off. Returns
 * false when DIT is unsupported or was already enabled by someone else.
 */
CC_INLINE bool
cc_enable_dit(void)
{
	if (!CC_HAS_DIT()) {
		return false;
	}

	// DIT might have already been enabled by another corecrypto function, in
	// that case that function is responsible for disabling DIT when returning.
	//
	// This also covers when code _outside_ corecrypto enabled DIT before
	// calling us. In that case we're not responsible for disabling it either.
	if (cc_is_dit_enabled()) {
		return false;
	}

	// Encoding of <msr dit, #1>. Emitted as a raw word so the assembler
	// accepts it even when targeting ARM64 revisions before v8.4.
	__asm__ __volatile__ (
        ".long 0xd503415f\n"
        );

#if CC_BUILT_FOR_TESTING
	// Check that DIT was enabled.
	cc_try_abort_if(!cc_is_dit_enabled(), "DIT not enabled");
#endif

	// To the cleanup function, indicate that we toggled DIT and
	// that cc_disable_dit() should actually disable it again.
	return true;
}
115 
// Disable DIT again iff *dit_was_enabled is true, i.e. iff the paired
// cc_enable_dit() call actually toggled it on.
void cc_disable_dit(volatile bool *cc_unsafe_indexable dit_was_enabled);

// Enable DIT for the remainder of the enclosing scope. The cleanup
// attribute runs cc_disable_dit() automatically when the (volatile)
// flag variable goes out of scope.
#define CC_ENSURE_DIT_ENABLED                    \
    volatile bool _cc_dit_auto_disable           \
	__attribute__((cleanup(cc_disable_dit))) \
	__attribute__((unused)) = cc_enable_dit();
122 
123 #else
124 
125 #define CC_ENSURE_DIT_ENABLED
126 
127 #endif // CC_DIT_MAYBE_SUPPORTED
128 
129 /*!
130  *  @function cc_is_vmm_present
131  *  @abstract Determine if corecrypto is running in a VM
132  *
133  *  @return True iff running in a VM; false otherwise
134  *
135  *  @discussion This function merely checks the relevant sysctl, which
136  *  may not be accurate. Thus, it should not be used to make any
137  *  security decisions.
138  */
139 extern bool cc_is_vmm_present(void);
140 
141 /*!
142  *  @function cc_current_arch
143  *  @abstract The architecture loaded in the current process
144  *
145  *  @return A string representation of the current architecture or
146  *  "unknown"
147  */
148 extern const char *cc_current_arch(void);
149 
150 // MARK: - popcount
151 
152 /// Count number of bits set
153 CC_INLINE CC_CONST unsigned
cc_popcount32_fallback(uint32_t v)154 cc_popcount32_fallback(uint32_t v)
155 {
156 	v = v - ((v >> 1) & 0x55555555);
157 	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
158 	return ((v + (v >> 4) & 0xf0f0f0f) * 0x1010101) >> 24;
159 }
160 
/// Count number of bits set
CC_INLINE CC_CONST unsigned
cc_popcount64_fallback(uint64_t v)
{
	// Branch-free SWAR popcount: sum adjacent bit pairs, then 2-bit groups,
	// then nibbles; the multiply accumulates all byte counts into the top
	// byte, extracted by the final shift.
	v = v - ((v >> 1) & 0x5555555555555555);
	v = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333);
	v = (v + (v >> 4)) & 0xf0f0f0f0f0f0f0f;
	return (v * 0x101010101010101) >> 56;
}
170 
171 /// Count number of bits set
172 CC_INLINE CC_CONST unsigned
cc_popcount32(uint32_t data)173 cc_popcount32(uint32_t data)
174 {
175 #if __has_builtin(__builtin_popcount)
176 	return (unsigned)__builtin_popcount(data);
177 #else
178 	return cc_popcount32_fallback(data);
179 #endif
180 }
181 
182 /// Count number of bits set
183 CC_INLINE CC_CONST unsigned
cc_popcount64(uint64_t data)184 cc_popcount64(uint64_t data)
185 {
186 #if __has_builtin(__builtin_popcountll)
187 	return (unsigned)__builtin_popcountll(data);
188 #else
189 	return cc_popcount64_fallback(data);
190 #endif
191 }
192 
// Evaluate (condition) three times and AND the results. Use with volatile
// variables only — with non-volatile operands the compiler may collapse
// the three reads into one.
// NOTE(review): presumably a hardening measure against fault injection /
// instruction skipping — confirm with maintainers.
#define CC_MULTI_IF_AND(condition) \
    ((condition) && (condition) && (condition))
196 
197 // MARK: - Byte Extraction
198 #ifdef _MSC_VER
199 #define cc_byte(x, n) ((unsigned char)((x) >> (8 * (n))))
200 #else
201 #define cc_byte(x, n) (((x) >> (8 * (n))) & 255)
202 #endif
203 
204 // MARK: - 32-bit Rotates
205 
206 #if defined(_MSC_VER)
207 // MARK: -- MSVC version
208 
209 #include <stdlib.h>
210 #if !defined(__clang__)
211  #pragma intrinsic(_lrotr,_lrotl)
212 #endif
213 #define    CC_ROR(x, n) _lrotr(x,n)
214 #define    CC_ROL(x, n) _lrotl(x,n)
215 #define    CC_RORc(x, n) _lrotr(x,n)
216 #define    CC_ROLc(x, n) _lrotl(x,n)
217 
218 #elif (defined(__i386__) || defined(__x86_64__))
219 // MARK: -- intel asm version
220 
// Rotate a 32-bit word left by a variable count, using the x86 'roll'
// instruction; the "c" constraint places the count in %cl as 'roll' requires.
CC_INLINE uint32_t
CC_ROL(uint32_t word, int i)
{
	__asm__ ("roll %%cl,%0"
             :"=r" (word)
             :"0" (word),"c" (i));
	return word;
}
229 
// Rotate a 32-bit word right by a variable count, using the x86 'rorl'
// instruction; the "c" constraint places the count in %cl as 'rorl' requires.
CC_INLINE uint32_t
CC_ROR(uint32_t word, int i)
{
	__asm__ ("rorl %%cl,%0"
             :"=r" (word)
             :"0" (word),"c" (i));
	return word;
}
238 
/* Need to be a macro here, because 'i' is an immediate (constant) */
/* Constant-count rotates. The "I" asm constraint accepts only a
 * compile-time integer constant (0..31 on x86), so these must be macros
 * rather than inline functions. */
#define CC_ROLc(word, i)                \
({  uint32_t _word=(word);              \
    __asm__ __volatile__ ("roll %2,%0"  \
	:"=r" (_word)                   \
	:"0" (_word),"I" (i));          \
    _word;                              \
})


#define CC_RORc(word, i)                \
({  uint32_t _word=(word);              \
    __asm__ __volatile__ ("rorl %2,%0"  \
	:"=r" (_word)                   \
	:"0" (_word),"I" (i));          \
    _word;                              \
})
256 
257 #else
258 
259 // MARK: -- default version
260 
261 CC_INLINE uint32_t
CC_ROL(uint32_t word,int i)262 CC_ROL(uint32_t word, int i)
263 {
264 	return (word << (i & 31)) | (word >> ((32 - (i & 31)) & 31));
265 }
266 
267 CC_INLINE uint32_t
CC_ROR(uint32_t word,int i)268 CC_ROR(uint32_t word, int i)
269 {
270 	return (word >> (i & 31)) | (word << ((32 - (i & 31)) & 31));
271 }
272 
273 #define    CC_ROLc(x, y) CC_ROL(x, y)
274 #define    CC_RORc(x, y) CC_ROR(x, y)
275 
276 #endif
277 
278 // MARK: - 64 bits rotates
279 
280 #if defined(__x86_64__) && !defined(_MSC_VER) //clang _MSVC doesn't support GNU-style inline assembly
281 // MARK: -- intel 64 asm version
282 
// Rotate a 64-bit word left by a variable count, using the x86-64 'rolq'
// instruction; the "c" constraint places the count in %cl as 'rolq' requires.
CC_INLINE uint64_t
CC_ROL64(uint64_t word, int i)
{
	__asm__("rolq %%cl,%0"
            :"=r" (word)
            :"0" (word),"c" (i));
	return word;
}
291 
// Rotate a 64-bit word right by a variable count, using the x86-64 'rorq'
// instruction; the "c" constraint places the count in %cl as 'rorq' requires.
CC_INLINE uint64_t
CC_ROR64(uint64_t word, int i)
{
	__asm__("rorq %%cl,%0"
            :"=r" (word)
            :"0" (word),"c" (i));
	return word;
}
300 
/* Need to be a macro here, because 'i' is an immediate (constant) */
/* Constant-count 64-bit rotates. The "J" asm constraint accepts only a
 * compile-time integer constant (0..63 on x86-64), so these must be
 * macros rather than inline functions. */
#define CC_ROL64c(word, i)      \
({                              \
    uint64_t _word=(word);      \
    __asm__("rolq %2,%0"        \
	:"=r" (_word)           \
	:"0" (_word),"J" (i));  \
    _word;                      \
})

#define CC_ROR64c(word, i)      \
({                              \
    uint64_t _word=(word);      \
    __asm__("rorq %2,%0"        \
	:"=r" (_word)           \
	:"0" (_word),"J" (i));  \
    _word;                      \
})
319 
320 
321 #else /* Not x86_64  */
322 
323 // MARK: -- default C version
324 
325 CC_INLINE uint64_t
CC_ROL64(uint64_t word,int i)326 CC_ROL64(uint64_t word, int i)
327 {
328 	return (word << (i & 63)) | (word >> ((64 - (i & 63)) & 63));
329 }
330 
331 CC_INLINE uint64_t
CC_ROR64(uint64_t word,int i)332 CC_ROR64(uint64_t word, int i)
333 {
334 	return (word >> (i & 63)) | (word << ((64 - (i & 63)) & 63));
335 }
336 
337 #define    CC_ROL64c(x, y) CC_ROL64(x, y)
338 #define    CC_ROR64c(x, y) CC_ROR64(x, y)
339 
340 #endif
341 
342 // MARK: -- Count Leading / Trailing Zeros
343 /* Count leading zeros (for nonzero inputs) */
344 
345 /*
346  *  On i386 and x86_64, we know clang and GCC will generate BSR for
347  *  __builtin_clzl.  This instruction IS NOT constant time on all micro-
348  *  architectures, but it *is* constant time on all micro-architectures that
349  *  have been used by Apple, and we expect that to continue to be the case.
350  *
351  *  When building for x86_64h with clang, this produces LZCNT, which is exactly
352  *  what we want.
353  *
354  *  On arm and arm64, we know that clang and GCC generate the constant-time CLZ
355  *  instruction from __builtin_clzl( ).
356  */
357 
358 #if defined(_WIN32)
359 /* We use the Windows implementations below. */
360 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__)
361 /* We use a thought-to-be-good version of __builtin_clz. */
362 #elif defined __GNUC__
363 #warning Using __builtin_clz() on an unknown architecture; it may not be constant-time.
364 /* If you find yourself seeing this warning, file a radar for someone to
365  * check whether or not __builtin_clz() generates a constant-time
366  * implementation on the architecture you are targeting.  If it does, append
367  * the name of that architecture to the list of "safe" architectures above.  */
368 #endif
369 
/*
 * Count leading zeros of a 32-bit value in constant time: every bit is
 * examined regardless of the input, so the running time does not depend
 * on the data. Returns 32 when data == 0.
 */
CC_INLINE CC_CONST unsigned
cc_clz32_fallback(uint32_t data)
{
	unsigned int b = 0;   // running count; frozen at 0 once a 1 bit is seen
	unsigned int bit = 0; // current bit under inspection
	// Work from LSB to MSB
	for (int i = 0; i < 32; i++) {
		bit = (data >> i) & 1;
		// If the bit is 0, update the "leading bits are zero" counter "b".
		b += (1 - bit);
		/* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
		 * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
		 */
		b &= (bit - 1);
	}
	return b;
}
387 
/*
 * Count leading zeros of a 64-bit value in constant time: every bit is
 * examined regardless of the input, so the running time does not depend
 * on the data. Returns 64 when data == 0.
 */
CC_INLINE CC_CONST unsigned
cc_clz64_fallback(uint64_t data)
{
	unsigned int b = 0;   // running count; frozen at 0 once a 1 bit is seen
	unsigned int bit = 0; // current bit under inspection
	// Work from LSB to MSB
	for (int i = 0; i < 64; i++) {
		bit = (data >> i) & 1;
		// If the bit is 0, update the "leading bits are zero" counter.
		b += (1 - bit);
		/* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
		 * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
		 */
		b &= (bit - 1);
	}
	return b;
}
405 
/*
 * Count trailing zeros of a 32-bit value in constant time: every bit is
 * examined regardless of the input, so the running time does not depend
 * on the data. Returns 32 when data == 0.
 */
CC_INLINE CC_CONST unsigned
cc_ctz32_fallback(uint32_t data)
{
	unsigned int b = 0;   // running count; frozen at 0 once a 1 bit is seen
	unsigned int bit = 0; // current bit under inspection
	// Work from MSB to LSB
	for (int i = 31; i >= 0; i--) {
		bit = (data >> i) & 1;
		// If the bit is 0, update the "trailing zero bits" counter.
		b += (1 - bit);
		/* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
		 * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
		 */
		b &= (bit - 1);
	}
	return b;
}
423 
/*
 * Count trailing zeros of a 64-bit value in constant time: every bit is
 * examined regardless of the input, so the running time does not depend
 * on the data. Returns 64 when data == 0.
 */
CC_INLINE CC_CONST unsigned
cc_ctz64_fallback(uint64_t data)
{
	unsigned int b = 0;   // running count; frozen at 0 once a 1 bit is seen
	unsigned int bit = 0; // current bit under inspection
	// Work from MSB to LSB
	for (int i = 63; i >= 0; i--) {
		bit = (data >> i) & 1;
		// If the bit is 0, update the "trailing zero bits" counter.
		b += (1 - bit);
		/* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
		 * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
		 */
		b &= (bit - 1);
	}
	return b;
}
441 
442 /*!
443  *  @function cc_clz32
444  *  @abstract Count leading zeros of a nonzero 32-bit value
445  *
446  *  @param data A nonzero 32-bit value
447  *
448  *  @result Count of leading zeros of @p data
449  *
450  *  @discussion @p data is assumed to be nonzero.
451  */
452 CC_INLINE CC_CONST unsigned
cc_clz32(uint32_t data)453 cc_clz32(uint32_t data)
454 {
455 	cc_assert(data != 0);
456 #if __has_builtin(__builtin_clz)
457 	cc_static_assert(sizeof(unsigned) == 4, "clz relies on an unsigned int being 4 bytes");
458 	return (unsigned)__builtin_clz(data);
459 #else
460 	return cc_clz32_fallback(data);
461 #endif
462 }
463 
464 /*!
465  *  @function cc_clz64
466  *  @abstract Count leading zeros of a nonzero 64-bit value
467  *
468  *  @param data A nonzero 64-bit value
469  *
470  *  @result Count of leading zeros of @p data
471  *
472  *  @discussion @p data is assumed to be nonzero.
473  */
474 CC_INLINE CC_CONST unsigned
cc_clz64(uint64_t data)475 cc_clz64(uint64_t data)
476 {
477 	cc_assert(data != 0);
478 #if __has_builtin(__builtin_clzll)
479 	return (unsigned)__builtin_clzll(data);
480 #else
481 	return cc_clz64_fallback(data);
482 #endif
483 }
484 
485 /*!
486  *  @function cc_ctz32
487  *  @abstract Count trailing zeros of a nonzero 32-bit value
488  *
489  *  @param data A nonzero 32-bit value
490  *
491  *  @result Count of trailing zeros of @p data
492  *
493  *  @discussion @p data is assumed to be nonzero.
494  */
495 CC_INLINE CC_CONST unsigned
cc_ctz32(uint32_t data)496 cc_ctz32(uint32_t data)
497 {
498 	cc_assert(data != 0);
499 #if __has_builtin(__builtin_ctz)
500 	cc_static_assert(sizeof(unsigned) == 4, "ctz relies on an unsigned int being 4 bytes");
501 	return (unsigned)__builtin_ctz(data);
502 #else
503 	return cc_ctz32_fallback(data);
504 #endif
505 }
506 
507 /*!
508  *  @function cc_ctz64
509  *  @abstract Count trailing zeros of a nonzero 64-bit value
510  *
511  *  @param data A nonzero 64-bit value
512  *
513  *  @result Count of trailing zeros of @p data
514  *
515  *  @discussion @p data is assumed to be nonzero.
516  */
517 CC_INLINE CC_CONST unsigned
cc_ctz64(uint64_t data)518 cc_ctz64(uint64_t data)
519 {
520 	cc_assert(data != 0);
521 #if __has_builtin(__builtin_ctzll)
522 	return (unsigned)__builtin_ctzll(data);
523 #else
524 	return cc_ctz64_fallback(data);
525 #endif
526 }
527 
528 // MARK: -- Find first bit set
529 
530 /*!
531  *  @function cc_ffs32_fallback
532  *  @abstract Find first bit set in a 32-bit value
533  *
534  *  @param data A 32-bit value
535  *
536  *  @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
537  */
CC_INLINE CC_CONST unsigned
cc_ffs32_fallback(int32_t data)
{
	// Branch-free, constant-time scan: every bit is examined regardless of
	// the input value.
	unsigned b = 0;    // count of zero bits before the first 1
	unsigned bit = 0;  // current bit under inspection
	unsigned seen = 0; // becomes 1 once a set bit has been encountered

	// Work from LSB to MSB
	for (int i = 0; i < 32; i++) {
		bit = ((uint32_t)data >> i) & 1;

		// Track whether we've seen a 1 bit.
		seen |= bit;

		// If the bit is 0 and we haven't seen a 1 yet, increment b.
		b += (1 - bit) & (seen - 1);
	}

	// If we saw a 1, return b + 1, else 0.
	return (~(seen - 1)) & (b + 1);
}
559 
560 /*!
561  *  @function cc_ffs64_fallback
562  *  @abstract Find first bit set in a 64-bit value
563  *
564  *  @param data A 64-bit value
565  *
566  *  @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
567  */
CC_INLINE CC_CONST unsigned
cc_ffs64_fallback(int64_t data)
{
	// Branch-free, constant-time scan: every bit is examined regardless of
	// the input value.
	unsigned b = 0;    // count of zero bits before the first 1
	unsigned bit = 0;  // current bit under inspection
	unsigned seen = 0; // becomes 1 once a set bit has been encountered

	// Work from LSB to MSB
	for (int i = 0; i < 64; i++) {
		bit = ((uint64_t)data >> i) & 1;

		// Track whether we've seen a 1 bit.
		seen |= bit;

		// If the bit is 0 and we haven't seen a 1 yet, increment b.
		b += (1 - bit) & (seen - 1);
	}

	// If we saw a 1, return b + 1, else 0.
	return (~(seen - 1)) & (b + 1);
}
589 
590 /*!
591  *  @function cc_ffs32
592  *  @abstract Find first bit set in a 32-bit value
593  *
594  *  @param data A 32-bit value
595  *
596  *  @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
597  */
598 CC_INLINE CC_CONST unsigned
cc_ffs32(int32_t data)599 cc_ffs32(int32_t data)
600 {
601 	cc_static_assert(sizeof(int) == 4, "ffs relies on an int being 4 bytes");
602 #if __has_builtin(__builtin_ffs)
603 	return (unsigned)__builtin_ffs(data);
604 #else
605 	return cc_ffs32_fallback(data);
606 #endif
607 }
608 
609 /*!
610  *  @function cc_ffs64
611  *  @abstract Find first bit set in a 64-bit value
612  *
613  *  @param data A 64-bit value
614  *
615  *  @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero
616  */
617 CC_INLINE CC_CONST unsigned
cc_ffs64(int64_t data)618 cc_ffs64(int64_t data)
619 {
620 #if __has_builtin(__builtin_ffsll)
621 	return (unsigned)__builtin_ffsll(data);
622 #else
623 	return cc_ffs64_fallback(data);
624 #endif
625 }
626 
627 // MARK: -- Overflow wrappers
628 #define cc_add_overflow __builtin_add_overflow
629 
630 // On 32-bit architectures, clang emits libcalls to __mulodi4 when
631 // __builtin_mul_overflow() encounters `long long` types.
632 //
633 // The libgcc runtime does not provide __mulodi4, so for Linux on ARMv7
634 // we cannot call __builtin_mul_overflow().
635 //
636 // Using __has_builtin(__builtin_mul_overflow) would be better but that will
637 // return the correct response for ARMv7/Linux only with LLVM-14 or higher.
638 #if defined(__clang__) && defined(__arm__) && CC_LINUX
639 CC_INLINE bool
cc_mul_overflow(uint64_t a,uint64_t b,uint64_t * r)640 cc_mul_overflow(uint64_t a, uint64_t b, uint64_t *r)
641 {
642 	*r = a * b;
643 	return (a != 0) && ((*r / a) != b);
644 }
645 #else
646 #define cc_mul_overflow __builtin_mul_overflow
647 #endif
648 
// MARK: -- Heaviside Step
650 /* HEAVISIDE_STEP (shifted by one)
651  *  function f(x): x->0, when x=0
652  *                 x->1, when x>0
653  *  Can also be seen as a bitwise operation:
654  *     f(x): x -> y
655  *       y[0]=(OR x[i]) for all i (all bits)
656  *       y[i]=0 for all i>0
657  *  Run in constant time (log2(<bitsize of x>))
658  *  Useful to run constant time checks
659  */
660 #define CC_HEAVISIDE_STEP(r, s) do {                                         \
661     cc_static_assert(sizeof(uint64_t) >= sizeof(s), "max type is uint64_t"); \
662     const uint64_t _s = (uint64_t)s;                                         \
663     const uint64_t _t = (_s & 0xffffffff) | (_s >> 32);                      \
664     r = (uint8_t)((_t + 0xffffffff) >> 32);                                  \
665 } while (0)
666 
/* Return 1 if x mod 4 is 1, 2, or 3; return 0 otherwise. */
668 #define CC_CARRY_2BITS(x) (((x>>1) | x) & 0x1)
669 #define CC_CARRY_3BITS(x) (((x>>2) | (x>>1) | x) & 0x1)
670 
671 /*!
672  *  @brief     CC_MUXU(r, s, a, b) is equivalent to r = s ? a : b, but executes in constant time
673  *  @param a   Input a
674  *  @param b   Input b
675  *  @param s   Selection parameter s. Must be 0 or 1.
676  *  @param r   Output, set to a if s=1, or b if s=0.
677  */
678 #define CC_MUXU(r, s, a, b) do {            \
679     cc_assert((s) == 0 || (s) == 1);        \
680     r = (~((s)-1) & (a)) | (((s)-1) & (b)); \
681 } while (0)
682 
683 #endif // _CORECRYPTO_CC_INTERNAL_H_
684