/Users/eugenesiegel/btc/bitcoin/src/random.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (c) 2009-2010 Satoshi Nakamoto |
2 | | // Copyright (c) 2009-present The Bitcoin Core developers |
3 | | // Distributed under the MIT software license, see the accompanying |
4 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
5 | | |
6 | | #include <bitcoin-build-config.h> // IWYU pragma: keep |
7 | | |
8 | | #include <random.h> |
9 | | |
10 | | #include <compat/compat.h> |
11 | | #include <compat/cpuid.h> |
12 | | #include <crypto/chacha20.h> |
13 | | #include <crypto/sha256.h> |
14 | | #include <crypto/sha512.h> |
15 | | #include <logging.h> |
16 | | #include <randomenv.h> |
17 | | #include <span.h> |
18 | | #include <support/allocators/secure.h> |
19 | | #include <support/cleanse.h> |
20 | | #include <sync.h> |
21 | | #include <util/time.h> |
22 | | |
23 | | #include <array> |
24 | | #include <cmath> |
25 | | #include <cstdlib> |
26 | | #include <optional> |
27 | | #include <thread> |
28 | | |
29 | | #ifdef WIN32 |
30 | | #include <windows.h> |
31 | | #include <wincrypt.h> |
32 | | #else |
33 | | #include <fcntl.h> |
34 | | #include <sys/time.h> |
35 | | #endif |
36 | | |
37 | | #if defined(HAVE_GETRANDOM) || (defined(HAVE_GETENTROPY_RAND) && defined(__APPLE__)) |
38 | | #include <sys/random.h> |
39 | | #endif |
40 | | |
41 | | #ifdef HAVE_SYSCTL_ARND |
42 | | #include <sys/sysctl.h> |
43 | | #endif |
44 | | |
45 | | namespace { |
46 | | |
47 | | /* Number of random bytes returned by GetOSRand. |
48 | | * When changing this constant make sure to change all call sites, and make |
49 | | * sure that the underlying OS APIs for all platforms support the number. |
50 | | * (many cap out at 256 bytes). |
51 | | */ |
52 | | static const int NUM_OS_RANDOM_BYTES = 32; |
53 | | |
54 | | |
/** Abort the process when no randomness can be obtained.
 *
 * Continuing without working randomness would be a critical security failure
 * (keys/nonces would become predictable), so terminate instead.
 */
[[noreturn]] void RandFailure()
{
    LogError("Failed to read randomness, aborting\n");
    std::abort();
}
60 | | |
/** Return a fast, high-resolution counter value used as an entropy input.
 *
 * Values are only ever hashed, so monotonicity/units don't matter — only
 * unpredictability of the low bits.
 */
inline int64_t GetPerformanceCounter() noexcept
{
    // Read the hardware time stamp counter when available.
    // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information.
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
    return __rdtsc();
#elif !defined(_MSC_VER) && defined(__i386__)
    uint64_t r = 0;
    __asm__ volatile ("rdtsc" : "=A"(r)); // Constrain the r variable to the eax:edx pair.
    return r;
#elif !defined(_MSC_VER) && (defined(__x86_64__) || defined(__amd64__))
    uint64_t r1 = 0, r2 = 0;
    __asm__ volatile ("rdtsc" : "=a"(r1), "=d"(r2)); // Constrain r1 to rax and r2 to rdx.
    return (r2 << 32) | r1;
#else
    // Fall back to using standard library clock (usually microsecond or nanosecond precision)
    return std::chrono::high_resolution_clock::now().time_since_epoch().count();
#endif
}
80 | | |
81 | | #ifdef HAVE_GETCPUID |
82 | | bool g_rdrand_supported = false; |
83 | | bool g_rdseed_supported = false; |
84 | | constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000; |
85 | | constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000; |
86 | | #ifdef bit_RDRND |
87 | | static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND"); |
88 | | #endif |
89 | | #ifdef bit_RDSEED |
90 | | static_assert(CPUID_F7_EBX_RDSEED == bit_RDSEED, "Unexpected value for bit_RDSEED"); |
91 | | #endif |
92 | | |
93 | | void InitHardwareRand() |
94 | | { |
95 | | uint32_t eax, ebx, ecx, edx; |
96 | | GetCPUID(1, 0, eax, ebx, ecx, edx); |
97 | | if (ecx & CPUID_F1_ECX_RDRAND) { |
98 | | g_rdrand_supported = true; |
99 | | } |
100 | | GetCPUID(7, 0, eax, ebx, ecx, edx); |
101 | | if (ebx & CPUID_F7_EBX_RDSEED) { |
102 | | g_rdseed_supported = true; |
103 | | } |
104 | | } |
105 | | |
/** Log which hardware entropy sources were detected by InitHardwareRand(). */
void ReportHardwareRand()
{
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
    // from global constructors, before logging is initialized.
    if (g_rdseed_supported) {
        LogPrintf("Using RdSeed as an additional entropy source\n");
    }
    if (g_rdrand_supported) {
        LogPrintf("Using RdRand as an additional entropy source\n");
    }
}
117 | | |
/** Read 64 bits of entropy using rdrand.
 *
 * Must only be called when RdRand is supported.
 * The instruction is emitted as raw opcode bytes so that no special compiler
 * flags (-mrdrnd) are needed; the carry flag signals success.
 */
uint64_t GetRdRand() noexcept
{
    // RdRand may very rarely fail. Invoke it up to 10 times in a loop to reduce this risk.
#ifdef __i386__
    uint8_t ok = 0;
    // Initialize to 0 to silence a compiler warning that r1 or r2 may be used
    // uninitialized. Even if rdrand fails (!ok) it will set the output to 0,
    // but there is no way that the compiler could know that.
    uint32_t r1 = 0, r2 = 0;
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %eax
        if (ok) break;
    }
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdrand %eax
        if (ok) break;
    }
    // Combine the two 32-bit draws into one 64-bit value.
    return (((uint64_t)r2) << 32) | r1;
#elif defined(__x86_64__) || defined(__amd64__)
    uint8_t ok = 0;
    uint64_t r1 = 0; // See above why we initialize to 0.
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %rax
        if (ok) break;
    }
    return r1;
#else
#error "RdRand is only supported on x86 and x86_64"
#endif
}
152 | | |
/** Read 64 bits of entropy using rdseed.
 *
 * Must only be called when RdSeed is supported.
 * Unlike RdRand, RdSeed failures are expected under load, so this retries
 * forever with a `pause` between attempts rather than giving up.
 */
uint64_t GetRdSeed() noexcept
{
    // RdSeed may fail when the HW RNG is overloaded. Loop indefinitely until enough entropy is gathered,
    // but pause after every failure.
#ifdef __i386__
    uint8_t ok = 0;
    uint32_t r1, r2;
    do {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %eax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    do {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdseed %eax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    // Combine the two 32-bit draws into one 64-bit value.
    return (((uint64_t)r2) << 32) | r1;
#elif defined(__x86_64__) || defined(__amd64__)
    uint8_t ok;
    uint64_t r1;
    do {
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %rax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    return r1;
#else
#error "RdSeed is only supported on x86 and x86_64"
#endif
}
188 | | |
189 | | #else |
/* Access to other hardware random number generators could be added here later,
 * assuming it is sufficiently fast (in the order of a few hundred CPU cycles).
 * Slower sources should probably be invoked separately, and/or only from
 * RandAddPeriodic (which is called once a minute).
 */
// No hardware entropy source is available on this platform; both hooks are no-ops.
void InitHardwareRand() {}
void ReportHardwareRand() {}
197 | | #endif |
198 | | |
/** Add 64 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
void SeedHardwareFast(CSHA512& hasher) noexcept {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
    // RdRand is cheap enough to use on every fast-seed; RdSeed is reserved
    // for SeedHardwareSlow().
    if (g_rdrand_supported) {
        uint64_t out = GetRdRand();
        hasher.Write((const unsigned char*)&out, sizeof(out));
        return;
    }
#endif
}
209 | | |
/** Add 256 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
void SeedHardwareSlow(CSHA512& hasher) noexcept {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
    // When we want 256 bits of entropy, prefer RdSeed over RdRand, as it's
    // guaranteed to produce independent randomness on every call.
    if (g_rdseed_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = GetRdSeed();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
    // When falling back to RdRand, XOR the result of 1024 results.
    // This guarantees a reseeding occurs between each.
    if (g_rdrand_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = 0;
            for (int j = 0; j < 1024; ++j) out ^= GetRdRand();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
#endif
}
234 | | |
235 | | /** Use repeated SHA512 to strengthen the randomness in seed32, and feed into hasher. */ |
236 | | void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration dur, CSHA512& hasher) noexcept |
237 | 0 | { |
238 | 0 | CSHA512 inner_hasher; |
239 | 0 | inner_hasher.Write(seed, sizeof(seed)); |
240 | | |
241 | | // Hash loop |
242 | 0 | unsigned char buffer[64]; |
243 | 0 | const auto stop{SteadyClock::now() + dur}; |
244 | 0 | do { |
245 | 0 | for (int i = 0; i < 1000; ++i) { |
246 | 0 | inner_hasher.Finalize(buffer); |
247 | 0 | inner_hasher.Reset(); |
248 | 0 | inner_hasher.Write(buffer, sizeof(buffer)); |
249 | 0 | } |
250 | | // Benchmark operation and feed it into outer hasher. |
251 | 0 | int64_t perf = GetPerformanceCounter(); |
252 | 0 | hasher.Write((const unsigned char*)&perf, sizeof(perf)); |
253 | 0 | } while (SteadyClock::now() < stop); |
254 | | |
255 | | // Produce output from inner state and feed it to outer hasher. |
256 | 0 | inner_hasher.Finalize(buffer); |
257 | 0 | hasher.Write(buffer, sizeof(buffer)); |
258 | | // Try to clean up. |
259 | 0 | inner_hasher.Reset(); |
260 | 0 | memory_cleanse(buffer, sizeof(buffer)); |
261 | 0 | } |
262 | | |
263 | | #ifndef WIN32 |
264 | | /** Fallback: get 32 bytes of system entropy from /dev/urandom. The most |
265 | | * compatible way to get cryptographic randomness on UNIX-ish platforms. |
266 | | */ |
267 | | [[maybe_unused]] void GetDevURandom(unsigned char *ent32) |
268 | 0 | { |
269 | 0 | int f = open("/dev/urandom", O_RDONLY); |
270 | 0 | if (f == -1) { |
271 | 0 | RandFailure(); |
272 | 0 | } |
273 | 0 | int have = 0; |
274 | 0 | do { |
275 | 0 | ssize_t n = read(f, ent32 + have, NUM_OS_RANDOM_BYTES - have); |
276 | 0 | if (n <= 0 || n + have > NUM_OS_RANDOM_BYTES) { |
277 | 0 | close(f); |
278 | 0 | RandFailure(); |
279 | 0 | } |
280 | 0 | have += n; |
281 | 0 | } while (have < NUM_OS_RANDOM_BYTES); |
282 | 0 | close(f); |
283 | 0 | } |
284 | | #endif |
285 | | |
/** Get 32 bytes of system entropy.
 *
 * Selects the preferred OS interface at compile time; all failure paths
 * abort via RandFailure(), so on return ent32 always holds 32 fresh bytes.
 */
void GetOSRand(unsigned char *ent32)
{
#if defined(WIN32)
    // Windows: legacy CryptoAPI provider.
    HCRYPTPROV hProvider;
    int ret = CryptAcquireContextW(&hProvider, nullptr, nullptr, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT);
    if (!ret) {
        RandFailure();
    }
    ret = CryptGenRandom(hProvider, NUM_OS_RANDOM_BYTES, ent32);
    if (!ret) {
        RandFailure();
    }
    CryptReleaseContext(hProvider, 0);
#elif defined(HAVE_GETRANDOM)
    /* Linux. From the getrandom(2) man page:
     * "If the urandom source has been initialized, reads of up to 256 bytes
     * will always return as many bytes as requested and will not be
     * interrupted by signals."
     */
    if (getrandom(ent32, NUM_OS_RANDOM_BYTES, 0) != NUM_OS_RANDOM_BYTES) {
        RandFailure();
    }
#elif defined(__OpenBSD__)
    /* OpenBSD. From the arc4random(3) man page:
       "Use of these functions is encouraged for almost all random number
       consumption because the other interfaces are deficient in either
       quality, portability, standardization, or availability."
       The function call is always successful.
     */
    arc4random_buf(ent32, NUM_OS_RANDOM_BYTES);
#elif defined(HAVE_GETENTROPY_RAND) && defined(__APPLE__)
    // macOS: getentropy() returns 0 on success.
    if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
        RandFailure();
    }
#elif defined(HAVE_SYSCTL_ARND)
    /* FreeBSD, NetBSD and similar. It is possible for the call to return less
     * bytes than requested, so need to read in a loop.
     */
    static int name[2] = {CTL_KERN, KERN_ARND};
    int have = 0;
    do {
        size_t len = NUM_OS_RANDOM_BYTES - have;
        if (sysctl(name, std::size(name), ent32 + have, &len, nullptr, 0) != 0) {
            RandFailure();
        }
        have += len;
    } while (have < NUM_OS_RANDOM_BYTES);
#else
    /* Fall back to /dev/urandom if there is no specific method implemented to
     * get system entropy for this OS.
     */
    GetDevURandom(ent32);
#endif
}
341 | | |
/** Global RNG state: a 256-bit entropy pool chained through SHA512, plus an
 * event hasher for cheap entropy events and an optional deterministic mode
 * for tests. Thread-safe; the two mutexes are never held simultaneously.
 */
class RNGState {
    Mutex m_mutex;
    /* The RNG state consists of 256 bits of entropy, taken from the output of
     * one operation's SHA512 output, and fed as input to the next one.
     * Carrying 256 bits of entropy should be sufficient to guarantee
     * unpredictability as long as any entropy source was ever unpredictable
     * to an attacker. To protect against situations where an attacker might
     * observe the RNG's state, fresh entropy is always mixed when
     * GetStrongRandBytes is called.
     */
    unsigned char m_state[32] GUARDED_BY(m_mutex) = {0};
    // Incremented on every extraction and hashed in, so no two extractions
    // ever feed identical input to the hasher.
    uint64_t m_counter GUARDED_BY(m_mutex) = 0;
    // Set the first time MixExtract() is invoked with strong_seed=true.
    bool m_strongly_seeded GUARDED_BY(m_mutex) = false;

    /** If not nullopt, the output of this RNGState is redirected and drawn from here
     * (unless always_use_real_rng is passed to MixExtract). */
    std::optional<ChaCha20> m_deterministic_prng GUARDED_BY(m_mutex);

    Mutex m_events_mutex;
    // Accumulates data passed to AddEvent(); drained by SeedEvents().
    CSHA256 m_events_hasher GUARDED_BY(m_events_mutex);

public:
    RNGState() noexcept
    {
        // Detect hardware RNG support; safe to run before logging exists.
        InitHardwareRand();
    }

    ~RNGState() = default;

    /** Mix a 32-bit event value plus the low bits of the performance counter
     * into the events hasher. Cheap enough to call from hot paths. */
    void AddEvent(uint32_t event_info) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
    {
        LOCK(m_events_mutex);

        m_events_hasher.Write((const unsigned char *)&event_info, sizeof(event_info));
        // Get the low four bytes of the performance counter. This translates to roughly the
        // subsecond part.
        uint32_t perfcounter = (GetPerformanceCounter() & 0xffffffff);
        m_events_hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
    }

    /**
     * Feed (the hash of) all events added through AddEvent() to hasher.
     */
    void SeedEvents(CSHA512& hasher) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
    {
        // We use only SHA256 for the events hashing to get the ASM speedups we have for SHA256,
        // since we want it to be fast as network peers may be able to trigger it repeatedly.
        LOCK(m_events_mutex);

        unsigned char events_hash[32];
        m_events_hasher.Finalize(events_hash);
        hasher.Write(events_hash, 32);

        // Re-initialize the hasher with the finalized state to use later.
        m_events_hasher.Reset();
        m_events_hasher.Write(events_hash, 32);
    }

    /** Make the output of MixExtract (unless always_use_real_rng) deterministic, with specified seed. */
    void MakeDeterministic(const uint256& seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        LOCK(m_mutex);
        m_deterministic_prng.emplace(MakeByteSpan(seed));
    }

    /** Extract up to 32 bytes of entropy from the RNG state, mixing in new entropy from hasher.
     *
     * If this function has never been called with strong_seed = true, false is returned.
     *
     * If always_use_real_rng is false, and MakeDeterministic has been called before, output
     * from the deterministic PRNG instead.
     */
    bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed, bool always_use_real_rng) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        assert(num <= 32);
        unsigned char buf[64];
        static_assert(sizeof(buf) == CSHA512::OUTPUT_SIZE, "Buffer needs to have hasher's output size");
        bool ret;
        {
            LOCK(m_mutex);
            ret = (m_strongly_seeded |= strong_seed);
            // Write the current state of the RNG into the hasher
            hasher.Write(m_state, 32);
            // Write a new counter number into the state
            hasher.Write((const unsigned char*)&m_counter, sizeof(m_counter));
            ++m_counter;
            // Finalize the hasher
            hasher.Finalize(buf);
            // Store the last 32 bytes of the hash output as new RNG state.
            memcpy(m_state, buf + 32, 32);
            // Handle requests for deterministic randomness.
            if (!always_use_real_rng && m_deterministic_prng.has_value()) [[unlikely]] {
                // Overwrite the beginning of buf, which will be used for output.
                m_deterministic_prng->Keystream(std::as_writable_bytes(std::span{buf, num}));
                // Do not require strong seeding for deterministic output.
                ret = true;
            }
        }
        // If desired, copy (up to) the first 32 bytes of the hash output as output.
        if (num) {
            assert(out != nullptr);
            memcpy(out, buf, num);
        }
        // Best effort cleanup of internal state
        hasher.Reset();
        memory_cleanse(buf, 64);
        return ret;
    }
};
451 | | |
/** Return the single global RNGState, lazily constructed in secure
 * (locked, cleansed-on-free) memory on first use. */
RNGState& GetRNGState() noexcept
{
    // This idiom relies on the guarantee that static variable are initialized
    // on first call, even when multiple parallel calls are permitted.
    static std::vector<RNGState, secure_allocator<RNGState>> g_rng(1);
    return g_rng[0];
}
459 | | |
460 | | /* A note on the use of noexcept in the seeding functions below: |
461 | | * |
462 | | * None of the RNG code should ever throw any exception. |
463 | | */ |
464 | | |
/** Feed a high-precision timestamp (performance counter) into hasher. */
void SeedTimestamp(CSHA512& hasher) noexcept
{
    int64_t perfcounter = GetPerformanceCounter();
    hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
}
470 | | |
471 | | void SeedFast(CSHA512& hasher) noexcept |
472 | 121M | { |
473 | 121M | unsigned char buffer[32]; |
474 | | |
475 | | // Stack pointer to indirectly commit to thread/callstack |
476 | 121M | const unsigned char* ptr = buffer; |
477 | 121M | hasher.Write((const unsigned char*)&ptr, sizeof(ptr)); |
478 | | |
479 | | // Hardware randomness is very fast when available; use it always. |
480 | 121M | SeedHardwareFast(hasher); |
481 | | |
482 | | // High-precision timestamp |
483 | 121M | SeedTimestamp(hasher); |
484 | 121M | } |
485 | | |
/** Gather stronger entropy: everything SeedFast adds, plus 32 bytes of OS
 * randomness and the accumulated event hash. Used by GetStrongRandBytes. */
void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept
{
    unsigned char buffer[32];

    // Everything that the 'fast' seeder includes
    SeedFast(hasher);

    // OS randomness
    GetOSRand(buffer);
    hasher.Write(buffer, sizeof(buffer));

    // Add the events hasher into the mix
    rng.SeedEvents(hasher);

    // High-precision timestamp.
    //
    // Note that we also commit to a timestamp in the Fast seeder, so we indirectly commit to a
    // benchmark of all the entropy gathering sources in this function).
    SeedTimestamp(hasher);
}
506 | | |
/** Extract entropy from rng, strengthen it, and feed it into hasher. */
void SeedStrengthen(CSHA512& hasher, RNGState& rng, SteadyClock::duration dur) noexcept
{
    // Generate 32 bytes of entropy from the RNG, and a copy of the entropy already in hasher.
    // Never use the deterministic PRNG for this, as the result is only used internally.
    unsigned char strengthen_seed[32];
    rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false, /*always_use_real_rng=*/true);
    // Strengthen the seed, and feed it into hasher.
    Strengthen(strengthen_seed, dur, hasher);
}
517 | | |
/** Entropy gathering for the once-a-minute RandAddPeriodic path: fast seeding
 * plus dynamic environment data and 10ms of strengthening. */
void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept
{
    // Everything that the 'fast' seeder includes
    SeedFast(hasher);

    // High-precision timestamp
    SeedTimestamp(hasher);

    // Add the events hasher into the mix
    rng.SeedEvents(hasher);

    // Dynamic environment data (clocks, resource usage, ...)
    auto old_size = hasher.Size();
    RandAddDynamicEnv(hasher);
    LogDebug(BCLog::RAND, "Feeding %i bytes of dynamic environment data into RNG\n", hasher.Size() - old_size);

    // Strengthen for 10 ms
    SeedStrengthen(hasher, rng, 10ms);
}
537 | | |
/** One-time startup seeding: hardware, OS, dynamic + static environment data,
 * and 100ms of strengthening. Run on the first entropy extraction. */
void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept
{
    // Gather 256 bits of hardware randomness, if available
    SeedHardwareSlow(hasher);

    // Everything that the 'slow' seeder includes.
    SeedSlow(hasher, rng);

    // Dynamic environment data (clocks, resource usage, ...)
    auto old_size = hasher.Size();
    RandAddDynamicEnv(hasher);

    // Static environment data
    RandAddStaticEnv(hasher);
    LogDebug(BCLog::RAND, "Feeding %i bytes of environment data into RNG\n", hasher.Size() - old_size);

    // Strengthen for 100 ms
    SeedStrengthen(hasher, rng, 100ms);
}
557 | | |
//! Entropy-gathering level requested from ProcRand().
enum class RNGLevel {
    FAST, //!< Automatically called by GetRandBytes
    SLOW, //!< Automatically called by GetStrongRandBytes
    PERIODIC, //!< Called by RandAddPeriodic()
};
563 | | |
564 | | void ProcRand(unsigned char* out, int num, RNGLevel level, bool always_use_real_rng) noexcept |
565 | 121M | { |
566 | | // Make sure the RNG is initialized first (as all Seed* function possibly need hwrand to be available). |
567 | 121M | RNGState& rng = GetRNGState(); |
568 | | |
569 | 121M | assert(num <= 32); |
570 | | |
571 | 121M | CSHA512 hasher; |
572 | 121M | switch (level) { |
573 | 121M | case RNGLevel::FAST: |
574 | 121M | SeedFast(hasher); |
575 | 121M | break; |
576 | 0 | case RNGLevel::SLOW: |
577 | 0 | SeedSlow(hasher, rng); |
578 | 0 | break; |
579 | 0 | case RNGLevel::PERIODIC: |
580 | 0 | SeedPeriodic(hasher, rng); |
581 | 0 | break; |
582 | 121M | } |
583 | | |
584 | | // Combine with and update state |
585 | 121M | if (!rng.MixExtract(out, num, std::move(hasher), false, always_use_real_rng)) { |
586 | | // On the first invocation, also seed with SeedStartup(). |
587 | 0 | CSHA512 startup_hasher; |
588 | 0 | SeedStartup(startup_hasher, rng); |
589 | 0 | rng.MixExtract(out, num, std::move(startup_hasher), true, always_use_real_rng); |
590 | 0 | } |
591 | 121M | } |
592 | | |
593 | | } // namespace |
594 | | |
595 | | |
/** Internal function to set g_determinstic_rng. Only accessed from tests.
 * After this call, non-"real-rng" extractions come from a ChaCha20 stream
 * keyed by `seed` instead of real entropy — never use outside tests. */
void MakeRandDeterministicDANGEROUS(const uint256& seed) noexcept
{
    GetRNGState().MakeDeterministic(seed);
}
std::atomic<bool> g_used_g_prng{false}; // Only accessed from tests
602 | | |
/** Fill `bytes` (up to 32) with fast randomness; honors the deterministic
 * test mode if active. */
void GetRandBytes(std::span<unsigned char> bytes) noexcept
{
    // Flag for tests that the global PRNG was consulted.
    g_used_g_prng = true;
    ProcRand(bytes.data(), bytes.size(), RNGLevel::FAST, /*always_use_real_rng=*/false);
}
608 | | |
/** Fill `bytes` (up to 32) with strongly-seeded randomness. Always uses the
 * real RNG, even when deterministic test mode is active. */
void GetStrongRandBytes(std::span<unsigned char> bytes) noexcept
{
    ProcRand(bytes.data(), bytes.size(), RNGLevel::SLOW, /*always_use_real_rng=*/true);
}
613 | | |
/** Mix periodic environment entropy into the RNG state; produces no output. */
void RandAddPeriodic() noexcept
{
    ProcRand(nullptr, 0, RNGLevel::PERIODIC, /*always_use_real_rng=*/false);
}
618 | | |
619 | 711k | void RandAddEvent(const uint32_t event_info) noexcept { GetRNGState().AddEvent(event_info); } |
620 | | |
// Re-key the internal ChaCha20 stream from the global RNG and clear the
// requires_seed flag.
void FastRandomContext::RandomSeed() noexcept
{
    uint256 seed = GetRandHash();
    rng.SetKey(MakeByteSpan(seed));
    requires_seed = false;
}
627 | | |
// Fill `output` with keystream bytes, seeding securely first if needed.
void FastRandomContext::fillrand(std::span<std::byte> output) noexcept
{
    if (requires_seed) RandomSeed();
    rng.Keystream(output);
}
633 | | |
634 | 0 | FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), rng(MakeByteSpan(seed)) {} |
635 | | |
// Discard buffered keystream and re-key deterministically from `seed`.
void FastRandomContext::Reseed(const uint256& seed) noexcept
{
    FlushCache();
    requires_seed = false;
    rng = {MakeByteSpan(seed)};
}
642 | | |
643 | | bool Random_SanityCheck() |
644 | 0 | { |
645 | 0 | uint64_t start = GetPerformanceCounter(); |
646 | | |
647 | | /* This does not measure the quality of randomness, but it does test that |
648 | | * GetOSRand() overwrites all 32 bytes of the output given a maximum |
649 | | * number of tries. |
650 | | */ |
651 | 0 | static constexpr int MAX_TRIES{1024}; |
652 | 0 | uint8_t data[NUM_OS_RANDOM_BYTES]; |
653 | 0 | bool overwritten[NUM_OS_RANDOM_BYTES] = {}; /* Tracks which bytes have been overwritten at least once */ |
654 | 0 | int num_overwritten; |
655 | 0 | int tries = 0; |
656 | | /* Loop until all bytes have been overwritten at least once, or max number tries reached */ |
657 | 0 | do { |
658 | 0 | memset(data, 0, NUM_OS_RANDOM_BYTES); |
659 | 0 | GetOSRand(data); |
660 | 0 | for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) { |
661 | 0 | overwritten[x] |= (data[x] != 0); |
662 | 0 | } |
663 | |
|
664 | 0 | num_overwritten = 0; |
665 | 0 | for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) { |
666 | 0 | if (overwritten[x]) { |
667 | 0 | num_overwritten += 1; |
668 | 0 | } |
669 | 0 | } |
670 | |
|
671 | 0 | tries += 1; |
672 | 0 | } while (num_overwritten < NUM_OS_RANDOM_BYTES && tries < MAX_TRIES); |
673 | 0 | if (num_overwritten != NUM_OS_RANDOM_BYTES) return false; /* If this failed, bailed out after too many tries */ |
674 | | |
675 | | // Check that GetPerformanceCounter increases at least during a GetOSRand() call + 1ms sleep. |
676 | 0 | std::this_thread::sleep_for(std::chrono::milliseconds(1)); |
677 | 0 | uint64_t stop = GetPerformanceCounter(); |
678 | 0 | if (stop == start) return false; |
679 | | |
680 | | // We called GetPerformanceCounter. Use it as entropy. |
681 | 0 | CSHA512 to_add; |
682 | 0 | to_add.Write((const unsigned char*)&start, sizeof(start)); |
683 | 0 | to_add.Write((const unsigned char*)&stop, sizeof(stop)); |
684 | 0 | GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false, /*always_use_real_rng=*/true); |
685 | |
|
686 | 0 | return true; |
687 | 0 | } |
688 | | |
// All-zero key used for the initial (pre-seed) ChaCha20 state.
static constexpr std::array<std::byte, ChaCha20::KEYLEN> ZERO_KEY{};

FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), rng(ZERO_KEY)
{
    // Note that despite always initializing with ZERO_KEY, requires_seed is set to true if not
    // fDeterministic. That means the rng will be reinitialized with a secure random key upon first
    // use.
}
697 | | |
/** Force RNG initialization and log which hardware sources are in use. */
void RandomInit()
{
    // Invoke RNG code to trigger initialization (if not already performed)
    ProcRand(nullptr, 0, RNGLevel::FAST, /*always_use_real_rng=*/true);

    ReportHardwareRand();
}
705 | | |
/** Map a uniformly random 64-bit integer to an exponentially distributed
 * double with mean 1.
 *
 * Two steps:
 * - The top 53 bits of `uniform` become a double uniformly distributed in
 *   [0, 1), via ((uniform >> 11) * 0x1.0p-53) as described at
 *   https://prng.di.unimi.it/ under "Generating uniform doubles in the unit
 *   interval". Call this value x. The low 11 bits are discarded.
 * - Apply the exponential distribution's quantile function F(x) = -log(1 - x),
 *   expressed through log1p for precision near x = 0.
 */
double MakeExponentiallyDistributed(uint64_t uniform) noexcept
{
    const double unit = (uniform >> 11) * 0x1.0p-53; // uniform in [0, 1)
    return -std::log1p(-unit);
}