Coverage Report

Created: 2024-08-21 05:08

/workdir/bitcoin/src/random.cpp
Line
Count
Source
1
// Copyright (c) 2009-2010 Satoshi Nakamoto
2
// Copyright (c) 2009-2022 The Bitcoin Core developers
3
// Distributed under the MIT software license, see the accompanying
4
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6
#include <config/bitcoin-config.h> // IWYU pragma: keep
7
8
#include <random.h>
9
10
#include <compat/compat.h>
11
#include <compat/cpuid.h>
12
#include <crypto/chacha20.h>
13
#include <crypto/sha256.h>
14
#include <crypto/sha512.h>
15
#include <logging.h>
16
#include <randomenv.h>
17
#include <span.h>
18
#include <support/allocators/secure.h>
19
#include <support/cleanse.h>
20
#include <sync.h>
21
#include <util/time.h>
22
23
#include <array>
24
#include <cmath>
25
#include <cstdlib>
26
#include <optional>
27
#include <thread>
28
29
#ifdef WIN32
30
#include <windows.h>
31
#include <wincrypt.h>
32
#else
33
#include <fcntl.h>
34
#include <sys/time.h>
35
#endif
36
37
#if defined(HAVE_GETRANDOM) || (defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX))
38
#include <sys/random.h>
39
#endif
40
41
#ifdef HAVE_SYSCTL_ARND
42
#include <sys/sysctl.h>
43
#endif
44
#if defined(HAVE_STRONG_GETAUXVAL) && defined(__aarch64__)
45
#include <sys/auxv.h>
46
#endif
47
48
namespace {
49
50
/* Number of random bytes returned by GetOSRand.
51
 * When changing this constant, make sure to change all call sites, and make
52
 * sure that the underlying OS APIs for all platforms support the number
53
 * (many cap out at 256 bytes).
54
 */
55
static const int NUM_OS_RANDOM_BYTES = 32;
56
57
58
[[noreturn]] void RandFailure()
59
0
{
60
0
    LogError("Failed to read randomness, aborting\n");
61
0
    std::abort();
62
0
}
63
64
inline int64_t GetPerformanceCounter() noexcept
65
15.8k
{
66
    // Read the hardware time stamp counter when available.
67
    // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information.
68
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
69
    return __rdtsc();
70
#elif !defined(_MSC_VER) && defined(__i386__)
71
    uint64_t r = 0;
72
    __asm__ volatile ("rdtsc" : "=A"(r)); // Constrain the r variable to the eax:edx pair.
73
    return r;
74
#elif !defined(_MSC_VER) && (defined(__x86_64__) || defined(__amd64__))
75
    uint64_t r1 = 0, r2 = 0;
76
    __asm__ volatile ("rdtsc" : "=a"(r1), "=d"(r2)); // Constrain r1 to rax and r2 to rdx.
77
    return (r2 << 32) | r1;
78
#else
79
    // Fall back to using standard library clock (usually microsecond or nanosecond precision)
80
15.8k
    return std::chrono::high_resolution_clock::now().time_since_epoch().count();
81
15.8k
#endif
82
15.8k
}
83
84
#ifdef HAVE_GETCPUID
85
bool g_rdrand_supported = false;
86
bool g_rdseed_supported = false;
87
constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000;
88
constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000;
89
#ifdef bit_RDRND
90
static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND");
91
#endif
92
#ifdef bit_RDSEED
93
static_assert(CPUID_F7_EBX_RDSEED == bit_RDSEED, "Unexpected value for bit_RDSEED");
94
#endif
95
96
void InitHardwareRand()
97
{
98
    uint32_t eax, ebx, ecx, edx;
99
    GetCPUID(1, 0, eax, ebx, ecx, edx);
100
    if (ecx & CPUID_F1_ECX_RDRAND) {
101
        g_rdrand_supported = true;
102
    }
103
    GetCPUID(7, 0, eax, ebx, ecx, edx);
104
    if (ebx & CPUID_F7_EBX_RDSEED) {
105
        g_rdseed_supported = true;
106
    }
107
}
108
109
void ReportHardwareRand()
110
{
111
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
112
    // from global constructors, before logging is initialized.
113
    if (g_rdseed_supported) {
114
        LogPrintf("Using RdSeed as an additional entropy source\n");
115
    }
116
    if (g_rdrand_supported) {
117
        LogPrintf("Using RdRand as an additional entropy source\n");
118
    }
119
}
120
121
/** Read 64 bits of entropy using rdrand.
122
 *
123
 * Must only be called when RdRand is supported.
124
 */
125
uint64_t GetRdRand() noexcept
126
{
127
    // RdRand may very rarely fail. Invoke it up to 10 times in a loop to reduce this risk.
128
#ifdef __i386__
129
    uint8_t ok;
130
    // Initialize to 0 to silence a compiler warning that r1 or r2 may be used
131
    // uninitialized. Even if rdrand fails (!ok) it will set the output to 0,
132
    // but there is no way that the compiler could know that.
133
    uint32_t r1 = 0, r2 = 0;
134
    for (int i = 0; i < 10; ++i) {
135
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %eax
136
        if (ok) break;
137
    }
138
    for (int i = 0; i < 10; ++i) {
139
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdrand %eax
140
        if (ok) break;
141
    }
142
    return (((uint64_t)r2) << 32) | r1;
143
#elif defined(__x86_64__) || defined(__amd64__)
144
    uint8_t ok;
145
    uint64_t r1 = 0; // See above why we initialize to 0.
146
    for (int i = 0; i < 10; ++i) {
147
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %rax
148
        if (ok) break;
149
    }
150
    return r1;
151
#else
152
#error "RdRand is only supported on x86 and x86_64"
153
#endif
154
}
155
156
/** Read 64 bits of entropy using rdseed.
157
 *
158
 * Must only be called when RdSeed is supported.
159
 */
160
uint64_t GetRdSeed() noexcept
161
{
162
    // RdSeed may fail when the HW RNG is overloaded. Loop indefinitely until enough entropy is gathered,
163
    // but pause after every failure.
164
#ifdef __i386__
165
    uint8_t ok;
166
    uint32_t r1, r2;
167
    do {
168
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %eax
169
        if (ok) break;
170
        __asm__ volatile ("pause");
171
    } while(true);
172
    do {
173
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdseed %eax
174
        if (ok) break;
175
        __asm__ volatile ("pause");
176
    } while(true);
177
    return (((uint64_t)r2) << 32) | r1;
178
#elif defined(__x86_64__) || defined(__amd64__)
179
    uint8_t ok;
180
    uint64_t r1;
181
    do {
182
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %rax
183
        if (ok) break;
184
        __asm__ volatile ("pause");
185
    } while(true);
186
    return r1;
187
#else
188
#error "RdSeed is only supported on x86 and x86_64"
189
#endif
190
}
191
192
#elif defined(__aarch64__) && defined(HWCAP2_RNG)
193
194
bool g_rndr_supported = false;
195
196
void InitHardwareRand()
197
0
{
198
0
    if (getauxval(AT_HWCAP2) & HWCAP2_RNG) {
  Branch (198:9): [True: 0, False: 0]
199
0
        g_rndr_supported = true;
200
0
    }
201
0
}
202
203
void ReportHardwareRand()
204
0
{
205
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
206
    // from global constructors, before logging is initialized.
207
0
    if (g_rndr_supported) {
  Branch (207:9): [True: 0, False: 0]
208
0
        LogPrintf("Using RNDR and RNDRRS as additional entropy sources\n");
209
0
    }
210
0
}
211
212
/** Read 64 bits of entropy using rndr.
213
 *
214
 * Must only be called when RNDR is supported.
215
 */
216
uint64_t GetRNDR() noexcept
217
0
{
218
0
    uint8_t ok;
219
0
    uint64_t r1;
220
0
    do {
221
        // https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers/RNDR--Random-Number
222
0
        __asm__ volatile("mrs %0, s3_3_c2_c4_0; cset %w1, ne;"
223
0
                         : "=r"(r1), "=r"(ok)::"cc");
224
0
        if (ok) break;
  Branch (224:13): [True: 0, False: 0]
225
0
        __asm__ volatile("yield");
226
0
    } while (true);
  Branch (226:14): [Folded - Ignored]
227
0
    return r1;
228
0
}
229
230
/** Read 64 bits of entropy using rndrrs.
231
 *
232
 * Must only be called when RNDRRS is supported.
233
 */
234
uint64_t GetRNDRRS() noexcept
235
0
{
236
0
    uint8_t ok;
237
0
    uint64_t r1;
238
0
    do {
239
        // https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers/RNDRRS--Reseeded-Random-Number
240
0
        __asm__ volatile("mrs %0, s3_3_c2_c4_1; cset %w1, ne;"
241
0
                         : "=r"(r1), "=r"(ok)::"cc");
242
0
        if (ok) break;
  Branch (242:13): [True: 0, False: 0]
243
0
        __asm__ volatile("yield");
244
0
    } while (true);
  Branch (244:14): [Folded - Ignored]
245
0
    return r1;
246
0
}
247
248
#else
249
/* Access to other hardware random number generators could be added here later,
250
 * assuming they are sufficiently fast (on the order of a few hundred CPU cycles).
251
 * Slower sources should probably be invoked separately, and/or only from
252
 * RandAddPeriodic (which is called once a minute).
253
 */
254
void InitHardwareRand() {}
255
void ReportHardwareRand() {}
256
#endif
257
258
/** Add 64 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
259
2.33k
void SeedHardwareFast(CSHA512& hasher) noexcept {
260
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
261
    if (g_rdrand_supported) {
262
        uint64_t out = GetRdRand();
263
        hasher.Write((const unsigned char*)&out, sizeof(out));
264
        return;
265
    }
266
#elif defined(__aarch64__) && defined(HWCAP2_RNG)
267
2.33k
    if (g_rndr_supported) {
  Branch (267:9): [True: 0, False: 2.33k]
268
0
        uint64_t out = GetRNDR();
269
0
        hasher.Write((const unsigned char*)&out, sizeof(out));
270
0
        return;
271
0
    }
272
2.33k
#endif
273
2.33k
}
274
275
/** Add 256 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
276
0
void SeedHardwareSlow(CSHA512& hasher) noexcept {
277
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
278
    // When we want 256 bits of entropy, prefer RdSeed over RdRand, as it's
279
    // guaranteed to produce independent randomness on every call.
280
    if (g_rdseed_supported) {
281
        for (int i = 0; i < 4; ++i) {
282
            uint64_t out = GetRdSeed();
283
            hasher.Write((const unsigned char*)&out, sizeof(out));
284
        }
285
        return;
286
    }
287
    // When falling back to RdRand, XOR together the results of 1024 calls.
288
    // This guarantees that at least one reseeding occurs between successive outputs.
289
    if (g_rdrand_supported) {
290
        for (int i = 0; i < 4; ++i) {
291
            uint64_t out = 0;
292
            for (int j = 0; j < 1024; ++j) out ^= GetRdRand();
293
            hasher.Write((const unsigned char*)&out, sizeof(out));
294
        }
295
        return;
296
    }
297
#elif defined(__aarch64__) && defined(HWCAP2_RNG)
298
0
    if (g_rndr_supported) {
  Branch (298:9): [True: 0, False: 0]
299
0
        for (int i = 0; i < 4; ++i) {
  Branch (299:25): [True: 0, False: 0]
300
0
            uint64_t out = GetRNDRRS();
301
0
            hasher.Write((const unsigned char*)&out, sizeof(out));
302
0
        }
303
0
        return;
304
0
    }
305
0
#endif
306
0
}
307
308
/** Use repeated SHA512 to strengthen the randomness in seed32, and feed into hasher. */
309
void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration dur, CSHA512& hasher) noexcept
310
0
{
311
0
    CSHA512 inner_hasher;
312
0
    inner_hasher.Write(seed, sizeof(seed));
313
314
    // Hash loop
315
0
    unsigned char buffer[64];
316
0
    const auto stop{SteadyClock::now() + dur};
317
0
    do {
318
0
        for (int i = 0; i < 1000; ++i) {
  Branch (318:25): [True: 0, False: 0]
319
0
            inner_hasher.Finalize(buffer);
320
0
            inner_hasher.Reset();
321
0
            inner_hasher.Write(buffer, sizeof(buffer));
322
0
        }
323
        // Benchmark operation and feed it into outer hasher.
324
0
        int64_t perf = GetPerformanceCounter();
325
0
        hasher.Write((const unsigned char*)&perf, sizeof(perf));
326
0
    } while (SteadyClock::now() < stop);
  Branch (326:14): [True: 0, False: 0]
327
328
    // Produce output from inner state and feed it to outer hasher.
329
0
    inner_hasher.Finalize(buffer);
330
0
    hasher.Write(buffer, sizeof(buffer));
331
    // Try to clean up.
332
0
    inner_hasher.Reset();
333
0
    memory_cleanse(buffer, sizeof(buffer));
334
0
}
335
336
#ifndef WIN32
337
/** Fallback: get 32 bytes of system entropy from /dev/urandom. The most
338
 * compatible way to get cryptographic randomness on UNIX-ish platforms.
339
 */
340
[[maybe_unused]] void GetDevURandom(unsigned char *ent32)
341
0
{
342
0
    int f = open("/dev/urandom", O_RDONLY);
343
0
    if (f == -1) {
344
0
        RandFailure();
345
0
    }
346
0
    int have = 0;
347
0
    do {
348
0
        ssize_t n = read(f, ent32 + have, NUM_OS_RANDOM_BYTES - have);
349
0
        if (n <= 0 || n + have > NUM_OS_RANDOM_BYTES) {
350
0
            close(f);
351
0
            RandFailure();
352
0
        }
353
0
        have += n;
354
0
    } while (have < NUM_OS_RANDOM_BYTES);
355
0
    close(f);
356
0
}
357
#endif
358
359
/** Get 32 bytes of system entropy. */
360
void GetOSRand(unsigned char *ent32)
361
0
{
362
#if defined(WIN32)
363
    HCRYPTPROV hProvider;
364
    int ret = CryptAcquireContextW(&hProvider, nullptr, nullptr, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT);
365
    if (!ret) {
366
        RandFailure();
367
    }
368
    ret = CryptGenRandom(hProvider, NUM_OS_RANDOM_BYTES, ent32);
369
    if (!ret) {
370
        RandFailure();
371
    }
372
    CryptReleaseContext(hProvider, 0);
373
#elif defined(HAVE_GETRANDOM)
374
    /* Linux. From the getrandom(2) man page:
375
     * "If the urandom source has been initialized, reads of up to 256 bytes
376
     * will always return as many bytes as requested and will not be
377
     * interrupted by signals."
378
     */
379
0
    if (getrandom(ent32, NUM_OS_RANDOM_BYTES, 0) != NUM_OS_RANDOM_BYTES) {
  Branch (379:9): [True: 0, False: 0]
380
0
        RandFailure();
381
0
    }
382
#elif defined(__OpenBSD__)
383
    /* OpenBSD. From the arc4random(3) man page:
384
       "Use of these functions is encouraged for almost all random number
385
        consumption because the other interfaces are deficient in either
386
        quality, portability, standardization, or availability."
387
       The function call is always successful.
388
     */
389
    arc4random_buf(ent32, NUM_OS_RANDOM_BYTES);
390
#elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
391
    if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
392
        RandFailure();
393
    }
394
#elif defined(HAVE_SYSCTL_ARND)
395
    /* FreeBSD, NetBSD and similar. It is possible for the call to return fewer
396
     * bytes than requested, so we need to read in a loop.
397
     */
398
    static int name[2] = {CTL_KERN, KERN_ARND};
399
    int have = 0;
400
    do {
401
        size_t len = NUM_OS_RANDOM_BYTES - have;
402
        if (sysctl(name, std::size(name), ent32 + have, &len, nullptr, 0) != 0) {
403
            RandFailure();
404
        }
405
        have += len;
406
    } while (have < NUM_OS_RANDOM_BYTES);
407
#else
408
    /* Fall back to /dev/urandom if there is no specific method implemented to
409
     * get system entropy for this OS.
410
     */
411
    GetDevURandom(ent32);
412
#endif
413
0
}
414
415
class RNGState {
416
    Mutex m_mutex;
417
    /* The RNG state consists of 256 bits of entropy, taken from the output of
418
     * one operation's SHA512 output, and fed as input to the next one.
419
     * Carrying 256 bits of entropy should be sufficient to guarantee
420
     * unpredictability as long as any entropy source was ever unpredictable
421
     * to an attacker. To protect against situations where an attacker might
422
     * observe the RNG's state, fresh entropy is always mixed when
423
     * GetStrongRandBytes is called.
424
     */
425
    unsigned char m_state[32] GUARDED_BY(m_mutex) = {0};
426
    uint64_t m_counter GUARDED_BY(m_mutex) = 0;
427
    bool m_strongly_seeded GUARDED_BY(m_mutex) = false;
428
429
    /** If not nullopt, the output of this RNGState is redirected and drawn from here
430
     *  (unless always_use_real_rng is passed to MixExtract). */
431
    std::optional<ChaCha20> m_deterministic_prng GUARDED_BY(m_mutex);
432
433
    Mutex m_events_mutex;
434
    CSHA256 m_events_hasher GUARDED_BY(m_events_mutex);
435
436
public:
437
    RNGState() noexcept
438
0
    {
439
0
        InitHardwareRand();
440
0
    }
441
442
1
    ~RNGState() = default;
443
444
    void AddEvent(uint32_t event_info) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
445
13.5k
    {
446
13.5k
        LOCK(m_events_mutex);
447
448
13.5k
        m_events_hasher.Write((const unsigned char *)&event_info, sizeof(event_info));
449
        // Get the low four bytes of the performance counter. This translates to roughly the
450
        // subsecond part.
451
13.5k
        uint32_t perfcounter = (GetPerformanceCounter() & 0xffffffff);
452
13.5k
        m_events_hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
453
13.5k
    }
454
455
    /**
456
     * Feed (the hash of) all events added through AddEvent() to hasher.
457
     */
458
    void SeedEvents(CSHA512& hasher) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
459
0
    {
460
        // We use only SHA256 for the events hashing to get the ASM speedups we have for SHA256,
461
        // since we want it to be fast, as network peers may be able to trigger it repeatedly.
462
0
        LOCK(m_events_mutex);
463
464
0
        unsigned char events_hash[32];
465
0
        m_events_hasher.Finalize(events_hash);
466
0
        hasher.Write(events_hash, 32);
467
468
        // Re-initialize the hasher with the finalized state to use later.
469
0
        m_events_hasher.Reset();
470
0
        m_events_hasher.Write(events_hash, 32);
471
0
    }
472
473
    /** Make the output of MixExtract (unless always_use_real_rng) deterministic, with specified seed. */
474
    void MakeDeterministic(const uint256& seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
475
0
    {
476
0
        LOCK(m_mutex);
477
0
        m_deterministic_prng.emplace(MakeByteSpan(seed));
478
0
    }
479
480
    /** Extract up to 32 bytes of entropy from the RNG state, mixing in new entropy from hasher.
481
     *
482
     * If this function has never been called with strong_seed = true, false is returned.
483
     *
484
     * If always_use_real_rng is false, and MakeDeterministic has been called before, output is drawn
485
     * from the deterministic PRNG instead.
486
     */
487
    bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed, bool always_use_real_rng) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
488
2.33k
    {
489
2.33k
        assert(num <= 32);
490
2.33k
        unsigned char buf[64];
491
2.33k
        static_assert(sizeof(buf) == CSHA512::OUTPUT_SIZE, "Buffer needs to have hasher's output size");
492
2.33k
        bool ret;
493
2.33k
        {
494
2.33k
            LOCK(m_mutex);
495
2.33k
            ret = (m_strongly_seeded |= strong_seed);
496
            // Write the current state of the RNG into the hasher
497
2.33k
            hasher.Write(m_state, 32);
498
            // Write a new counter number into the state
499
2.33k
            hasher.Write((const unsigned char*)&m_counter, sizeof(m_counter));
500
2.33k
            ++m_counter;
501
            // Finalize the hasher
502
2.33k
            hasher.Finalize(buf);
503
            // Store the last 32 bytes of the hash output as new RNG state.
504
2.33k
            memcpy(m_state, buf + 32, 32);
505
            // Handle requests for deterministic randomness.
506
2.33k
            if (!always_use_real_rng && m_deterministic_prng.has_value()) [[unlikely]] {
  Branch (506:17): [True: 2.33k, False: 0]
  Branch (506:41): [True: 2.33k, False: 0]
507
                // Overwrite the beginning of buf, which will be used for output.
508
2.33k
                m_deterministic_prng->Keystream(AsWritableBytes(Span{buf, num}));
509
                // Do not require strong seeding for deterministic output.
510
2.33k
                ret = true;
511
2.33k
            }
512
2.33k
        }
513
        // If desired, copy (up to) the first 32 bytes of the hash output as output.
514
2.33k
        if (num) {
  Branch (514:13): [True: 2.33k, False: 0]
515
2.33k
            assert(out != nullptr);
516
2.33k
            memcpy(out, buf, num);
517
2.33k
        }
518
        // Best effort cleanup of internal state
519
2.33k
        hasher.Reset();
520
2.33k
        memory_cleanse(buf, 64);
521
2.33k
        return ret;
522
2.33k
    }
523
};
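
The MixExtract pattern above can be summarized as: hash the caller's fresh entropy together with the current 256-bit state and a monotonically increasing counter, keep the last 32 bytes of the SHA512 digest as the new state, and hand out (up to) the first 32 bytes as output. A minimal standalone sketch of that pattern follows; it assumes OpenSSL's one-shot SHA512 as a stand-in for the project's incremental CSHA512, omits the locking, strong-seeding bookkeeping and deterministic-PRNG redirection of the real RNGState, and the ToyRNGState name is purely illustrative, not part of random.cpp.

// Sketch only: OpenSSL SHA512 stands in for CSHA512; ToyRNGState is a hypothetical name.
#include <openssl/sha.h>

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct ToyRNGState {
    unsigned char state[32] = {0};
    uint64_t counter = 0;

    // Mix `entropy` into the state and write `num` (<= 32) output bytes to `out`.
    void MixExtract(const unsigned char* entropy, std::size_t entropy_len,
                    unsigned char* out, std::size_t num)
    {
        assert(num <= 32);
        // Hash entropy || current state || counter in one pass.
        std::vector<unsigned char> input(entropy, entropy + entropy_len);
        input.insert(input.end(), state, state + sizeof(state));
        input.insert(input.end(),
                     reinterpret_cast<const unsigned char*>(&counter),
                     reinterpret_cast<const unsigned char*>(&counter) + sizeof(counter));
        ++counter;

        unsigned char buf[SHA512_DIGEST_LENGTH];
        SHA512(input.data(), input.size(), buf);

        std::memcpy(state, buf + 32, 32);    // last 32 bytes become the new state
        if (num) std::memcpy(out, buf, num); // first bytes are handed out as output
        std::memset(buf, 0, sizeof(buf));    // best-effort cleanup (the real code uses memory_cleanse)
    }
};

Because the bytes handed out and the bytes kept as the new state are disjoint halves of the digest, an observer who sees outputs would still have to invert SHA512 to learn the state that produces future outputs.
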
524
525
RNGState& GetRNGState() noexcept
526
15.8k
{
527
    // This idiom relies on the guarantee that static variables are initialized
528
    // on first call, even when multiple parallel calls are permitted.
529
15.8k
    static std::vector<RNGState, secure_allocator<RNGState>> g_rng(1);
530
15.8k
    return g_rng[0];
531
15.8k
}
532
533
/* A note on the use of noexcept in the seeding functions below:
534
 *
535
 * None of the RNG code should ever throw any exception.
536
 */
537
538
void SeedTimestamp(CSHA512& hasher) noexcept
539
2.33k
{
540
2.33k
    int64_t perfcounter = GetPerformanceCounter();
541
2.33k
    hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
542
2.33k
}
543
544
void SeedFast(CSHA512& hasher) noexcept
545
2.33k
{
546
2.33k
    unsigned char buffer[32];
547
548
    // Stack pointer to indirectly commit to thread/callstack
549
2.33k
    const unsigned char* ptr = buffer;
550
2.33k
    hasher.Write((const unsigned char*)&ptr, sizeof(ptr));
551
552
    // Hardware randomness is very fast when available; use it always.
553
2.33k
    SeedHardwareFast(hasher);
554
555
    // High-precision timestamp
556
2.33k
    SeedTimestamp(hasher);
557
2.33k
}
558
559
void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept
560
0
{
561
0
    unsigned char buffer[32];
562
563
    // Everything that the 'fast' seeder includes
564
0
    SeedFast(hasher);
565
566
    // OS randomness
567
0
    GetOSRand(buffer);
568
0
    hasher.Write(buffer, sizeof(buffer));
569
570
    // Add the events hasher into the mix
571
0
    rng.SeedEvents(hasher);
572
573
    // High-precision timestamp.
574
    //
575
    // Note that we also commit to a timestamp in the Fast seeder, so we indirectly commit to a
576
    // benchmark of all the entropy gathering sources in this function.
577
0
    SeedTimestamp(hasher);
578
0
}
579
580
/** Extract entropy from rng, strengthen it, and feed it into hasher. */
581
void SeedStrengthen(CSHA512& hasher, RNGState& rng, SteadyClock::duration dur) noexcept
582
0
{
583
    // Generate 32 bytes of entropy from the RNG, and a copy of the entropy already in hasher.
584
    // Never use the deterministic PRNG for this, as the result is only used internally.
585
0
    unsigned char strengthen_seed[32];
586
0
    rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false, /*always_use_real_rng=*/true);
587
    // Strengthen the seed, and feed it into hasher.
588
0
    Strengthen(strengthen_seed, dur, hasher);
589
0
}
590
591
void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept
592
0
{
593
    // Everything that the 'fast' seeder includes
594
0
    SeedFast(hasher);
595
596
    // High-precision timestamp
597
0
    SeedTimestamp(hasher);
598
599
    // Add the events hasher into the mix
600
0
    rng.SeedEvents(hasher);
601
602
    // Dynamic environment data (performance monitoring, ...)
603
0
    auto old_size = hasher.Size();
604
0
    RandAddDynamicEnv(hasher);
605
0
    LogPrint(BCLog::RAND, "Feeding %i bytes of dynamic environment data into RNG\n", hasher.Size() - old_size);
606
607
    // Strengthen for 10 ms
608
0
    SeedStrengthen(hasher, rng, 10ms);
609
0
}
610
611
void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept
612
0
{
613
    // Gather 256 bits of hardware randomness, if available
614
0
    SeedHardwareSlow(hasher);
615
616
    // Everything that the 'slow' seeder includes.
617
0
    SeedSlow(hasher, rng);
618
619
    // Dynamic environment data (performance monitoring, ...)
620
0
    auto old_size = hasher.Size();
621
0
    RandAddDynamicEnv(hasher);
622
623
    // Static environment data
624
0
    RandAddStaticEnv(hasher);
625
0
    LogPrint(BCLog::RAND, "Feeding %i bytes of environment data into RNG\n", hasher.Size() - old_size);
626
627
    // Strengthen for 100 ms
628
0
    SeedStrengthen(hasher, rng, 100ms);
629
0
}
630
631
enum class RNGLevel {
632
    FAST, //!< Automatically called by GetRandBytes
633
    SLOW, //!< Automatically called by GetStrongRandBytes
634
    PERIODIC, //!< Called by RandAddPeriodic()
635
};
636
637
void ProcRand(unsigned char* out, int num, RNGLevel level, bool always_use_real_rng) noexcept
638
2.33k
{
639
    // Make sure the RNG is initialized first (as all Seed* functions possibly need hwrand to be available).
640
2.33k
    RNGState& rng = GetRNGState();
641
642
2.33k
    assert(num <= 32);
643
644
2.33k
    CSHA512 hasher;
645
2.33k
    switch (level) {
  Branch (645:13): [True: 0, False: 2.33k]
646
2.33k
    case RNGLevel::FAST:
  Branch (646:5): [True: 2.33k, False: 0]
647
2.33k
        SeedFast(hasher);
648
2.33k
        break;
649
0
    case RNGLevel::SLOW:
  Branch (649:5): [True: 0, False: 2.33k]
650
0
        SeedSlow(hasher, rng);
651
0
        break;
652
0
    case RNGLevel::PERIODIC:
  Branch (652:5): [True: 0, False: 2.33k]
653
0
        SeedPeriodic(hasher, rng);
654
0
        break;
655
2.33k
    }
656
657
    // Combine with and update state
658
2.33k
    if (!rng.MixExtract(out, num, std::move(hasher), false, always_use_real_rng)) {
  Branch (658:9): [True: 0, False: 2.33k]
659
        // On the first invocation, also seed with SeedStartup().
660
0
        CSHA512 startup_hasher;
661
0
        SeedStartup(startup_hasher, rng);
662
0
        rng.MixExtract(out, num, std::move(startup_hasher), true, always_use_real_rng);
663
0
    }
664
2.33k
}
665
666
} // namespace
667
668
669
/** Internal function to set g_deterministic_rng. Only accessed from tests. */
670
void MakeRandDeterministicDANGEROUS(const uint256& seed) noexcept
671
0
{
672
0
    GetRNGState().MakeDeterministic(seed);
673
0
}
674
675
void GetRandBytes(Span<unsigned char> bytes) noexcept
676
2.33k
{
677
2.33k
    ProcRand(bytes.data(), bytes.size(), RNGLevel::FAST, /*always_use_real_rng=*/false);
678
2.33k
}
679
680
void GetStrongRandBytes(Span<unsigned char> bytes) noexcept
681
0
{
682
0
    ProcRand(bytes.data(), bytes.size(), RNGLevel::SLOW, /*always_use_real_rng=*/true);
683
0
}
684
685
void RandAddPeriodic() noexcept
686
0
{
687
0
    ProcRand(nullptr, 0, RNGLevel::PERIODIC, /*always_use_real_rng=*/false);
688
0
}
689
690
13.5k
void RandAddEvent(const uint32_t event_info) noexcept { GetRNGState().AddEvent(event_info); }
691
692
void FastRandomContext::RandomSeed() noexcept
693
2.33k
{
694
2.33k
    uint256 seed = GetRandHash();
695
2.33k
    rng.SetKey(MakeByteSpan(seed));
696
2.33k
    requires_seed = false;
697
2.33k
}
698
699
void FastRandomContext::fillrand(Span<std::byte> output) noexcept
700
0
{
701
0
    if (requires_seed) RandomSeed();
  Branch (701:9): [True: 0, False: 0]
702
0
    rng.Keystream(output);
703
0
}
704
705
0
FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), rng(MakeByteSpan(seed)) {}
706
707
void FastRandomContext::Reseed(const uint256& seed) noexcept
708
0
{
709
0
    FlushCache();
710
0
    requires_seed = false;
711
0
    rng = {MakeByteSpan(seed)};
712
0
}
713
714
bool Random_SanityCheck()
715
0
{
716
0
    uint64_t start = GetPerformanceCounter();
717
718
    /* This does not measure the quality of randomness, but it does test that
719
     * GetOSRand() overwrites all 32 bytes of the output given a maximum
720
     * number of tries.
721
     */
722
0
    static constexpr int MAX_TRIES{1024};
723
0
    uint8_t data[NUM_OS_RANDOM_BYTES];
724
0
    bool overwritten[NUM_OS_RANDOM_BYTES] = {}; /* Tracks which bytes have been overwritten at least once */
725
0
    int num_overwritten;
726
0
    int tries = 0;
727
    /* Loop until all bytes have been overwritten at least once, or the max number of tries is reached */
728
0
    do {
729
0
        memset(data, 0, NUM_OS_RANDOM_BYTES);
730
0
        GetOSRand(data);
731
0
        for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
  Branch (731:23): [True: 0, False: 0]
732
0
            overwritten[x] |= (data[x] != 0);
733
0
        }
734
735
0
        num_overwritten = 0;
736
0
        for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
  Branch (736:23): [True: 0, False: 0]
737
0
            if (overwritten[x]) {
  Branch (737:17): [True: 0, False: 0]
738
0
                num_overwritten += 1;
739
0
            }
740
0
        }
741
742
0
        tries += 1;
743
0
    } while (num_overwritten < NUM_OS_RANDOM_BYTES && tries < MAX_TRIES);
  Branch (743:14): [True: 0, False: 0]
  Branch (743:55): [True: 0, False: 0]
744
0
    if (num_overwritten != NUM_OS_RANDOM_BYTES) return false; /* If this failed, we bailed out after too many tries */
  Branch (744:9): [True: 0, False: 0]
745
746
    // Check that GetPerformanceCounter increases at least during a GetOSRand() call + 1ms sleep.
747
0
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
748
0
    uint64_t stop = GetPerformanceCounter();
749
0
    if (stop == start) return false;
  Branch (749:9): [True: 0, False: 0]
750
751
    // We called GetPerformanceCounter. Use it as entropy.
752
0
    CSHA512 to_add;
753
0
    to_add.Write((const unsigned char*)&start, sizeof(start));
754
0
    to_add.Write((const unsigned char*)&stop, sizeof(stop));
755
0
    GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false, /*always_use_real_rng=*/true);
756
757
0
    return true;
758
0
}
759
760
static constexpr std::array<std::byte, ChaCha20::KEYLEN> ZERO_KEY{};
761
762
9.17k
FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), rng(ZERO_KEY)
763
9.17k
{
764
    // Note that despite always initializing with ZERO_KEY, requires_seed is set to true if not
765
    // fDeterministic. That means the rng will be reinitialized with a secure random key upon first
766
    // use.
767
9.17k
}
768
769
void RandomInit()
770
0
{
771
    // Invoke RNG code to trigger initialization (if not already performed)
772
0
    ProcRand(nullptr, 0, RNGLevel::FAST, /*always_use_real_rng=*/true);
773
774
0
    ReportHardwareRand();
775
0
}
776
777
double MakeExponentiallyDistributed(uint64_t uniform) noexcept
778
1.16k
{
779
    // To convert uniform into an exponentially-distributed double, we use two steps:
780
    // - Convert uniform into a uniformly-distributed double in range [0, 1) using the expression
781
    //   ((uniform >> 11) * 0x1.0p-53), as described in https://prng.di.unimi.it/ under
782
    //   "Generating uniform doubles in the unit interval". Call this value x.
783
    // - Given an x uniformly distributed in [0, 1), we find an exponentially distributed value
784
    //   by applying the quantile function to it. For the exponential distribution with mean 1 this
785
    //   is F(x) = -log(1 - x).
786
    //
787
    // Combining the two, and using log1p(x) = log(1 + x), we obtain the following:
788
1.16k
    return -std::log1p((uniform >> 11) * -0x1.0p-53);
789
1.16k
}
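
The mapping above can be checked numerically with a small standalone program. ToExponential below mirrors the expression used in MakeExponentiallyDistributed; SplitMix64 is an unrelated stand-in generator used here only to produce test inputs and is not part of random.cpp. For uniform = 2^63 the intermediate x equals 0.5, so the result should be -log(0.5) = ln 2 ≈ 0.693, and the sample mean over many draws should approach 1, the mean of the Exp(1) distribution.

// Sketch only: a numeric check of the uniform -> Exp(1) mapping; names are illustrative.
#include <cmath>
#include <cstdint>
#include <cstdio>

static double ToExponential(uint64_t uniform) noexcept
{
    // (uniform >> 11) * 2^-53 is uniform in [0, 1); -log1p(-x) = -log(1 - x)
    // is the Exp(1) quantile function applied to x.
    return -std::log1p((uniform >> 11) * -0x1.0p-53);
}

// SplitMix64: a simple 64-bit generator used solely to feed test inputs.
static uint64_t SplitMix64(uint64_t& x) noexcept
{
    uint64_t z = (x += 0x9E3779B97F4A7C15ULL);
    z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL;
    z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL;
    return z ^ (z >> 31);
}

int main()
{
    // uniform = 2^63 maps to x = 0.5 and therefore to -log(0.5) = ln 2 ~= 0.693.
    std::printf("single value: %f\n", ToExponential(1ULL << 63));

    // The sample mean of many draws should approach 1 (the mean of Exp(1)).
    uint64_t seed = 42;
    double sum = 0.0;
    const int N = 1000000;
    for (int i = 0; i < N; ++i) sum += ToExponential(SplitMix64(seed));
    std::printf("sample mean:  %f\n", sum / N);
}
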