Coverage Report

Created: 2024-08-21 05:08

/workdir/bitcoin/src/blockencodings.cpp
 Line|  Count|Source
    1|       |// Copyright (c) 2016-2022 The Bitcoin Core developers
    2|       |// Distributed under the MIT software license, see the accompanying
    3|       |// file COPYING or http://www.opensource.org/licenses/mit-license.php.
    4|       |
    5|       |#include <blockencodings.h>
    6|       |#include <chainparams.h>
    7|       |#include <common/system.h>
    8|       |#include <consensus/consensus.h>
    9|       |#include <consensus/validation.h>
   10|       |#include <crypto/sha256.h>
   11|       |#include <crypto/siphash.h>
   12|       |#include <logging.h>
   13|       |#include <random.h>
   14|       |#include <streams.h>
   15|       |#include <txmempool.h>
   16|       |#include <validation.h>
   17|       |
   18|       |#include <unordered_map>
   19|       |
   20|       |CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce) :
   21|  1.68k|        nonce(nonce),
   22|  1.68k|        shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
   23|  1.68k|    FillShortTxIDSelector();
   24|       |    //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
   25|  1.68k|    prefilledtxn[0] = {0, block.vtx[0]};
   26|  1.68k|    for (size_t i = 1; i < block.vtx.size(); i++) {
     |       |  Branch (26:24): [True: 0, False: 1.68k]
   27|      0|        const CTransaction& tx = *block.vtx[i];
   28|      0|        shorttxids[i - 1] = GetShortID(tx.GetWitnessHash());
   29|      0|    }
   30|  1.68k|}
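Note on the constructor above: only the coinbase is prefilled, and every other transaction in the block is reduced to a 6-byte short ID keyed by the caller-supplied nonce. The following sender-side helper is a minimal sketch, not code from this file; it assumes FastRandomContext from <random.h> (already included above) as the nonce source, and the name MakeCompactAnnouncementSketch is invented for the example.

    // Hypothetical sender-side helper: build a compact-block announcement from a
    // freshly validated block. A fresh random nonce per announcement keeps
    // short-ID collisions uncorrelated across peers.
    #include <blockencodings.h>
    #include <primitives/block.h>
    #include <random.h>

    CBlockHeaderAndShortTxIDs MakeCompactAnnouncementSketch(const CBlock& block)
    {
        FastRandomContext rng;                                  // RNG used elsewhere in Bitcoin Core
        return CBlockHeaderAndShortTxIDs{block, rng.rand64()};  // calls the constructor shown above
    }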
   31|       |
   32|  3.37k|void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
   33|  3.37k|    DataStream stream{};
   34|  3.37k|    stream << header << nonce;
   35|  3.37k|    CSHA256 hasher;
   36|  3.37k|    hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
   37|  3.37k|    uint256 shorttxidhash;
   38|  3.37k|    hasher.Finalize(shorttxidhash.begin());
   39|  3.37k|    shorttxidk0 = shorttxidhash.GetUint64(0);
   40|  3.37k|    shorttxidk1 = shorttxidhash.GetUint64(1);
   41|  3.37k|}
   42|       |
   43|      0|uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const Wtxid& wtxid) const {
   44|      0|    static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
   45|      0|    return SipHashUint256(shorttxidk0, shorttxidk1, wtxid) & 0xffffffffffffL;
   46|      0|}
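Taken together, FillShortTxIDSelector() and GetShortID() implement the BIP 152 short transaction ID: SipHash-2-4 keyed by the first two 64-bit words of SHA256(serialized header || nonce), truncated to the low 48 bits. Below is a standalone restatement of that derivation, written as a free function for illustration only (ShortIDSketch is not a name from this file).

    // Illustrative sketch: derive one short ID end-to-end. Mirrors the two
    // member functions above but takes everything as explicit arguments.
    #include <crypto/sha256.h>
    #include <crypto/siphash.h>
    #include <primitives/block.h>
    #include <streams.h>
    #include <uint256.h>

    uint64_t ShortIDSketch(const CBlockHeader& header, uint64_t nonce, const uint256& wtxid)
    {
        // SipHash keys k0/k1 come from SHA256 of the serialized header plus nonce.
        DataStream stream{};
        stream << header << nonce;
        CSHA256 hasher;
        hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
        uint256 key_hash;
        hasher.Finalize(key_hash.begin());

        // SipHash-2-4 of the wtxid, keeping only the low 6 bytes (48 bits).
        return SipHashUint256(key_hash.GetUint64(0), key_hash.GetUint64(1), wtxid) & 0xffffffffffffL;
    }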
   47|       |
   48|       |
   49|       |
   50|      0|ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<CTransactionRef>& extra_txn) {
   51|      0|    if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
     |       |  Branch (51:9): [True: 0, False: 0]
     |       |  Branch (51:40): [True: 0, False: 0]
     |       |  Branch (51:73): [True: 0, False: 0]
   52|      0|        return READ_STATUS_INVALID;
   53|      0|    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
     |       |  Branch (53:9): [True: 0, False: 0]
   54|      0|        return READ_STATUS_INVALID;
   55|       |
   56|      0|    if (!header.IsNull() || !txn_available.empty()) return READ_STATUS_INVALID;
     |       |  Branch (56:9): [True: 0, False: 0]
     |       |  Branch (56:29): [True: 0, False: 0]
   57|       |
   58|      0|    header = cmpctblock.header;
   59|      0|    txn_available.resize(cmpctblock.BlockTxCount());
   60|       |
   61|      0|    int32_t lastprefilledindex = -1;
   62|      0|    for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
     |       |  Branch (62:24): [True: 0, False: 0]
   63|      0|        if (cmpctblock.prefilledtxn[i].tx->IsNull())
     |       |  Branch (63:13): [True: 0, False: 0]
   64|      0|            return READ_STATUS_INVALID;
   65|       |
   66|      0|        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
   67|      0|        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
     |       |  Branch (67:13): [True: 0, False: 0]
   68|      0|            return READ_STATUS_INVALID;
   69|      0|        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
     |       |  Branch (69:13): [True: 0, False: 0]
   70|       |            // If we are inserting a tx at an index greater than our full list of shorttxids
   71|       |            // plus the number of prefilled txn we've inserted, then we have txn for which we
   72|       |            // have neither a prefilled txn or a shorttxid!
   73|      0|            return READ_STATUS_INVALID;
   74|      0|        }
   75|      0|        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
   76|      0|    }
   77|      0|    prefilled_count = cmpctblock.prefilledtxn.size();
   78|       |
   79|       |    // Calculate map of txids -> positions and check mempool to see what we have (or don't)
   80|       |    // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
   81|       |    // of short IDs, any highly-uneven distribution of elements can be safely treated as a
   82|       |    // READ_STATUS_FAILED.
   83|      0|    std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
   84|      0|    uint16_t index_offset = 0;
   85|      0|    for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
     |       |  Branch (85:24): [True: 0, False: 0]
   86|      0|        while (txn_available[i + index_offset])
     |       |  Branch (86:16): [True: 0, False: 0]
   87|      0|            index_offset++;
   88|      0|        shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
   89|       |        // To determine the chance that the number of entries in a bucket exceeds N,
   90|       |        // we use the fact that the number of elements in a single bucket is
   91|       |        // binomially distributed (with n = the number of shorttxids S, and p =
   92|       |        // 1 / the number of buckets), that in the worst case the number of buckets is
   93|       |        // equal to S (due to std::unordered_map having a default load factor of 1.0),
   94|       |        // and that the chance for any bucket to exceed N elements is at most
   95|       |        // buckets * (the chance that any given bucket is above N elements).
   96|       |        // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
   97|       |        // If we assume blocks of up to 16000, allowing 12 elements per bucket should
   98|       |        // only fail once per ~1 million block transfers (per peer and connection).
   99|      0|        if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
     |       |  Branch (99:13): [True: 0, False: 0]
  100|      0|            return READ_STATUS_FAILED;
  101|      0|    }
  102|       |    // TODO: in the shortid-collision case, we should instead request both transactions
  103|       |    // which collided. Falling back to full-block-request here is overkill.
  104|      0|    if (shorttxids.size() != cmpctblock.shorttxids.size())
     |       |  Branch (104:9): [True: 0, False: 0]
  105|      0|        return READ_STATUS_FAILED; // Short ID collision
  106|       |
  107|      0|    std::vector<bool> have_txn(txn_available.size());
  108|      0|    {
  109|      0|    LOCK(pool->cs);
  110|      0|    for (const auto& tx : pool->txns_randomized) {
     |       |  Branch (110:25): [True: 0, False: 0]
  111|      0|        uint64_t shortid = cmpctblock.GetShortID(tx->GetWitnessHash());
  112|      0|        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
  113|      0|        if (idit != shorttxids.end()) {
     |       |  Branch (113:13): [True: 0, False: 0]
  114|      0|            if (!have_txn[idit->second]) {
     |       |  Branch (114:17): [True: 0, False: 0]
  115|      0|                txn_available[idit->second] = tx;
  116|      0|                have_txn[idit->second]  = true;
  117|      0|                mempool_count++;
  118|      0|            } else {
  119|       |                // If we find two mempool txn that match the short id, just request it.
  120|       |                // This should be rare enough that the extra bandwidth doesn't matter,
  121|       |                // but eating a round-trip due to FillBlock failure would be annoying
  122|      0|                if (txn_available[idit->second]) {
     |       |  Branch (122:21): [True: 0, False: 0]
  123|      0|                    txn_available[idit->second].reset();
  124|      0|                    mempool_count--;
  125|      0|                }
  126|      0|            }
  127|      0|        }
  128|       |        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
  129|       |        // the performance win of an early exit here is too good to pass up and worth
  130|       |        // the extra risk.
  131|      0|        if (mempool_count == shorttxids.size())
     |       |  Branch (131:13): [True: 0, False: 0]
  132|      0|            break;
  133|      0|    }
  134|      0|    }
  135|       |
  136|      0|    for (size_t i = 0; i < extra_txn.size(); i++) {
     |       |  Branch (136:24): [True: 0, False: 0]
  137|      0|        if (extra_txn[i] == nullptr) {
     |       |  Branch (137:13): [True: 0, False: 0]
  138|      0|            continue;
  139|      0|        }
  140|      0|        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i]->GetWitnessHash());
  141|      0|        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
  142|      0|        if (idit != shorttxids.end()) {
     |       |  Branch (142:13): [True: 0, False: 0]
  143|      0|            if (!have_txn[idit->second]) {
     |       |  Branch (143:17): [True: 0, False: 0]
  144|      0|                txn_available[idit->second] = extra_txn[i];
  145|      0|                have_txn[idit->second]  = true;
  146|      0|                mempool_count++;
  147|      0|                extra_count++;
  148|      0|            } else {
  149|       |                // If we find two mempool/extra txn that match the short id, just
  150|       |                // request it.
  151|       |                // This should be rare enough that the extra bandwidth doesn't matter,
  152|       |                // but eating a round-trip due to FillBlock failure would be annoying
  153|       |                // Note that we don't want duplication between extra_txn and mempool to
  154|       |                // trigger this case, so we compare witness hashes first
  155|      0|                if (txn_available[idit->second] &&
     |       |  Branch (155:21): [True: 0, False: 0]
  156|      0|                        txn_available[idit->second]->GetWitnessHash() != extra_txn[i]->GetWitnessHash()) {
     |       |  Branch (156:25): [True: 0, False: 0]
  157|      0|                    txn_available[idit->second].reset();
  158|      0|                    mempool_count--;
  159|      0|                    extra_count--;
  160|      0|                }
  161|      0|            }
  162|      0|        }
  163|       |        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
  164|       |        // the performance win of an early exit here is too good to pass up and worth
  165|       |        // the extra risk.
  166|      0|        if (mempool_count == shorttxids.size())
     |       |  Branch (166:13): [True: 0, False: 0]
  167|      0|            break;
  168|      0|    }
  169|       |
  170|      0|    LogPrint(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock));
  171|       |
  172|      0|    return READ_STATUS_OK;
  173|      0|}
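The per-bucket cap of 12 checked at line 99 follows from the bound quoted in the comment at lines 89-98: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S, p=1/S), N)). The standalone program below is an illustrative check, not part of Bitcoin Core; it evaluates that bound for the assumed worst case S = 16000 and N = 12 and lands on the order of 1e-6, matching the "once per ~1 million block transfers" figure.

    // Illustrative check of the bucket-overflow bound used in InitData().
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const int S = 16000;       // assumed upper bound on short IDs per block
        const int N = 12;          // per-bucket limit enforced at line 99
        const double p = 1.0 / S;  // worst case: bucket count == S (load factor 1.0)

        // cdf(binomial(n=S, p), N) = sum_{k=0..N} C(S,k) p^k (1-p)^(S-k),
        // with the binomial coefficient computed in log space via lgamma.
        double cdf = 0.0;
        for (int k = 0; k <= N; ++k) {
            const double log_term = std::lgamma(S + 1.0) - std::lgamma(k + 1.0) - std::lgamma(S - k + 1.0)
                                  + k * std::log(p) + (S - k) * std::log1p(-p);
            cdf += std::exp(log_term);
        }

        // Union bound over all S buckets; prints a value on the order of 1e-6.
        std::printf("P(some bucket holds more than %d short IDs) <= %.3g\n", N, S * (1.0 - cdf));
        return 0;
    }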
  174|       |
  175|       |bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
  176|      0|{
  177|      0|    if (header.IsNull()) return false;
     |       |  Branch (177:9): [True: 0, False: 0]
  178|       |
  179|      0|    assert(index < txn_available.size());
  180|      0|    return txn_available[index] != nullptr;
  181|      0|}
  182|       |
  183|       |ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing)
  184|      0|{
  185|      0|    if (header.IsNull()) return READ_STATUS_INVALID;
     |       |  Branch (185:9): [True: 0, False: 0]
  186|       |
  187|      0|    uint256 hash = header.GetHash();
  188|      0|    block = header;
  189|      0|    block.vtx.resize(txn_available.size());
  190|       |
  191|      0|    size_t tx_missing_offset = 0;
  192|      0|    for (size_t i = 0; i < txn_available.size(); i++) {
     |       |  Branch (192:24): [True: 0, False: 0]
  193|      0|        if (!txn_available[i]) {
     |       |  Branch (193:13): [True: 0, False: 0]
  194|      0|            if (vtx_missing.size() <= tx_missing_offset)
     |       |  Branch (194:17): [True: 0, False: 0]
  195|      0|                return READ_STATUS_INVALID;
  196|      0|            block.vtx[i] = vtx_missing[tx_missing_offset++];
  197|      0|        } else
  198|      0|            block.vtx[i] = std::move(txn_available[i]);
  199|      0|    }
  200|       |
  201|       |    // Make sure we can't call FillBlock again.
  202|      0|    header.SetNull();
  203|      0|    txn_available.clear();
  204|       |
  205|      0|    if (vtx_missing.size() != tx_missing_offset)
     |       |  Branch (205:9): [True: 0, False: 0]
  206|      0|        return READ_STATUS_INVALID;
  207|       |
  208|      0|    BlockValidationState state;
  209|      0|    CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock;
     |       |  Branch (209:32): [True: 0, False: 0]
  210|      0|    if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) {
     |       |  Branch (210:9): [True: 0, False: 0]
  211|       |        // TODO: We really want to just check merkle tree manually here,
  212|       |        // but that is expensive, and CheckBlock caches a block's
  213|       |        // "checked-status" (in the CBlock?). CBlock should be able to
  214|       |        // check its own merkle root and cache that check.
  215|      0|        if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED)
     |       |  Branch (215:13): [True: 0, False: 0]
  216|      0|            return READ_STATUS_FAILED; // Possible Short ID collision
  217|      0|        return READ_STATUS_CHECKBLOCK_FAILED;
  218|      0|    }
  219|       |
  220|      0|    LogPrint(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
  221|      0|    if (vtx_missing.size() < 5) {
     |       |  Branch (221:9): [True: 0, False: 0]
  222|      0|        for (const auto& tx : vtx_missing) {
     |       |  Branch (222:29): [True: 0, False: 0]
  223|      0|            LogPrint(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
  224|      0|        }
  225|      0|    }
  226|       |
  227|      0|    return READ_STATUS_OK;
  228|      0|}
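For context on how these three methods fit together on the receiving side (per BIP 152: InitData(), then IsTxAvailable() to decide which indexes to request via getblocktxn, then FillBlock()), here is a minimal driver sketch. It is an assumption-laden illustration, not the actual networking code; the names ReconstructSketch and block_out are invented for the example, and the missing-transaction request step is left as a comment.

    // Minimal receive-side sketch (hypothetical; the real driver lives in the
    // networking code, not in this file).
    #include <blockencodings.h>
    #include <primitives/block.h>
    #include <vector>

    ReadStatus ReconstructSketch(CTxMemPool* pool,
                                 const CBlockHeaderAndShortTxIDs& cmpctblock,
                                 const std::vector<CTransactionRef>& extra_txn,
                                 CBlock& block_out)
    {
        PartiallyDownloadedBlock partial_block(pool);

        // Match short IDs against the mempool and the extra-transaction pool.
        const ReadStatus status = partial_block.InitData(cmpctblock, extra_txn);
        if (status != READ_STATUS_OK) return status;

        // Indexes still unavailable would be fetched from the peer (getblocktxn)
        // and appended to vtx_missing in block order; this sketch leaves it empty.
        std::vector<CTransactionRef> vtx_missing;
        for (size_t i = 0; i < cmpctblock.BlockTxCount(); ++i) {
            if (!partial_block.IsTxAvailable(i)) {
                // request transaction i from the peer here
            }
        }

        // FillBlock() consumes the partial block and runs CheckBlock() on the result.
        return partial_block.FillBlock(block_out, vtx_missing);
    }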