fuzz coverage

Coverage Report

Created: 2025-06-01 19:34

/Users/eugenesiegel/btc/bitcoin/src/blockencodings.cpp
Line |  Count | Source
   1 |        | // Copyright (c) 2016-2022 The Bitcoin Core developers
   2 |        | // Distributed under the MIT software license, see the accompanying
   3 |        | // file COPYING or http://www.opensource.org/licenses/mit-license.php.
   4 |        |
   5 |        | #include <blockencodings.h>
   6 |        | #include <chainparams.h>
   7 |        | #include <common/system.h>
   8 |        | #include <consensus/consensus.h>
   9 |        | #include <consensus/validation.h>
  10 |        | #include <crypto/sha256.h>
  11 |        | #include <crypto/siphash.h>
  12 |        | #include <logging.h>
  13 |        | #include <random.h>
  14 |        | #include <streams.h>
  15 |        | #include <txmempool.h>
  16 |        | #include <validation.h>
  17 |        |
  18 |        | #include <unordered_map>
  19 |        |
  20 |   165k | CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce) : nonce(nonce),
  21 |   165k |                                                                                                   header(block), shorttxids(block.vtx.size() - 1), prefilledtxn(1)
  22 |   165k | {
  23 |   165k |     FillShortTxIDSelector();
  24 |        |     //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
  25 |   165k |     prefilledtxn[0] = {0, block.vtx[0]};
  26 |   468k |     for (size_t i = 1; i < block.vtx.size(); i++) {  [i++: 303k]
  27 |   303k |         const CTransaction& tx = *block.vtx[i];
  28 |   303k |         shorttxids[i - 1] = GetShortID(tx.GetWitnessHash());
  29 |   303k |     }
  30 |   165k | }
  31 |        |
  32 |   271k | void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
  33 |   271k |     DataStream stream{};
  34 |   271k |     stream << header << nonce;
  35 |   271k |     CSHA256 hasher;
  36 |   271k |     hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
  37 |   271k |     uint256 shorttxidhash;
  38 |   271k |     hasher.Finalize(shorttxidhash.begin());
  39 |   271k |     shorttxidk0 = shorttxidhash.GetUint64(0);
  40 |   271k |     shorttxidk1 = shorttxidhash.GetUint64(1);
  41 |   271k | }
  42 |        |
  43 |   303k | uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const Wtxid& wtxid) const {
  44 |   303k |     static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
  45 |   303k |     return SipHashUint256(shorttxidk0, shorttxidk1, wtxid) & 0xffffffffffffL;
  46 |   303k | }
  47 |        |
  48 |        |
  49 |        |
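
The two functions above implement BIP 152 short transaction IDs: the SipHash keys k0/k1 are the first two little-endian 64-bit words of SHA256(serialized header || nonce), and the ID is the SipHash-2-4 of the wtxid truncated to the low 48 bits. A minimal standalone sketch of the same computation (using a raw uint256 for the wtxid and a pre-serialized header-plus-nonce buffer; a sketch, not a drop-in replacement for the class members):

// Sketch: compute a BIP 152-style short ID for one wtxid, given the serialized
// (header || nonce) bytes. Mirrors FillShortTxIDSelector() + GetShortID() above.
#include <crypto/sha256.h>
#include <crypto/siphash.h>
#include <uint256.h>

#include <cstdint>
#include <vector>

uint64_t ShortIDSketch(const std::vector<unsigned char>& header_and_nonce, const uint256& wtxid)
{
    // k0/k1 are the first two little-endian 64-bit words of SHA256(header || nonce).
    uint256 h;
    CSHA256().Write(header_and_nonce.data(), header_and_nonce.size()).Finalize(h.begin());
    const uint64_t k0{h.GetUint64(0)};
    const uint64_t k1{h.GetUint64(1)};
    // SipHash-2-4 of the wtxid, keeping only the low 6 bytes (SHORTTXIDS_LENGTH == 6).
    return SipHashUint256(k0, k1, wtxid) & 0xffffffffffffL;
}

Because the keys depend on both the header and a per-message nonce, the same transaction maps to different short IDs in different compact-block announcements, which makes precomputed collision grinding by third parties impractical.
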
  50 |  47.2k | ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<CTransactionRef>& extra_txn) {
  51 |  47.2k |     if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))  [prefilledtxn.empty(): 42.4k]
  52 |      0 |         return READ_STATUS_INVALID;
  53 |  47.2k |     if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
  54 |      0 |         return READ_STATUS_INVALID;
  55 |        |
  56 |  47.2k |     if (!header.IsNull() || !txn_available.empty()) return READ_STATUS_INVALID;  [return: 0]
  57 |        |
  58 |  47.2k |     header = cmpctblock.header;
  59 |  47.2k |     txn_available.resize(cmpctblock.BlockTxCount());
  60 |        |
  61 |  47.2k |     int32_t lastprefilledindex = -1;
  62 |  94.8k |     for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {  [i++: 47.6k]
  63 |  47.8k |         if (cmpctblock.prefilledtxn[i].tx->IsNull())
  64 |    234 |             return READ_STATUS_INVALID;
  65 |        |
  66 |  47.6k |         lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
  67 |  47.6k |         if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
  68 |      0 |             return READ_STATUS_INVALID;
  69 |  47.6k |         if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
  70 |        |             // If we are inserting a tx at an index greater than our full list of shorttxids
  71 |        |             // plus the number of prefilled txn we've inserted, then we have txn for which we
  72 |        |             // have neither a prefilled txn or a shorttxid!
  73 |      0 |             return READ_STATUS_INVALID;
  74 |      0 |         }
  75 |  47.6k |         txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
  76 |  47.6k |     }
  77 |  47.0k |     prefilled_count = cmpctblock.prefilledtxn.size();
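
The loop above (source lines 61-76) undoes BIP 152's differential index encoding: each PrefilledTransaction carries only the gap since the previous prefilled slot, so the absolute position is previous + delta + 1. A minimal standalone sketch of just that decoding step, with a hypothetical helper name and the same uint16_t range check:

// Sketch: expand differential prefilled indexes into absolute block positions.
// Example: deltas {0, 1, 2} decode to absolute positions {0, 2, 5}.
#include <cstdint>
#include <limits>
#include <optional>
#include <vector>

std::optional<std::vector<uint32_t>> DecodePrefilledIndexes(const std::vector<uint16_t>& deltas)
{
    std::vector<uint32_t> absolute;
    int32_t last{-1};
    for (const uint16_t delta : deltas) {
        last += delta + 1; // same update as InitData() above
        if (last > std::numeric_limits<uint16_t>::max()) return std::nullopt; // index out of range
        absolute.push_back(static_cast<uint32_t>(last));
    }
    return absolute;
}
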
  78 |        |
  79 |        |     // Calculate map of txids -> positions and check mempool to see what we have (or don't)
  80 |        |     // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
  81 |        |     // of short IDs, any highly-uneven distribution of elements can be safely treated as a
  82 |        |     // READ_STATUS_FAILED.
  83 |  47.0k |     std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
  84 |  47.0k |     uint16_t index_offset = 0;
  85 |  81.3k |     for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {  [i++: 34.3k]
  86 |  39.5k |         while (txn_available[i + index_offset])
  87 |  5.19k |             index_offset++;
  88 |  34.3k |         shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
  89 |        |         // To determine the chance that the number of entries in a bucket exceeds N,
  90 |        |         // we use the fact that the number of elements in a single bucket is
  91 |        |         // binomially distributed (with n = the number of shorttxids S, and p =
  92 |        |         // 1 / the number of buckets), that in the worst case the number of buckets is
  93 |        |         // equal to S (due to std::unordered_map having a default load factor of 1.0),
  94 |        |         // and that the chance for any bucket to exceed N elements is at most
  95 |        |         // buckets * (the chance that any given bucket is above N elements).
  96 |        |         // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
  97 |        |         // If we assume blocks of up to 16000, allowing 12 elements per bucket should
  98 |        |         // only fail once per ~1 million block transfers (per peer and connection).
  99 |  34.3k |         if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
 100 |      0 |             return READ_STATUS_FAILED;
 101 |  34.3k |     }
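
The once-per-~1-million claim in the comment above can be sanity-checked with a Poisson approximation: Binomial(n = S, p = 1/S) has mean 1, so with S = 16000 and N = 12 (an approximation, not an exact evaluation of the binomial CDF):

S \cdot \bigl(1 - F_{\mathrm{Bin}(S,\,1/S)}(12)\bigr)
  \;\approx\; S \cdot P\bigl(\mathrm{Pois}(1) \ge 13\bigr)
  \;=\; 16000 \cdot e^{-1} \sum_{k \ge 13} \frac{1}{k!}
  \;\approx\; 16000 \cdot 6.4 \times 10^{-11}
  \;\approx\; 1.0 \times 10^{-6}

i.e. roughly one spurious READ_STATUS_FAILED per million compact-block transfers, consistent with the comment.
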
 102 |        |     // TODO: in the shortid-collision case, we should instead request both transactions
 103 |        |     // which collided. Falling back to full-block-request here is overkill.
 104 |  47.0k |     if (shorttxids.size() != cmpctblock.shorttxids.size())
 105 |  2.30k |         return READ_STATUS_FAILED; // Short ID collision
 106 |        |
 107 |  44.7k |     std::vector<bool> have_txn(txn_available.size());
 108 |  44.7k |     {
 109 |  44.7k |     LOCK(pool->cs);
 110 |  44.7k |     for (const auto& tx : pool->txns_randomized) {
 111 |      0 |         uint64_t shortid = cmpctblock.GetShortID(tx->GetWitnessHash());
 112 |      0 |         std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
 113 |      0 |         if (idit != shorttxids.end()) {
 114 |      0 |             if (!have_txn[idit->second]) {
 115 |      0 |                 txn_available[idit->second] = tx;
 116 |      0 |                 have_txn[idit->second]  = true;
 117 |      0 |                 mempool_count++;
 118 |      0 |             } else {
 119 |        |                 // If we find two mempool txn that match the short id, just request it.
 120 |        |                 // This should be rare enough that the extra bandwidth doesn't matter,
 121 |        |                 // but eating a round-trip due to FillBlock failure would be annoying
 122 |      0 |                 if (txn_available[idit->second]) {
 123 |      0 |                     txn_available[idit->second].reset();
 124 |      0 |                     mempool_count--;
 125 |      0 |                 }
 126 |      0 |             }
 127 |      0 |         }
 128 |        |         // Though ideally we'd continue scanning for the two-txn-match-shortid case,
 129 |        |         // the performance win of an early exit here is too good to pass up and worth
 130 |        |         // the extra risk.
 131 |      0 |         if (mempool_count == shorttxids.size())
 132 |      0 |             break;
 133 |      0 |     }
 134 |  44.7k |     }
 135 |        |
 136 |  44.7k |     for (size_t i = 0; i < extra_txn.size(); i++) {  [i++: 0]
 137 |      0 |         if (extra_txn[i] == nullptr) {
 138 |      0 |             continue;
 139 |      0 |         }
 140 |      0 |         uint64_t shortid = cmpctblock.GetShortID(extra_txn[i]->GetWitnessHash());
 141 |      0 |         std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
 142 |      0 |         if (idit != shorttxids.end()) {
 143 |      0 |             if (!have_txn[idit->second]) {
 144 |      0 |                 txn_available[idit->second] = extra_txn[i];
 145 |      0 |                 have_txn[idit->second]  = true;
 146 |      0 |                 mempool_count++;
 147 |      0 |                 extra_count++;
 148 |      0 |             } else {
 149 |        |                 // If we find two mempool/extra txn that match the short id, just
 150 |        |                 // request it.
 151 |        |                 // This should be rare enough that the extra bandwidth doesn't matter,
 152 |        |                 // but eating a round-trip due to FillBlock failure would be annoying
 153 |        |                 // Note that we don't want duplication between extra_txn and mempool to
 154 |        |                 // trigger this case, so we compare witness hashes first
 155 |      0 |                 if (txn_available[idit->second] &&
 156 |      0 |                         txn_available[idit->second]->GetWitnessHash() != extra_txn[i]->GetWitnessHash()) {
 157 |      0 |                     txn_available[idit->second].reset();
 158 |      0 |                     mempool_count--;
 159 |      0 |                     extra_count--;
 160 |      0 |                 }
 161 |      0 |             }
 162 |      0 |         }
 163 |        |         // Though ideally we'd continue scanning for the two-txn-match-shortid case,
 164 |        |         // the performance win of an early exit here is too good to pass up and worth
 165 |        |         // the extra risk.
 166 |      0 |         if (mempool_count == shorttxids.size())
 167 |      0 |             break;
 168 |      0 |     }
 169 |        |
 170 |  44.7k |     LogDebug(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock));
 171 |        |
 172 |  44.7k |     return READ_STATUS_OK;
 173 |  47.0k | }
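
Together with IsTxAvailable() and FillBlock() below, InitData() is driven by the caller roughly as in the following sketch. The getblocktxn/blocktxn round trip is elided; RequestMissingTxs() is a hypothetical stand-in for it, and error handling is reduced to returning the ReadStatus.

// Sketch of the caller-side reconstruction flow around PartiallyDownloadedBlock
// (assumes a CTxMemPool* pool and an already-deserialized cmpctblock message;
// RequestMissingTxs() is hypothetical and stands in for the getblocktxn round trip).
#include <blockencodings.h>
#include <primitives/block.h>
#include <txmempool.h>

#include <cstdint>
#include <vector>

std::vector<CTransactionRef> RequestMissingTxs(const std::vector<uint16_t>& indexes); // hypothetical

ReadStatus ReconstructSketch(CTxMemPool* pool, const CBlockHeaderAndShortTxIDs& cmpctblock, CBlock& block)
{
    PartiallyDownloadedBlock pdb{pool};
    const ReadStatus init_status{pdb.InitData(cmpctblock, /*extra_txn=*/{})};
    if (init_status != READ_STATUS_OK) return init_status;

    // Collect the slots InitData() could not fill from prefilled txn or the mempool.
    std::vector<uint16_t> missing;
    for (size_t i = 0; i < cmpctblock.BlockTxCount(); ++i) {
        if (!pdb.IsTxAvailable(i)) missing.push_back(static_cast<uint16_t>(i));
    }

    // Fetch whatever is missing (normally via getblocktxn/blocktxn), then let
    // FillBlock() splice the transactions in and run CheckBlock() on the result.
    std::vector<CTransactionRef> vtx_missing;
    if (!missing.empty()) vtx_missing = RequestMissingTxs(missing);
    return pdb.FillBlock(block, vtx_missing);
}
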
 174 |        |
 175 |        | bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
 176 |  59.8k | {
 177 |  59.8k |     if (header.IsNull()) return false;  [return: 0]
 178 |        |
 179 |  59.8k |     assert(index < txn_available.size());
 180 |  59.8k |     return txn_available[index] != nullptr;
 181 |  59.8k | }
 182 |        |
 183 |        | ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing)
 184 |  44.5k | {
 185 |  44.5k |     if (header.IsNull()) return READ_STATUS_INVALID;  [return: 0]
 186 |        |
 187 |  44.5k |     uint256 hash = header.GetHash();
 188 |  44.5k |     block = header;
 189 |  44.5k |     block.vtx.resize(txn_available.size());
 190 |        |
 191 |  44.5k |     size_t tx_missing_offset = 0;
 192 |   106k |     for (size_t i = 0; i < txn_available.size(); i++) {  [i++: 61.9k]
 193 |  61.9k |         if (!txn_available[i]) {
 194 |  16.9k |             if (vtx_missing.size() <= tx_missing_offset)
 195 |      0 |                 return READ_STATUS_INVALID;
 196 |  16.9k |             block.vtx[i] = vtx_missing[tx_missing_offset++];
 197 |  16.9k |         } else
 198 |  44.9k |             block.vtx[i] = std::move(txn_available[i]);
 199 |  61.9k |     }
 200 |        |
 201 |        |     // Make sure we can't call FillBlock again.
 202 |  44.5k |     header.SetNull();
 203 |  44.5k |     txn_available.clear();
 204 |        |
 205 |  44.5k |     if (vtx_missing.size() != tx_missing_offset)
 206 |     79 |         return READ_STATUS_INVALID;
 207 |        |
 208 |  44.5k |     BlockValidationState state;
 209 |  44.5k |     CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock;  [m_check_block_mock: 0]
 210 |  44.5k |     if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) {
 211 |        |         // TODO: We really want to just check merkle tree manually here,
 212 |        |         // but that is expensive, and CheckBlock caches a block's
 213 |        |         // "checked-status" (in the CBlock?). CBlock should be able to
 214 |        |         // check its own merkle root and cache that check.
 215 |  41.6k |         if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED)
 216 |      0 |             return READ_STATUS_FAILED; // Possible Short ID collision
 217 |  41.6k |         return READ_STATUS_CHECKBLOCK_FAILED;
 218 |  41.6k |     }
 219 |        |
 220 |  2.83k |     LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
 221 |  2.83k |     if (vtx_missing.size() < 5) {
 222 |  2.83k |         for (const auto& tx : vtx_missing) {
 223 |      0 |             LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
 224 |      0 |         }
 225 |  2.83k |     }
 226 |        |
 227 |  2.83k |     return READ_STATUS_OK;
 228 |  44.5k | }
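
Since this is fuzz coverage, the counts above come from a fuzz target repeatedly driving these paths with generated inputs; note that the loop over pool->txns_randomized was entered 44.7k times but its body never ran, suggesting the mempool was empty in these runs. Bitcoin Core keeps its real harnesses under src/test/fuzz/; the sketch below only illustrates the general shape, and the stream handling, the g_pool setup, and the bail-out conditions are all assumptions rather than the actual target.

// Illustrative fuzz-target shape (not Bitcoin Core's actual harness): decode a
// CBlockHeaderAndShortTxIDs from the raw input and drive InitData/IsTxAvailable/
// FillBlock. g_pool is assumed to be set up elsewhere by the harness.
#include <blockencodings.h>
#include <primitives/block.h>
#include <streams.h>
#include <txmempool.h>

#include <cstdint>
#include <ios>
#include <span>

extern CTxMemPool* g_pool; // assumption: initialized by the harness's setup code

void FuzzCompactBlockSketch(std::span<const uint8_t> buffer)
{
    CBlockHeaderAndShortTxIDs cmpctblock;
    try {
        DataStream ds{buffer};
        ds >> cmpctblock; // malformed inputs throw and are simply discarded
    } catch (const std::ios_base::failure&) {
        return;
    }

    PartiallyDownloadedBlock pdb{g_pool};
    if (pdb.InitData(cmpctblock, /*extra_txn=*/{}) != READ_STATUS_OK) return;

    // Only attempt reconstruction when every slot was prefilled or found locally;
    // a fuller harness would also exercise the vtx_missing path of FillBlock().
    for (size_t i = 0; i < cmpctblock.BlockTxCount(); ++i) {
        if (!pdb.IsTxAvailable(i)) return;
    }
    CBlock block;
    (void)pdb.FillBlock(block, /*vtx_missing=*/{});
}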