fuzz coverage

Coverage Report

Created: 2026-05-08 05:52

Legend: jump to next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/root/bitcoin/src/blockencodings.cpp
Line
Count
Source
1
// Copyright (c) 2016-present The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <blockencodings.h>
6
#include <chainparams.h>
7
#include <common/system.h>
8
#include <consensus/consensus.h>
9
#include <consensus/validation.h>
10
#include <crypto/sha256.h>
11
#include <crypto/siphash.h>
12
#include <logging.h>
13
#include <random.h>
14
#include <streams.h>
15
#include <txmempool.h>
16
#include <validation.h>
17
18
#include <unordered_map>
19
20
// Build the compact-block announcement for `block`: the coinbase is always
// sent prefilled, and every remaining transaction is represented by a 6-byte
// short ID computed from its witness hash with per-block SipHash keys.
CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, uint64_t nonce)
    : nonce(nonce),
      shorttxids(block.vtx.size() - 1),
      prefilledtxn(1),
      header(block)
{
    // Seed the SipHash keys from SHA256(header || nonce) before computing any IDs.
    FillShortTxIDSelector();
    // TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
    prefilledtxn[0] = {0, block.vtx[0]};
    for (size_t tx_idx = 1; tx_idx < block.vtx.size(); tx_idx++) {
        shorttxids[tx_idx - 1] = GetShortID(block.vtx[tx_idx]->GetWitnessHash());
    }
}
34
35
void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const
36
37.8k
{
37
37.8k
    DataStream stream{};
38
37.8k
    stream << header << nonce;
39
37.8k
    CSHA256 hasher;
40
37.8k
    hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
41
37.8k
    uint256 shorttxidhash;
42
37.8k
    hasher.Finalize(shorttxidhash.begin());
43
37.8k
    m_hasher.emplace(shorttxidhash.GetUint64(0), shorttxidhash.GetUint64(1));
44
37.8k
}
45
46
uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const Wtxid& wtxid) const
47
115k
{
48
115k
    static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
49
115k
    return (*Assert(m_hasher))(wtxid.ToUint256()) & 0xffffffffffffL;
Line
Count
Source
116
115k
#define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val)
50
115k
}
51
52
/* Reconstructing a compact block is in the hot-path for block relay,
53
 * so we want to do it as quickly as possible. Because this often
54
 * involves iterating over the entire mempool, we put all the data we
55
 * need (ie the wtxid and a reference to the actual transaction data)
56
 * in a vector and iterate over the vector directly. This allows optimal
57
 * CPU caching behaviour, at a cost of only 40 bytes per transaction.
58
 */
59
ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<std::pair<Wtxid, CTransactionRef>>& extra_txn)
60
3.88k
{
61
3.88k
    LogDebug(BCLog::CMPCTBLOCK, "Initializing PartiallyDownloadedBlock for block %s using a cmpctblock of %u bytes\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock));
Line
Count
Source
123
3.88k
#define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__)
Line
Count
Source
114
3.88k
    do {                                                               \
115
3.88k
        if (util::log::ShouldLog((category), (level))) {               \
116
0
            bool rate_limit{level >= BCLog::Level::Info};              \
117
0
            Assume(!rate_limit); /*Only called with the levels below*/ \
Line
Count
Source
128
0
#define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val)
118
0
            LogPrintLevel_(category, level, rate_limit, __VA_ARGS__);  \
Line
Count
Source
97
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
119
0
        }                                                              \
120
3.88k
    } while (0)
62
3.88k
    if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && 
cmpctblock.prefilledtxn.empty()1.03k
))
63
0
        return READ_STATUS_INVALID;
64
3.88k
    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
65
0
        return READ_STATUS_INVALID;
66
67
3.88k
    if (!header.IsNull() || !txn_available.empty()) 
return READ_STATUS_INVALID0
;
68
69
3.88k
    header = cmpctblock.header;
70
3.88k
    txn_available.resize(cmpctblock.BlockTxCount());
71
72
3.88k
    int32_t lastprefilledindex = -1;
73
8.00k
    for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); 
i++4.12k
) {
74
4.12k
        if (cmpctblock.prefilledtxn[i].tx->IsNull())
75
0
            return READ_STATUS_INVALID;
76
77
4.12k
        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
78
4.12k
        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
79
0
            return READ_STATUS_INVALID;
80
4.12k
        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
81
            // If we are inserting a tx at an index greater than our full list of shorttxids
82
            // plus the number of prefilled txn we've inserted, then we have txn for which we
83
            // have neither a prefilled txn or a shorttxid!
84
0
            return READ_STATUS_INVALID;
85
0
        }
86
4.12k
        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
87
4.12k
    }
88
3.88k
    prefilled_count = cmpctblock.prefilledtxn.size();
89
90
    // Calculate map of txids -> positions and check mempool to see what we have (or don't)
91
    // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
92
    // of short IDs, any highly-uneven distribution of elements can be safely treated as a
93
    // READ_STATUS_FAILED.
94
3.88k
    std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
95
3.88k
    uint16_t index_offset = 0;
96
17.5k
    for (size_t i = 0; i < cmpctblock.shorttxids.size(); 
i++13.7k
) {
97
16.6k
        while (txn_available[i + index_offset])
98
2.95k
            index_offset++;
99
13.7k
        shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
100
        // To determine the chance that the number of entries in a bucket exceeds N,
101
        // we use the fact that the number of elements in a single bucket is
102
        // binomially distributed (with n = the number of shorttxids S, and p =
103
        // 1 / the number of buckets), that in the worst case the number of buckets is
104
        // equal to S (due to std::unordered_map having a default load factor of 1.0),
105
        // and that the chance for any bucket to exceed N elements is at most
106
        // buckets * (the chance that any given bucket is above N elements).
107
        // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
108
        // If we assume blocks of up to 16000, allowing 12 elements per bucket should
109
        // only fail once per ~1 million block transfers (per peer and connection).
110
13.7k
        if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
111
0
            return READ_STATUS_FAILED;
112
13.7k
    }
113
3.88k
    if (shorttxids.size() != cmpctblock.shorttxids.size())
114
1.23k
        return READ_STATUS_FAILED; // Short ID collision
115
116
2.64k
    std::vector<bool> have_txn(txn_available.size());
117
2.64k
    {
118
2.64k
    LOCK(pool->cs);
Line
Count
Source
268
2.64k
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
2.64k
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
2.64k
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
2.64k
#define PASTE(x, y) x ## y
119
2.64k
    for (const auto& [wtxid, txit] : pool->txns_randomized) {
120
695
        uint64_t shortid = cmpctblock.GetShortID(wtxid);
121
695
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
122
695
        if (idit != shorttxids.end()) {
123
315
            if (!have_txn[idit->second]) {
124
315
                txn_available[idit->second] = txit->GetSharedTx();
125
315
                have_txn[idit->second]  = true;
126
315
                mempool_count++;
127
315
            } else {
128
                // If we find two mempool txn that match the short id, just request it.
129
                // This should be rare enough that the extra bandwidth doesn't matter,
130
                // but eating a round-trip due to FillBlock failure would be annoying
131
0
                if (txn_available[idit->second]) {
132
0
                    txn_available[idit->second].reset();
133
0
                    mempool_count--;
134
0
                }
135
0
            }
136
315
        }
137
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
138
        // the performance win of an early exit here is too good to pass up and worth
139
        // the extra risk.
140
695
        if (mempool_count == shorttxids.size())
141
104
            break;
142
695
    }
143
2.64k
    }
144
145
51.0k
    for (size_t i = 0; i < extra_txn.size(); 
i++48.4k
) {
146
48.6k
        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i].first);
147
48.6k
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
148
48.6k
        if (idit != shorttxids.end()) {
149
434
            if (!have_txn[idit->second]) {
150
434
                txn_available[idit->second] = extra_txn[i].second;
151
434
                have_txn[idit->second]  = true;
152
434
                mempool_count++;
153
434
                extra_count++;
154
434
            } else {
155
                // If we find two mempool/extra txn that match the short id, just
156
                // request it.
157
                // This should be rare enough that the extra bandwidth doesn't matter,
158
                // but eating a round-trip due to FillBlock failure would be annoying
159
                // Note that we don't want duplication between extra_txn and mempool to
160
                // trigger this case, so we compare witness hashes first
161
0
                if (txn_available[idit->second] &&
162
0
                        txn_available[idit->second]->GetWitnessHash() != extra_txn[i].second->GetWitnessHash()) {
163
0
                    txn_available[idit->second].reset();
164
0
                    mempool_count--;
165
0
                    extra_count--;
166
0
                }
167
0
            }
168
434
        }
169
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
170
        // the performance win of an early exit here is too good to pass up and worth
171
        // the extra risk.
172
48.6k
        if (mempool_count == shorttxids.size())
173
230
            break;
174
48.6k
    }
175
176
2.64k
    LogDebug(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of %u bytes\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock));
Line
Count
Source
123
2.64k
#define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__)
Line
Count
Source
114
2.64k
    do {                                                               \
115
2.64k
        if (util::log::ShouldLog((category), (level))) {               \
116
0
            bool rate_limit{level >= BCLog::Level::Info};              \
117
0
            Assume(!rate_limit); /*Only called with the levels below*/ \
Line
Count
Source
128
0
#define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val)
118
0
            LogPrintLevel_(category, level, rate_limit, __VA_ARGS__);  \
Line
Count
Source
97
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
119
0
        }                                                              \
120
2.64k
    } while (0)
177
178
2.64k
    return READ_STATUS_OK;
179
3.88k
}
180
181
bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
182
7.84k
{
183
7.84k
    if (header.IsNull()) 
return false0
;
184
185
7.84k
    assert(index < txn_available.size());
186
7.84k
    return txn_available[index] != nullptr;
187
7.84k
}
188
189
// Assemble the full block from the recovered transactions plus the peer's
// response to our getblocktxn request (`vtx_missing`, in block order).
// Consumes this object's state: a second call returns READ_STATUS_INVALID.
ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing, bool segwit_active)
{
    if (header.IsNull()) {
        return READ_STATUS_INVALID;
    }

    block = header;
    block.vtx.resize(txn_available.size());

    size_t missing_used = 0;
    for (size_t i = 0; i < txn_available.size(); i++) {
        if (txn_available[i]) {
            block.vtx[i] = std::move(txn_available[i]);
        } else {
            // Fill the gap from the requested transactions, in order.
            if (missing_used >= vtx_missing.size()) {
                return READ_STATUS_INVALID;
            }
            block.vtx[i] = vtx_missing[missing_used++];
        }
    }

    // Make sure we can't call FillBlock again.
    header.SetNull();
    txn_available.clear();

    // The peer must have sent exactly the transactions we asked for.
    if (vtx_missing.size() != missing_used) {
        return READ_STATUS_INVALID;
    }

    // Check for possible mutations early now that we have a seemingly good block
    IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? m_check_block_mutated_mock : IsBlockMutated};
    if (check_mutated(/*block=*/block, /*check_witness_root=*/segwit_active)) {
        return READ_STATUS_FAILED; // Possible Short ID collision
    }

    if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) {
        const uint256 hash{block.GetHash()};
        uint32_t tx_missing_size{0};
        for (const auto& tx : vtx_missing) tx_missing_size += tx->ComputeTotalSize();
        LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %u txn prefilled, %u txn from mempool (incl at least %u from extra pool) and %u txn (%u bytes) requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size(), tx_missing_size);
        if (vtx_missing.size() < 5) {
            for (const auto& tx : vtx_missing) {
                LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
            }
        }
    }

    return READ_STATUS_OK;
}