fuzz coverage

Coverage Report

Created: 2025-08-28 15:26

/Users/eugenesiegel/btc/bitcoin/src/blockencodings.cpp
Line | Count | Source
   1 |       | // Copyright (c) 2016-2022 The Bitcoin Core developers
   2 |       | // Distributed under the MIT software license, see the accompanying
   3 |       | // file COPYING or http://www.opensource.org/licenses/mit-license.php.
   4 |       |
   5 |       | #include <blockencodings.h>
   6 |       | #include <chainparams.h>
   7 |       | #include <common/system.h>
   8 |       | #include <consensus/consensus.h>
   9 |       | #include <consensus/validation.h>
  10 |       | #include <crypto/sha256.h>
  11 |       | #include <crypto/siphash.h>
  12 |       | #include <logging.h>
  13 |       | #include <random.h>
  14 |       | #include <streams.h>
  15 |       | #include <txmempool.h>
  16 |       | #include <validation.h>
  17 |       |
  18 |       | #include <unordered_map>
  19 |       |
  20 |       | CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce) :
  21 |  135k |         nonce(nonce),
  22 |  135k |         shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
  23 |  135k |     FillShortTxIDSelector();
  24 |       |     //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
  25 |  135k |     prefilledtxn[0] = {0, block.vtx[0]};
  26 |  286k |     for (size_t i = 1; i < block.vtx.size(); i++) {
  27 |  151k |         const CTransaction& tx = *block.vtx[i];
  28 |  151k |         shorttxids[i - 1] = GetShortID(tx.GetWitnessHash());
  29 |  151k |     }
  30 |  135k | }
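
The constructor above is the sender side of BIP152: the coinbase is prefilled and every other transaction is reduced to a 6-byte short ID keyed by the block header and a per-announcement nonce. A minimal caller-side sketch of how such an announcement could be built (the random-nonce choice and the helper name are assumptions for illustration, not taken from this report):

    // Hypothetical helper: build a compact-block announcement for `block`.
    #include <blockencodings.h>
    #include <random.h>

    CBlockHeaderAndShortTxIDs MakeCompactAnnouncement(const CBlock& block)
    {
        // A fresh nonce per announcement re-keys the short IDs, so collisions
        // cannot be precomputed once and replayed against every peer.
        const uint64_t nonce{FastRandomContext().rand64()};
        return CBlockHeaderAndShortTxIDs{block, nonce};
    }
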
  31 |       |
  32 |  196k | void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
  33 |  196k |     DataStream stream{};
  34 |  196k |     stream << header << nonce;
  35 |  196k |     CSHA256 hasher;
  36 |  196k |     hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
  37 |  196k |     uint256 shorttxidhash;
  38 |  196k |     hasher.Finalize(shorttxidhash.begin());
  39 |  196k |     shorttxidk0 = shorttxidhash.GetUint64(0);
  40 |  196k |     shorttxidk1 = shorttxidhash.GetUint64(1);
  41 |  196k | }
  42 |       |
  43 |  158k | uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const Wtxid& wtxid) const {
  44 |  158k |     static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
  45 |  158k |     return SipHashUint256(shorttxidk0, shorttxidk1, wtxid) & 0xffffffffffffL;
  46 |  158k | }
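
FillShortTxIDSelector() and GetShortID() together implement the short-ID scheme: the SipHash keys k0/k1 are the first two 64-bit words of SHA256(serialized header || nonce), and each short ID is the low 6 bytes of SipHash-2-4 of the wtxid. The same derivation written as a standalone free function, as a sketch that simply mirrors the two functions above (the function name is illustrative):

    #include <crypto/sha256.h>
    #include <crypto/siphash.h>
    #include <primitives/block.h>
    #include <primitives/transaction.h>
    #include <streams.h>
    #include <uint256.h>

    uint64_t ShortIDFor(const CBlockHeader& header, uint64_t nonce, const Wtxid& wtxid)
    {
        // Key selector: SHA256 over the serialized header followed by the nonce.
        DataStream stream{};
        stream << header << nonce;
        CSHA256 hasher;
        hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
        uint256 selector;
        hasher.Finalize(selector.begin());
        // Keep only the low 6 bytes of SipHash-2-4(wtxid), matching SHORTTXIDS_LENGTH == 6.
        return SipHashUint256(selector.GetUint64(0), selector.GetUint64(1), wtxid) & 0xffffffffffffL;
    }
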
  47 |       |
  48 |       |
  49 |       |
  50 | 5.68k | ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<CTransactionRef>& extra_txn) {
  51 | 5.68k |     if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
  52 |     0 |         return READ_STATUS_INVALID;
  53 | 5.68k |     if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
  54 |     0 |         return READ_STATUS_INVALID;
  55 |       |
  56 | 5.68k |     if (!header.IsNull() || !txn_available.empty()) return READ_STATUS_INVALID;
  57 |       |
  58 | 5.68k |     header = cmpctblock.header;
  59 | 5.68k |     txn_available.resize(cmpctblock.BlockTxCount());
  60 |       |
  61 | 5.68k |     int32_t lastprefilledindex = -1;
  62 | 11.8k |     for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
  63 | 6.18k |         if (cmpctblock.prefilledtxn[i].tx->IsNull())
  64 |     0 |             return READ_STATUS_INVALID;
  65 |       |
  66 | 6.18k |         lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
  67 | 6.18k |         if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
  68 |     0 |             return READ_STATUS_INVALID;
  69 | 6.18k |         if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
  70 |       |             // If we are inserting a tx at an index greater than our full list of shorttxids
  71 |       |             // plus the number of prefilled txn we've inserted, then we have txn for which we
  72 |       |             // have neither a prefilled txn or a shorttxid!
  73 |     0 |             return READ_STATUS_INVALID;
  74 |     0 |         }
  75 | 6.18k |         txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
  76 | 6.18k |     }
  77 | 5.68k |     prefilled_count = cmpctblock.prefilledtxn.size();
  78 |       |
  79 |       |     // Calculate map of txids -> positions and check mempool to see what we have (or don't)
  80 |       |     // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
  81 |       |     // of short IDs, any highly-uneven distribution of elements can be safely treated as a
  82 |       |     // READ_STATUS_FAILED.
  83 | 5.68k |     std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
  84 | 5.68k |     uint16_t index_offset = 0;
  85 | 12.0k |     for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
  86 | 11.3k |         while (txn_available[i + index_offset])
  87 | 5.04k |             index_offset++;
  88 | 6.31k |         shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
  89 |       |         // To determine the chance that the number of entries in a bucket exceeds N,
  90 |       |         // we use the fact that the number of elements in a single bucket is
  91 |       |         // binomially distributed (with n = the number of shorttxids S, and p =
  92 |       |         // 1 / the number of buckets), that in the worst case the number of buckets is
  93 |       |         // equal to S (due to std::unordered_map having a default load factor of 1.0),
  94 |       |         // and that the chance for any bucket to exceed N elements is at most
  95 |       |         // buckets * (the chance that any given bucket is above N elements).
  96 |       |         // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
  97 |       |         // If we assume blocks of up to 16000, allowing 12 elements per bucket should
  98 |       |         // only fail once per ~1 million block transfers (per peer and connection).
  99 | 6.31k |         if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
 100 |     0 |             return READ_STATUS_FAILED;
 101 | 6.31k |     }
 102 |       |     // TODO: in the shortid-collision case, we should instead request both transactions
 103 |       |     // which collided. Falling back to full-block-request here is overkill.
 104 | 5.68k |     if (shorttxids.size() != cmpctblock.shorttxids.size())
 105 |     2 |         return READ_STATUS_FAILED; // Short ID collision
 106 |       |
 107 | 5.68k |     std::vector<bool> have_txn(txn_available.size());
 108 | 5.68k |     {
 109 | 5.68k |     LOCK(pool->cs);
 110 | 6.84k |     for (const auto& tx : pool->txns_randomized) {
 111 | 6.84k |         uint64_t shortid = cmpctblock.GetShortID(tx->GetWitnessHash());
 112 | 6.84k |         std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
 113 | 6.84k |         if (idit != shorttxids.end()) {
 114 | 1.66k |             if (!have_txn[idit->second]) {
 115 | 1.66k |                 txn_available[idit->second] = tx;
 116 | 1.66k |                 have_txn[idit->second]  = true;
 117 | 1.66k |                 mempool_count++;
 118 | 1.66k |             } else {
 119 |       |                 // If we find two mempool txn that match the short id, just request it.
 120 |       |                 // This should be rare enough that the extra bandwidth doesn't matter,
 121 |       |                 // but eating a round-trip due to FillBlock failure would be annoying
 122 |     0 |                 if (txn_available[idit->second]) {
 123 |     0 |                     txn_available[idit->second].reset();
 124 |     0 |                     mempool_count--;
 125 |     0 |                 }
 126 |     0 |             }
 127 | 1.66k |         }
 128 |       |         // Though ideally we'd continue scanning for the two-txn-match-shortid case,
 129 |       |         // the performance win of an early exit here is too good to pass up and worth
 130 |       |         // the extra risk.
 131 | 6.84k |         if (mempool_count == shorttxids.size())
 132 |   546 |             break;
 133 | 6.84k |     }
 134 | 5.68k |     }
 135 |       |
 136 | 59.7k |     for (size_t i = 0; i < extra_txn.size(); i++) {
 137 | 54.1k |         if (extra_txn[i] == nullptr) {
 138 | 53.1k |             continue;
 139 | 53.1k |         }
 140 | 1.03k |         uint64_t shortid = cmpctblock.GetShortID(extra_txn[i]->GetWitnessHash());
 141 | 1.03k |         std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
 142 | 1.03k |         if (idit != shorttxids.end()) {
 143 |    52 |             if (!have_txn[idit->second]) {
 144 |    45 |                 txn_available[idit->second] = extra_txn[i];
 145 |    45 |                 have_txn[idit->second]  = true;
 146 |    45 |                 mempool_count++;
 147 |    45 |                 extra_count++;
 148 |    45 |             } else {
 149 |       |                 // If we find two mempool/extra txn that match the short id, just
 150 |       |                 // request it.
 151 |       |                 // This should be rare enough that the extra bandwidth doesn't matter,
 152 |       |                 // but eating a round-trip due to FillBlock failure would be annoying
 153 |       |                 // Note that we don't want duplication between extra_txn and mempool to
 154 |       |                 // trigger this case, so we compare witness hashes first
 155 |     7 |                 if (txn_available[idit->second] &&
 156 |     7 |                         txn_available[idit->second]->GetWitnessHash() != extra_txn[i]->GetWitnessHash()) {
 157 |     0 |                     txn_available[idit->second].reset();
 158 |     0 |                     mempool_count--;
 159 |     0 |                     extra_count--;
 160 |     0 |                 }
 161 |     7 |             }
 162 |    52 |         }
 163 |       |         // Though ideally we'd continue scanning for the two-txn-match-shortid case,
 164 |       |         // the performance win of an early exit here is too good to pass up and worth
 165 |       |         // the extra risk.
 166 | 1.03k |         if (mempool_count == shorttxids.size())
 167 |   175 |             break;
 168 | 1.03k |     }
 169 |       |
 170 | 5.68k |     LogDebug(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock));
 171 |       |
 172 | 5.68k |     return READ_STATUS_OK;
 173 | 5.68k | }
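
One detail worth calling out in InitData(): PrefilledTransaction::index is differentially encoded, so each value is the gap from the previous prefilled slot, and the running sum `lastprefilledindex += index + 1` converts it back to an absolute block position (bounded by the uint16_t check above). A small standalone decoder, offered only as a sketch (the helper name is not from this file):

    #include <cstdint>
    #include <vector>

    // Recover absolute block positions from differentially encoded prefilled indexes.
    std::vector<uint32_t> DecodePrefilledIndexes(const std::vector<uint16_t>& diffs)
    {
        std::vector<uint32_t> absolute;
        int32_t last = -1; // the same sentinel InitData() starts from
        for (const uint16_t diff : diffs) {
            last += diff + 1;                  // e.g. diffs {0, 0, 2} decode to {0, 1, 4}
            absolute.push_back(static_cast<uint32_t>(last));
        }
        return absolute;
    }
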
 174 |       |
 175 |       | bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
 176 | 12.4k | {
 177 | 12.4k |     if (header.IsNull()) return false;
 178 |       |
 179 | 12.4k |     assert(index < txn_available.size());
 180 | 12.4k |     return txn_available[index] != nullptr;
 181 | 12.4k | }
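
IsTxAvailable() is how a receiver, after a successful InitData(), decides which positions still have to be requested from the peer. A caller-side sketch (the helper name is hypothetical; in the real protocol the indexes end up in a getblocktxn request):

    #include <blockencodings.h>
    #include <cstdint>
    #include <vector>

    // Hypothetical helper: list the block positions InitData() could not fill.
    std::vector<uint32_t> MissingIndexes(const PartiallyDownloadedBlock& pdb, size_t block_tx_count)
    {
        std::vector<uint32_t> missing;
        for (size_t i = 0; i < block_tx_count; ++i) {
            if (!pdb.IsTxAvailable(i)) missing.push_back(static_cast<uint32_t>(i));
        }
        return missing;
    }
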
 182 |       |
 183 |       | ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing)
 184 | 2.44k | {
 185 | 2.44k |     if (header.IsNull()) return READ_STATUS_INVALID;
 186 |       |
 187 | 2.43k |     uint256 hash = header.GetHash();
 188 | 2.43k |     block = header;
 189 | 2.43k |     block.vtx.resize(txn_available.size());
 190 |       |
 191 | 2.43k |     size_t tx_missing_offset = 0;
 192 | 6.66k |     for (size_t i = 0; i < txn_available.size(); i++) {
 193 | 5.47k |         if (!txn_available[i]) {
 194 | 1.32k |             if (vtx_missing.size() <= tx_missing_offset)
 195 | 1.24k |                 return READ_STATUS_INVALID;
 196 |    74 |             block.vtx[i] = vtx_missing[tx_missing_offset++];
 197 |    74 |         } else
 198 | 4.15k |             block.vtx[i] = std::move(txn_available[i]);
 199 | 5.47k |     }
 200 |       |
 201 |       |     // Make sure we can't call FillBlock again.
 202 | 1.18k |     header.SetNull();
 203 | 1.18k |     txn_available.clear();
 204 |       |
 205 | 1.18k |     if (vtx_missing.size() != tx_missing_offset)
 206 |    29 |         return READ_STATUS_INVALID;
 207 |       |
 208 | 1.15k |     BlockValidationState state;
 209 | 1.15k |     CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock;
 210 | 1.15k |     if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) {
 211 |       |         // TODO: We really want to just check merkle tree manually here,
 212 |       |         // but that is expensive, and CheckBlock caches a block's
 213 |       |         // "checked-status" (in the CBlock?). CBlock should be able to
 214 |       |         // check its own merkle root and cache that check.
 215 |    14 |         if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED)
 216 |    14 |             return READ_STATUS_FAILED; // Possible Short ID collision
 217 |     0 |         return READ_STATUS_CHECKBLOCK_FAILED;
 218 |    14 |     }
 219 |       |
 220 | 1.14k |     LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
 221 | 1.14k |     if (vtx_missing.size() < 5) {
 222 | 1.14k |         for (const auto& tx : vtx_missing) {
 223 |    31 |             LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
 224 |    31 |         }
 225 | 1.14k |     }
 226 |       |
 227 | 1.14k |     return READ_STATUS_OK;
 228 | 1.15k | }
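
Taken together, InitData(), IsTxAvailable() and FillBlock() form the receive path for a compact block. A condensed caller-side sketch, assuming the peer I/O happens elsewhere and that `requested_txn` already holds the transactions fetched for the unavailable positions (function and parameter names here are placeholders, not from this file):

    #include <blockencodings.h>
    #include <vector>

    // Hypothetical receive-path sketch: InitData -> (request missing) -> FillBlock.
    ReadStatus ReconstructFromCompactBlock(PartiallyDownloadedBlock& pdb,
                                           const CBlockHeaderAndShortTxIDs& cmpctblock,
                                           const std::vector<CTransactionRef>& extra_txn,
                                           const std::vector<CTransactionRef>& requested_txn,
                                           CBlock& block_out)
    {
        if (const ReadStatus status{pdb.InitData(cmpctblock, extra_txn)}; status != READ_STATUS_OK) {
            return status; // malformed, oversized, or badly distributed compact block
        }
        // Positions where IsTxAvailable() is false would be fetched from the peer
        // here; requested_txn stands in for that reply.
        return pdb.FillBlock(block_out, requested_txn);
        // READ_STATUS_FAILED afterwards suggests a short-ID collision or a mutated
        // block: the usual recovery is to fall back to requesting the full block.
    }
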