fuzz coverage

Coverage Report

Created: 2026-04-24 13:48

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/root/bitcoin/src/index/base.cpp
Line
Count
Source
1
// Copyright (c) 2017-present The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <index/base.h>
6
7
#include <chain.h>
8
#include <common/args.h>
9
#include <dbwrapper.h>
10
#include <interfaces/chain.h>
11
#include <interfaces/types.h>
12
#include <kernel/types.h>
13
#include <node/abort.h>
14
#include <node/blockstorage.h>
15
#include <node/context.h>
16
#include <node/database_args.h>
17
#include <node/interface_ui.h>
18
#include <primitives/block.h>
19
#include <sync.h>
20
#include <tinyformat.h>
21
#include <uint256.h>
22
#include <undo.h>
23
#include <util/fs.h>
24
#include <util/log.h>
25
#include <util/string.h>
26
#include <util/thread.h>
27
#include <util/threadinterrupt.h>
28
#include <util/time.h>
29
#include <util/translation.h>
30
#include <validation.h>
31
#include <validationinterface.h>
32
33
#include <cassert>
34
#include <compare>
35
#include <cstdint>
36
#include <memory>
37
#include <optional>
38
#include <stdexcept>
39
#include <string>
40
#include <thread>
41
#include <utility>
42
#include <vector>
43
44
using kernel::ChainstateRole;
45
46
constexpr uint8_t DB_BEST_BLOCK{'B'};
47
48
constexpr auto SYNC_LOG_INTERVAL{30s};
49
constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};
50
51
template <typename... Args>
52
void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
53
0
{
54
0
    auto message = tfm::format(fmt, args...);
55
0
    node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
56
0
}
Unexecuted instantiation: void BaseIndex::FatalErrorf<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>(util::ConstevalFormatString<sizeof...(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>)>, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
Unexecuted instantiation: void BaseIndex::FatalErrorf<int>(util::ConstevalFormatString<sizeof...(int)>, int const&)
57
58
CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
59
0
{
60
0
    CBlockLocator locator;
61
0
    bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator));
62
0
    assert(found);
63
0
    assert(!locator.IsNull());
64
0
    return locator;
65
0
}
66
67
BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
68
0
    CDBWrapper{DBParams{
69
0
        .path = path,
70
0
        .cache_bytes = n_cache_size,
71
0
        .memory_only = f_memory,
72
0
        .wipe_data = f_wipe,
73
0
        .obfuscate = f_obfuscate,
74
0
        .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
75
0
{}
76
77
CBlockLocator BaseIndex::DB::ReadBestBlock() const
78
0
{
79
0
    CBlockLocator locator;
80
81
0
    bool success = Read(DB_BEST_BLOCK, locator);
82
0
    if (!success) {
83
0
        locator.SetNull();
84
0
    }
85
86
0
    return locator;
87
0
}
88
89
void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
90
0
{
91
0
    batch.Write(DB_BEST_BLOCK, locator);
92
0
}
93
94
BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
95
0
    : m_chain{std::move(chain)}, m_name{std::move(name)} {}
96
97
BaseIndex::~BaseIndex()
98
0
{
99
0
    Interrupt();
100
0
    Stop();
101
0
}
102
103
bool BaseIndex::Init()
104
0
{
105
0
    AssertLockNotHeld(cs_main);
Line
Count
Source
149
0
#define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs)
106
107
    // May need reset if index is being restarted.
108
0
    m_interrupt.reset();
109
110
    // m_chainstate member gives indexing code access to node internals. It is
111
    // removed in followup https://github.com/bitcoin/bitcoin/pull/24230
112
0
    m_chainstate = WITH_LOCK(::cs_main,
Line
Count
Source
299
0
#define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }())
113
0
                             return &m_chain->context()->chainman->ValidatedChainstate());
114
    // Register to validation interface before setting the 'm_synced' flag, so that
115
    // callbacks are not missed once m_synced is true.
116
0
    m_chain->context()->validation_signals->RegisterValidationInterface(this);
117
118
0
    const auto locator{GetDB().ReadBestBlock()};
119
120
0
    LOCK(cs_main);
Line
Count
Source
268
0
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
0
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
0
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
0
#define PASTE(x, y) x ## y
121
0
    CChain& index_chain = m_chainstate->m_chain;
122
123
0
    if (locator.IsNull()) {
124
0
        SetBestBlockIndex(nullptr);
125
0
    } else {
126
        // Setting the best block to the locator's top block. If it is not part of the
127
        // best chain, we will rewind to the fork point during index sync
128
0
        const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))};
129
0
        if (!locator_index) {
130
0
            return InitError(Untranslated(strprintf("best block of %s not found. Please rebuild the index.", GetName())));
Line
Count
Source
1172
0
#define strprintf tfm::format
131
0
        }
132
0
        SetBestBlockIndex(locator_index);
133
0
    }
134
135
    // Child init
136
0
    const CBlockIndex* start_block = m_best_block_index.load();
137
0
    if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
138
0
        return false;
139
0
    }
140
141
    // Note: this will latch to true immediately if the user starts up with an empty
142
    // datadir and an index enabled. If this is the case, indexation will happen solely
143
    // via `BlockConnected` signals until, possibly, the next restart.
144
0
    m_synced = start_block == index_chain.Tip();
145
0
    m_init = true;
146
0
    return true;
147
0
}
148
149
static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
150
0
{
151
0
    AssertLockHeld(cs_main);
Line
Count
Source
144
0
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
152
153
0
    if (!pindex_prev) {
154
0
        return chain.Genesis();
155
0
    }
156
157
0
    const CBlockIndex* pindex = chain.Next(pindex_prev);
158
0
    if (pindex) {
159
0
        return pindex;
160
0
    }
161
162
    // Since block is not in the chain, return the next block in the chain AFTER the last common ancestor.
163
    // Caller will be responsible for rewinding back to the common ancestor.
164
0
    return chain.Next(chain.FindFork(pindex_prev));
165
0
}
166
167
bool BaseIndex::ProcessBlock(const CBlockIndex* pindex, const CBlock* block_data)
168
0
{
169
0
    interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block_data);
170
171
0
    CBlock block;
172
0
    if (!block_data) { // disk lookup if block data wasn't provided
173
0
        if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) {
174
0
            FatalErrorf("Failed to read block %s from disk",
175
0
                        pindex->GetBlockHash().ToString());
176
0
            return false;
177
0
        }
178
0
        block_info.data = &block;
179
0
    }
180
181
0
    CBlockUndo block_undo;
182
0
    if (CustomOptions().connect_undo_data) {
183
0
        if (pindex->nHeight > 0 && !m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) {
184
0
            FatalErrorf("Failed to read undo block data %s from disk",
185
0
                        pindex->GetBlockHash().ToString());
186
0
            return false;
187
0
        }
188
0
        block_info.undo_data = &block_undo;
189
0
    }
190
191
0
    if (!CustomAppend(block_info)) {
192
0
        FatalErrorf("Failed to write block %s to index database",
193
0
                    pindex->GetBlockHash().ToString());
194
0
        return false;
195
0
    }
196
197
0
    return true;
198
0
}
199
200
void BaseIndex::Sync()
201
0
{
202
0
    const CBlockIndex* pindex = m_best_block_index.load();
203
0
    if (!m_synced) {
204
0
        auto last_log_time{NodeClock::now()};
205
0
        auto last_locator_write_time{last_log_time};
206
0
        while (true) {
207
0
            if (m_interrupt) {
208
0
                LogInfo("%s: m_interrupt set; exiting ThreadSync", GetName());
Line
Count
Source
97
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
209
210
0
                SetBestBlockIndex(pindex);
211
                // No need to handle errors in Commit. If it fails, the error will already be
212
                // logged. The best way to recover is to continue, as index cannot be corrupted by
213
                // a missed commit to disk for an advanced index state.
214
0
                Commit();
215
0
                return;
216
0
            }
217
218
0
            const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain));
Line
Count
Source
299
0
#define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }())
219
            // If pindex_next is null, it means pindex is the chain tip, so
220
            // commit data indexed so far.
221
0
            if (!pindex_next) {
222
0
                SetBestBlockIndex(pindex);
223
                // No need to handle errors in Commit. See rationale above.
224
0
                Commit();
225
226
                // If pindex is still the chain tip after committing, exit the
227
                // sync loop. It is important for cs_main to be locked while
228
                // setting m_synced = true, otherwise a new block could be
229
                // attached while m_synced is still false, and it would not be
230
                // indexed.
231
0
                LOCK(::cs_main);
Line
Count
Source
268
0
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
0
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
0
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
0
#define PASTE(x, y) x ## y
232
0
                pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
233
0
                if (!pindex_next) {
234
0
                    m_synced = true;
235
0
                    break;
236
0
                }
237
0
            }
238
0
            if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
239
0
                FatalErrorf("Failed to rewind %s to a previous chain tip", GetName());
240
0
                return;
241
0
            }
242
0
            pindex = pindex_next;
243
244
245
0
            if (!ProcessBlock(pindex)) return; // error logged internally
246
247
0
            auto current_time{NodeClock::now()};
248
0
            if (current_time - last_log_time >= SYNC_LOG_INTERVAL) {
249
0
                LogInfo("Syncing %s with block chain from height %d", GetName(), pindex->nHeight);
Line
Count
Source
97
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
250
0
                last_log_time = current_time;
251
0
            }
252
253
0
            if (current_time - last_locator_write_time >= SYNC_LOCATOR_WRITE_INTERVAL) {
254
0
                SetBestBlockIndex(pindex);
255
0
                last_locator_write_time = current_time;
256
                // No need to handle errors in Commit. See rationale above.
257
0
                Commit();
258
0
            }
259
0
        }
260
0
    }
261
262
0
    if (pindex) {
263
0
        LogInfo("%s is enabled at height %d", GetName(), pindex->nHeight);
Line
Count
Source
97
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
264
0
    } else {
265
0
        LogInfo("%s is enabled", GetName());
Line
Count
Source
97
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
266
0
    }
267
0
}
268
269
bool BaseIndex::Commit()
270
0
{
271
    // Don't commit anything if we haven't indexed any block yet
272
    // (this could happen if init is interrupted).
273
0
    bool ok = m_best_block_index != nullptr;
274
0
    if (ok) {
275
0
        CDBBatch batch(GetDB());
276
0
        ok = CustomCommit(batch);
277
0
        if (ok) {
278
0
            GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
279
0
            GetDB().WriteBatch(batch);
280
0
        }
281
0
    }
282
0
    if (!ok) {
283
0
        LogError("Failed to commit latest %s state", GetName());
Line
Count
Source
99
0
#define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
284
0
        return false;
285
0
    }
286
0
    return true;
287
0
}
288
289
bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
290
0
{
291
0
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
292
293
0
    CBlock block;
294
0
    CBlockUndo block_undo;
295
296
0
    for (const CBlockIndex* iter_tip = current_tip; iter_tip != new_tip; iter_tip = iter_tip->pprev) {
297
0
        interfaces::BlockInfo block_info = kernel::MakeBlockInfo(iter_tip);
298
0
        if (CustomOptions().disconnect_data) {
299
0
            if (!m_chainstate->m_blockman.ReadBlock(block, *iter_tip)) {
300
0
                LogError("Failed to read block %s from disk",
Line
Count
Source
99
0
#define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
301
0
                         iter_tip->GetBlockHash().ToString());
302
0
                return false;
303
0
            }
304
0
            block_info.data = &block;
305
0
        }
306
0
        if (CustomOptions().disconnect_undo_data && iter_tip->nHeight > 0) {
307
0
            if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *iter_tip)) {
308
0
                return false;
309
0
            }
310
0
            block_info.undo_data = &block_undo;
311
0
        }
312
0
        if (!CustomRemove(block_info)) {
313
0
            return false;
314
0
        }
315
0
    }
316
317
    // Don't commit here - the committed index state must never be ahead of the
318
    // flushed chainstate, otherwise unclean restarts would lead to index corruption.
319
    // Pruning has a minimum of 288 blocks-to-keep and getting the index
320
    // out of sync may be possible but a user's fault.
321
    // In case we reorg beyond the pruned depth, ReadBlock would
322
    // throw and lead to a graceful shutdown
323
0
    SetBestBlockIndex(new_tip);
324
0
    return true;
325
0
}
326
327
void BaseIndex::BlockConnected(const ChainstateRole& role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
328
0
{
329
    // Ignore events from not fully validated chains to avoid out-of-order indexing.
330
    //
331
    // TODO at some point we could parameterize whether a particular index can be
332
    // built out of order, but for now just do the conservative simple thing.
333
0
    if (!role.validated) {
334
0
        return;
335
0
    }
336
337
    // Ignore BlockConnected signals until we have fully indexed the chain.
338
0
    if (!m_synced) {
339
0
        return;
340
0
    }
341
342
0
    const CBlockIndex* best_block_index = m_best_block_index.load();
343
0
    if (!best_block_index) {
344
0
        if (pindex->nHeight != 0) {
345
0
            FatalErrorf("First block connected is not the genesis block (height=%d)",
346
0
                       pindex->nHeight);
347
0
            return;
348
0
        }
349
0
    } else {
350
        // Ensure block connects to an ancestor of the current best block. This should be the case
351
        // most of the time, but may not be immediately after the sync thread catches up and sets
352
        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
353
        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
354
        // new chain tip. In this unlikely event, log a warning and let the queue clear.
355
0
        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
356
0
            LogWarning("Block %s does not connect to an ancestor of "
Line
Count
Source
98
0
#define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
357
0
                      "known best chain (tip=%s); not updating index",
358
0
                      pindex->GetBlockHash().ToString(),
359
0
                      best_block_index->GetBlockHash().ToString());
360
0
            return;
361
0
        }
362
0
        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
363
0
            FatalErrorf("Failed to rewind %s to a previous chain tip",
364
0
                       GetName());
365
0
            return;
366
0
        }
367
0
    }
368
369
    // Dispatch block to child class; errors are logged internally and abort the node.
370
0
    if (ProcessBlock(pindex, block.get())) {
371
        // Setting the best block index is intentionally the last step of this
372
        // function, so BlockUntilSyncedToCurrentChain callers waiting for the
373
        // best block index to be updated can rely on the block being fully
374
        // processed, and the index object being safe to delete.
375
0
        SetBestBlockIndex(pindex);
376
0
    }
377
0
}
378
379
void BaseIndex::ChainStateFlushed(const ChainstateRole& role, const CBlockLocator& locator)
380
0
{
381
    // Ignore events from not fully validated chains to avoid out-of-order indexing.
382
0
    if (!role.validated) {
383
0
        return;
384
0
    }
385
386
0
    if (!m_synced) {
387
0
        return;
388
0
    }
389
390
0
    const uint256& locator_tip_hash = locator.vHave.front();
391
0
    const CBlockIndex* locator_tip_index;
392
0
    {
393
0
        LOCK(cs_main);
Line
Count
Source
268
0
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
0
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
0
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
0
#define PASTE(x, y) x ## y
394
0
        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
395
0
    }
396
397
0
    if (!locator_tip_index) {
398
0
        FatalErrorf("First block (hash=%s) in locator was not found",
399
0
                   locator_tip_hash.ToString());
400
0
        return;
401
0
    }
402
403
    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
404
    // immediately after the sync thread catches up and sets m_synced. Consider the case where
405
    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
406
    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
407
    // event, log a warning and let the queue clear.
408
0
    const CBlockIndex* best_block_index = m_best_block_index.load();
409
0
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
410
0
        LogWarning("Locator contains block (hash=%s) not on known best "
Line
Count
Source
98
0
#define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
411
0
                  "chain (tip=%s); not writing index locator",
412
0
                  locator_tip_hash.ToString(),
413
0
                  best_block_index->GetBlockHash().ToString());
414
0
        return;
415
0
    }
416
417
    // No need to handle errors in Commit. If it fails, the error will already be logged. The
418
    // best way to recover is to continue, as index cannot be corrupted by a missed commit to disk
419
    // for an advanced index state.
420
0
    Commit();
421
0
}
422
423
bool BaseIndex::BlockUntilSyncedToCurrentChain() const
424
0
{
425
0
    AssertLockNotHeld(cs_main);
Line
Count
Source
149
0
#define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs)
426
427
0
    if (!m_synced) {
428
0
        return false;
429
0
    }
430
431
0
    {
432
        // Skip the queue-draining stuff if we know we're caught up with
433
        // m_chain.Tip().
434
0
        LOCK(cs_main);
Line
Count
Source
268
0
#define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__)
Line
Count
Source
11
0
#define UNIQUE_NAME(name) PASTE2(name, __COUNTER__)
Line
Count
Source
9
0
#define PASTE2(x, y) PASTE(x, y)
Line
Count
Source
8
0
#define PASTE(x, y) x ## y
435
0
        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
436
0
        const CBlockIndex* best_block_index = m_best_block_index.load();
437
0
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
438
0
            return true;
439
0
        }
440
0
    }
441
442
0
    LogInfo("%s is catching up on block notifications", GetName());
Line
Count
Source
97
0
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
Line
Count
Source
91
0
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__)
443
0
    m_chain->context()->validation_signals->SyncWithValidationInterfaceQueue();
444
0
    return true;
445
0
}
446
447
void BaseIndex::Interrupt()
448
0
{
449
0
    m_interrupt();
450
0
}
451
452
bool BaseIndex::StartBackgroundSync()
453
0
{
454
0
    if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");
455
456
0
    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); });
457
0
    return true;
458
0
}
459
460
void BaseIndex::Stop()
461
0
{
462
0
    if (m_chain->context()->validation_signals) {
463
0
        m_chain->context()->validation_signals->UnregisterValidationInterface(this);
464
0
    }
465
466
0
    if (m_thread_sync.joinable()) {
467
0
        m_thread_sync.join();
468
0
    }
469
0
}
470
471
IndexSummary BaseIndex::GetSummary() const
472
0
{
473
0
    IndexSummary summary{};
474
0
    summary.name = GetName();
475
0
    summary.synced = m_synced;
476
0
    if (const auto& pindex = m_best_block_index.load()) {
477
0
        summary.best_block_height = pindex->nHeight;
478
0
        summary.best_block_hash = pindex->GetBlockHash();
479
0
    } else {
480
0
        summary.best_block_height = 0;
481
0
        summary.best_block_hash = m_chain->getBlockHash(0);
482
0
    }
483
0
    return summary;
484
0
}
485
486
void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
487
0
{
488
0
    assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());
489
490
0
    if (AllowPrune() && block) {
491
0
        node::PruneLockInfo prune_lock;
492
0
        prune_lock.height_first = block->nHeight;
493
0
        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
Line
Count
Source
299
0
#define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }())
494
0
    }
495
496
    // Intentionally set m_best_block_index as the last step in this function,
497
    // after updating prune locks above, and after making any other references
498
    // to *this, so the BlockUntilSyncedToCurrentChain function (which checks
499
    // m_best_block_index as an optimization) can be used to wait for the last
500
    // BlockConnected notification and safely assume that prune locks are
501
    // updated and that the index object is safe to delete.
502
0
    m_best_block_index = block;
503
0
}