/Users/eugenesiegel/btc/bitcoin/src/node/blockstorage.cpp
| Line | Count | Source | 
| 1 |  | // Copyright (c) 2011-2022 The Bitcoin Core developers | 
| 2 |  | // Distributed under the MIT software license, see the accompanying | 
| 3 |  | // file COPYING or http://www.opensource.org/licenses/mit-license.php. | 
| 4 |  |  | 
| 5 |  | #include <node/blockstorage.h> | 
| 6 |  |  | 
| 7 |  | #include <arith_uint256.h> | 
| 8 |  | #include <chain.h> | 
| 9 |  | #include <consensus/params.h> | 
| 10 |  | #include <consensus/validation.h> | 
| 11 |  | #include <dbwrapper.h> | 
| 12 |  | #include <flatfile.h> | 
| 13 |  | #include <hash.h> | 
| 14 |  | #include <kernel/blockmanager_opts.h> | 
| 15 |  | #include <kernel/chainparams.h> | 
| 16 |  | #include <kernel/messagestartchars.h> | 
| 17 |  | #include <kernel/notifications_interface.h> | 
| 18 |  | #include <logging.h> | 
| 19 |  | #include <pow.h> | 
| 20 |  | #include <primitives/block.h> | 
| 21 |  | #include <primitives/transaction.h> | 
| 22 |  | #include <random.h> | 
| 23 |  | #include <serialize.h> | 
| 24 |  | #include <signet.h> | 
| 25 |  | #include <span.h> | 
| 26 |  | #include <streams.h> | 
| 27 |  | #include <sync.h> | 
| 28 |  | #include <tinyformat.h> | 
| 29 |  | #include <uint256.h> | 
| 30 |  | #include <undo.h> | 
| 31 |  | #include <util/batchpriority.h> | 
| 32 |  | #include <util/check.h> | 
| 33 |  | #include <util/fs.h> | 
| 34 |  | #include <util/obfuscation.h> | 
| 35 |  | #include <util/signalinterrupt.h> | 
| 36 |  | #include <util/strencodings.h> | 
| 37 |  | #include <util/syserror.h> | 
| 38 |  | #include <util/translation.h> | 
| 39 |  | #include <validation.h> | 
| 40 |  |  | 
| 41 |  | #include <cstddef> | 
| 42 |  | #include <map> | 
| 43 |  | #include <optional> | 
| 44 |  | #include <unordered_map> | 
| 45 |  |  | 
| 46 |  | namespace kernel { | 
| 47 |  | static constexpr uint8_t DB_BLOCK_FILES{'f'}; | 
| 48 |  | static constexpr uint8_t DB_BLOCK_INDEX{'b'}; | 
| 49 |  | static constexpr uint8_t DB_FLAG{'F'}; | 
| 50 |  | static constexpr uint8_t DB_REINDEX_FLAG{'R'}; | 
| 51 |  | static constexpr uint8_t DB_LAST_BLOCK{'l'}; | 
| 52 |  | // Keys used in previous version that might still be found in the DB: | 
| 53 |  | // BlockTreeDB::DB_TXINDEX_BLOCK{'T'}; | 
| 54 |  | // BlockTreeDB::DB_TXINDEX{'t'} | 
| 55 |  | // BlockTreeDB::ReadFlag("txindex") | 
| 56 |  |  | 
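The single-byte prefixes above partition the key space of the block tree database: records are keyed either by the bare prefix (DB_LAST_BLOCK, DB_REINDEX_FLAG) or by a (prefix, value) pair such as (DB_BLOCK_FILES, file number) or (DB_BLOCK_INDEX, block hash). Below is a minimal standalone sketch of that keying scheme; the std::map and helper functions are illustrative stand-ins, not the real CDBWrapper API.

```cpp
// Hypothetical sketch (not the real CDBWrapper API): shows how the single-byte
// prefixes above partition the block tree database key space.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

static std::string FileInfoKey(int file)                      // (DB_BLOCK_FILES, nFile)
{
    return std::string(1, 'f') + std::to_string(file);
}

static std::string BlockIndexKey(const std::string& hash_hex) // (DB_BLOCK_INDEX, hash)
{
    return std::string(1, 'b') + hash_hex;
}

int main()
{
    std::map<std::string, std::string> db; // stand-in for the LevelDB instance
    db[FileInfoKey(0)] = "<serialized CBlockFileInfo>";
    db[std::string(1, 'l')] = "0";         // DB_LAST_BLOCK -> last block file number
    db[BlockIndexKey("00000000...")] = "<serialized CDiskBlockIndex>";

    // Iterating keys that share the 'b' prefix visits every stored block header,
    // which is what LoadBlockIndexGuts does with a real database cursor.
    for (const auto& [key, value] : db) {
        if (key[0] == 'b') std::cout << key << " -> " << value << '\n';
    }
}
```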
| 57 |  | bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info) | 
| 58 | 102k | { | 
| 59 | 102k |     return Read(std::make_pair(DB_BLOCK_FILES, nFile), info); | 
| 60 | 102k | } | 
| 61 |  |  | 
| 62 |  | bool BlockTreeDB::WriteReindexing(bool fReindexing) | 
| 63 | 0 | { | 
| 64 | 0 |     if (fReindexing) { | 
| 65 | 0 |         return Write(DB_REINDEX_FLAG, uint8_t{'1'}); | 
| 66 | 0 |     } else { | 
| 67 | 0 |         return Erase(DB_REINDEX_FLAG); | 
| 68 | 0 |     } | 
| 69 | 0 | } | 
| 70 |  |  | 
| 71 |  | void BlockTreeDB::ReadReindexing(bool& fReindexing) | 
| 72 | 51.2k | { | 
| 73 | 51.2k |     fReindexing = Exists(DB_REINDEX_FLAG); | 
| 74 | 51.2k | } | 
| 75 |  |  | 
| 76 |  | bool BlockTreeDB::ReadLastBlockFile(int& nFile) | 
| 77 | 51.2k | { | 
| 78 | 51.2k |     return Read(DB_LAST_BLOCK, nFile); | 
| 79 | 51.2k | } | 
| 80 |  |  | 
| 81 |  | bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo) | 
| 82 | 4.58k | { | 
| 83 | 4.58k |     CDBBatch batch(*this); | 
| 84 | 4.58k |     for (const auto& [file, info] : fileInfo) { | 
| 85 | 1.89k |         batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info); | 
| 86 | 1.89k |     } | 
| 87 | 4.58k |     batch.Write(DB_LAST_BLOCK, nLastFile); | 
| 88 | 10.4k |     for (const CBlockIndex* bi : blockinfo) { | 
| 89 | 10.4k |         batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi}); | 
| 90 | 10.4k |     } | 
| 91 | 4.58k |     return WriteBatch(batch, true); | 
| 92 | 4.58k | } | 
| 93 |  |  | 
| 94 |  | bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue) | 
| 95 | 0 | { | 
| 96 | 0 |     return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'}); | 
| 97 | 0 | } | 
| 98 |  |  | 
| 99 |  | bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue) | 
| 100 | 51.2k | { | 
| 101 | 51.2k |     uint8_t ch; | 
| 102 | 51.2k |     if (!Read(std::make_pair(DB_FLAG, name), ch)) { | 
| 103 | 51.2k |         return false; | 
| 104 | 51.2k |     } | 
| 105 | 0 |     fValue = ch == uint8_t{'1'}; | 
| 106 | 0 |     return true; | 
| 107 | 51.2k | } | 
| 108 |  |  | 
| 109 |  | bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt) | 
| 110 | 51.2k | { | 
| 111 | 51.2k |     AssertLockHeld(::cs_main); | 
| 112 | 51.2k |     std::unique_ptr<CDBIterator> pcursor(NewIterator()); | 
| 113 | 51.2k |     pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); | 
| 114 |  |  | 
| 115 |  |     // Load m_block_index | 
| 116 | 10.3M |     while (pcursor->Valid()) { | 
| 117 | 10.3M |         if (interrupt) return false; | 
| 118 | 10.3M |         std::pair<uint8_t, uint256> key; | 
| 119 | 10.3M |         if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) { | 
| 120 | 10.3M |             CDiskBlockIndex diskindex; | 
| 121 | 10.3M |             if (pcursor->GetValue(diskindex)) { | 
| 122 |  |                 // Construct block index object | 
| 123 | 10.3M |                 CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash()); | 
| 124 | 10.3M |                 pindexNew->pprev          = insertBlockIndex(diskindex.hashPrev); | 
| 125 | 10.3M |                 pindexNew->nHeight        = diskindex.nHeight; | 
| 126 | 10.3M |                 pindexNew->nFile          = diskindex.nFile; | 
| 127 | 10.3M |                 pindexNew->nDataPos       = diskindex.nDataPos; | 
| 128 | 10.3M |                 pindexNew->nUndoPos       = diskindex.nUndoPos; | 
| 129 | 10.3M |                 pindexNew->nVersion       = diskindex.nVersion; | 
| 130 | 10.3M |                 pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot; | 
| 131 | 10.3M |                 pindexNew->nTime          = diskindex.nTime; | 
| 132 | 10.3M |                 pindexNew->nBits          = diskindex.nBits; | 
| 133 | 10.3M |                 pindexNew->nNonce         = diskindex.nNonce; | 
| 134 | 10.3M |                 pindexNew->nStatus        = diskindex.nStatus; | 
| 135 | 10.3M |                 pindexNew->nTx            = diskindex.nTx; | 
| 136 |  |  | 
| 137 | 10.3M |                 if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) { | 
| 138 | 0 |                     LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString()); | 
| 139 | 0 |                     return false; | 
| 140 | 0 |                 } | 
| 141 |  |  | 
| 142 | 10.3M |                 pcursor->Next(); | 
| 143 | 10.3M |             } else { | 
| 144 | 0 |                 LogError("%s: failed to read value\n", __func__); | 
| 145 | 0 |                 return false; | 
| 146 | 0 |             } | 
| 147 | 10.3M |         } else { | 
| 148 | 51.2k |             break; | 
| 149 | 51.2k |         } | 
| 150 | 10.3M |     } | 
| 151 |  |  | 
| 152 | 51.2k |     return true; | 
| 153 | 51.2k | } | 
| 154 |  | } // namespace kernel | 
| 155 |  |  | 
| 156 |  | namespace node { | 
| 157 |  |  | 
| 158 |  | bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const | 
| 159 | 1.48G | { | 
| 160 |  |     // First sort by most total work, ... | 
| 161 | 1.48G |     if (pa->nChainWork > pb->nChainWork) return false; | 
| 162 | 963M |     if (pa->nChainWork < pb->nChainWork) return true; | 
| 163 |  |  | 
| 164 |  |     // ... then by earliest time received, ... | 
| 165 | 8.72M |     if (pa->nSequenceId < pb->nSequenceId) return false; | 
| 166 | 7.53M |     if (pa->nSequenceId > pb->nSequenceId) return true; | 
| 167 |  |  | 
| 168 |  |     // Use pointer address as tie breaker (should only happen with blocks | 
| 169 |  |     // loaded from disk, as those all have id 0). | 
| 170 | 6.32M |     if (pa < pb) return false; | 
| 171 | 6.32M |     if (pa > pb) return true; | 
| 172 |  |  | 
| 173 |  |     // Identical blocks. | 
| 174 | 6.32M |     return false; | 
| 175 | 6.32M | } | 
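The comparator above orders block indexes by ascending chain work, breaking ties by receive order and finally by pointer address, so a std::set using it keeps the most-preferred candidate (most work, earliest received) at its end and can be read from rbegin(). A minimal sketch with a hypothetical stand-in struct, for illustration only:

```cpp
// Hypothetical sketch: the real comparator orders CBlockIndex*; a small stand-in
// struct shows the same rule, where the most-preferred entry sorts last (rbegin()).
#include <cstdint>
#include <iostream>
#include <set>

struct FakeIndex {
    uint64_t chain_work;  // stands in for arith_uint256 nChainWork
    int32_t sequence_id;  // stands in for nSequenceId (0 = loaded from disk)
};

struct WorkComparator {
    bool operator()(const FakeIndex* a, const FakeIndex* b) const
    {
        if (a->chain_work > b->chain_work) return false;    // more work sorts later
        if (a->chain_work < b->chain_work) return true;
        if (a->sequence_id < b->sequence_id) return false;  // earlier received sorts later
        if (a->sequence_id > b->sequence_id) return true;
        if (a < b) return false;                            // pointer tie-breaker
        if (a > b) return true;
        return false;                                       // identical entries
    }
};

int main()
{
    FakeIndex tip_a{100, 2}, tip_b{100, 1}, tip_c{90, 3};
    std::set<const FakeIndex*, WorkComparator> candidates{&tip_a, &tip_b, &tip_c};
    // The preferred candidate (most work, then earliest sequence id) is at rbegin().
    std::cout << "preferred sequence id: " << (*candidates.rbegin())->sequence_id << '\n'; // prints 1
}
```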
| 176 |  |  | 
| 177 |  | bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const | 
| 178 | 189M | { | 
| 179 | 189M |     return pa->nHeight < pb->nHeight; | 
| 180 | 189M | } | 
| 181 |  |  | 
| 182 |  | bool CBlockIndexBlockHashComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const | 
| 183 | 504k | { | 
| 184 | 504k |     return UintToArith256(pa->GetBlockHash()) < UintToArith256(pb->GetBlockHash()); | 
| 185 | 504k | } | 
| 186 |  |  | 
| 187 |  | std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices() | 
| 188 | 102k | { | 
| 189 | 102k |     AssertLockHeld(cs_main); | 
| 190 | 102k |     std::vector<CBlockIndex*> rv; | 
| 191 | 102k |     rv.reserve(m_block_index.size()); | 
| 192 | 20.6M |     for (auto& [_, block_index] : m_block_index) { | 
| 193 | 20.6M |         rv.push_back(&block_index); | 
| 194 | 20.6M |     } | 
| 195 | 102k |     return rv; | 
| 196 | 102k | } | 
| 197 |  |  | 
| 198 |  | CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) | 
| 199 | 8.89M | { | 
| 200 | 8.89M |     AssertLockHeld(cs_main); | 
| 201 | 8.89M |     BlockMap::iterator it = m_block_index.find(hash); | 
| 202 | 8.89M |     return it == m_block_index.end() ? nullptr : &it->second; | 
| 203 | 8.89M | } | 
| 204 |  |  | 
| 205 |  | const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const | 
| 206 | 0 | { | 
| 207 | 0 |     AssertLockHeld(cs_main); | 
| 208 | 0 |     BlockMap::const_iterator it = m_block_index.find(hash); | 
| 209 | 0 |     return it == m_block_index.end() ? nullptr : &it->second; | 
| 210 | 0 | } | 
| 211 |  |  | 
| 212 |  | CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header) | 
| 213 | 108k | { | 
| 214 | 108k |     AssertLockHeld(cs_main); | 
| 215 |  |  | 
| 216 | 108k |     auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block); | 
| 217 | 108k |     if (!inserted) { | 
| 218 | 0 |         return &mi->second; | 
| 219 | 0 |     } | 
| 220 | 108k |     CBlockIndex* pindexNew = &(*mi).second; | 
| 221 |  |  | 
| 222 |  |     // We assign the sequence id to blocks only when the full data is available, | 
| 223 |  |     // to avoid miners withholding blocks but broadcasting headers, to get a | 
| 224 |  |     // competitive advantage. | 
| 225 | 108k |     pindexNew->nSequenceId = 0; | 
| 226 |  |  | 
| 227 | 108k |     pindexNew->phashBlock = &((*mi).first); | 
| 228 | 108k |     BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock); | 
| 229 | 108k |     if (miPrev != m_block_index.end()) { | 
| 230 | 108k |         pindexNew->pprev = &(*miPrev).second; | 
| 231 | 108k |         pindexNew->nHeight = pindexNew->pprev->nHeight + 1; | 
| 232 | 108k |         pindexNew->BuildSkip(); | 
| 233 | 108k |     } | 
| 234 | 108k |     pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime); | 
| 235 | 108k |     pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); | 
| 236 | 108k |     pindexNew->RaiseValidity(BLOCK_VALID_TREE); | 
| 237 | 108k |     if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) { | 
| 238 | 46.9k |         best_header = pindexNew; | 
| 239 | 46.9k |     } | 
| 240 |  |  | 
| 241 | 108k |     m_dirty_blockindex.insert(pindexNew); | 
| 242 |  |  | 
| 243 | 108k |     return pindexNew; | 
| 244 | 108k | } | 
| 245 |  |  | 
| 246 |  | void BlockManager::PruneOneBlockFile(const int fileNumber) | 
| 247 | 0 | { | 
| 248 | 0 |     AssertLockHeld(cs_main); | 
| 249 | 0 |     LOCK(cs_LastBlockFile); | 
| 250 |  |  | 
 | 
| 251 | 0 |     for (auto& entry : m_block_index) { | 
| 252 | 0 |         CBlockIndex* pindex = &entry.second; | 
| 253 | 0 |         if (pindex->nFile == fileNumber) { | 
| 254 | 0 |             pindex->nStatus &= ~BLOCK_HAVE_DATA; | 
| 255 | 0 |             pindex->nStatus &= ~BLOCK_HAVE_UNDO; | 
| 256 | 0 |             pindex->nFile = 0; | 
| 257 | 0 |             pindex->nDataPos = 0; | 
| 258 | 0 |             pindex->nUndoPos = 0; | 
| 259 | 0 |             m_dirty_blockindex.insert(pindex); | 
| 260 |  |  | 
| 261 |  |             // Prune from m_blocks_unlinked -- any block we prune would have | 
| 262 |  |             // to be downloaded again in order to consider its chain, at which | 
| 263 |  |             // point it would be considered as a candidate for | 
| 264 |  |             // m_blocks_unlinked or setBlockIndexCandidates. | 
| 265 | 0 |             auto range = m_blocks_unlinked.equal_range(pindex->pprev); | 
| 266 | 0 |             while (range.first != range.second) { | 
| 267 | 0 |                 std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first; | 
| 268 | 0 |                 range.first++; | 
| 269 | 0 |                 if (_it->second == pindex) { | 
| 270 | 0 |                     m_blocks_unlinked.erase(_it); | 
| 271 | 0 |                 } | 
| 272 | 0 |             } | 
| 273 | 0 |         } | 
| 274 | 0 |     } | 
| 275 |  |  | 
| 276 | 0 |     m_blockfile_info.at(fileNumber) = CBlockFileInfo{}; | 
| 277 | 0 |     m_dirty_fileinfo.insert(fileNumber); | 
| 278 | 0 | } | 
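The loop above erases matching entries from the m_blocks_unlinked multimap while iterating an equal_range, advancing the range iterator before each possible erase so that removing one element never invalidates the iterator still in use. A self-contained sketch of that pattern, with toy types for illustration only:

```cpp
// Standalone sketch of the erase-while-iterating pattern used above for
// m_blocks_unlinked: remove only the (parent, child) entries whose child
// matches, without invalidating the iterator the loop still needs.
#include <iostream>
#include <map>
#include <string>

int main()
{
    std::multimap<std::string, std::string> unlinked{
        {"parent", "child_kept"}, {"parent", "child_pruned"}, {"other", "child_pruned"}};

    const std::string pruned{"child_pruned"};
    auto range = unlinked.equal_range("parent"); // only this parent's entries
    while (range.first != range.second) {
        auto it = range.first;
        ++range.first;                 // advance before a possible erase
        if (it->second == pruned) {
            unlinked.erase(it);        // safe: only 'it' is invalidated
        }
    }

    for (const auto& [parent, child] : unlinked) {
        std::cout << parent << " -> " << child << '\n';
    }
    // Prints: other -> child_pruned, then parent -> child_kept (keys are sorted)
}
```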
| 279 |  |  | 
| 280 |  | void BlockManager::FindFilesToPruneManual( | 
| 281 |  |     std::set<int>& setFilesToPrune, | 
| 282 |  |     int nManualPruneHeight, | 
| 283 |  |     const Chainstate& chain, | 
| 284 |  |     ChainstateManager& chainman) | 
| 285 | 0 | { | 
| 286 | 0 |     assert(IsPruneMode() && nManualPruneHeight > 0); | 
| 287 |  |  | 
| 288 | 0 |     LOCK2(cs_main, cs_LastBlockFile); | 
| 289 | 0 |     if (chain.m_chain.Height() < 0) { | 
| 290 | 0 |         return; | 
| 291 | 0 |     } | 
| 292 |  |  | 
| 293 | 0 |     const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight); | 
| 294 |  | 
 | 
| 295 | 0 |     int count = 0; | 
| 296 | 0 |     for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) { | 
| 297 | 0 |         const auto& fileinfo = m_blockfile_info[fileNumber]; | 
| 298 | 0 |         if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { | 
| 299 | 0 |             continue; | 
| 300 | 0 |         } | 
| 301 |  |  | 
| 302 | 0 |         PruneOneBlockFile(fileNumber); | 
| 303 | 0 |         setFilesToPrune.insert(fileNumber); | 
| 304 | 0 |         count++; | 
| 305 | 0 |     } | 
| 306 | 0 |     LogInfo("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs", | 
| 307 | 0 |         chain.GetRole(), last_block_can_prune, count); | 
| 308 | 0 | } | 
| 309 |  |  | 
| 310 |  | void BlockManager::FindFilesToPrune( | 
| 311 |  |     std::set<int>& setFilesToPrune, | 
| 312 |  |     int last_prune, | 
| 313 |  |     const Chainstate& chain, | 
| 314 |  |     ChainstateManager& chainman) | 
| 315 | 0 | { | 
| 316 | 0 |     LOCK2(cs_main, cs_LastBlockFile); | 
| 317 |  |     // Distribute our -prune budget over all chainstates. | 
| 318 | 0 |     const auto target = std::max( | 
| 319 | 0 |         MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size()); | 
| 320 | 0 |     const uint64_t target_sync_height = chainman.m_best_header->nHeight; | 
| 321 |  | 
 | 
| 322 | 0 |     if (chain.m_chain.Height() < 0 || target == 0) { | 
| 323 | 0 |         return; | 
| 324 | 0 |     } | 
| 325 | 0 |     if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) { | 
| 326 | 0 |         return; | 
| 327 | 0 |     } | 
| 328 |  |  | 
| 329 | 0 |     const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune); | 
| 330 |  |  | 
| 331 | 0 |     uint64_t nCurrentUsage = CalculateCurrentUsage(); | 
| 332 |  |     // We don't check to prune until after we've allocated new space for files | 
| 333 |  |     // So we should leave a buffer under our target to account for another allocation | 
| 334 |  |     // before the next pruning. | 
| 335 | 0 |     uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; | 
| 336 | 0 |     uint64_t nBytesToPrune; | 
| 337 | 0 |     int count = 0; | 
| 338 |  |  | 
| 339 | 0 |     if (nCurrentUsage + nBuffer >= target) { | 
| 340 |  |         // On a prune event, the chainstate DB is flushed. | 
| 341 |  |         // To avoid excessive prune events negating the benefit of high dbcache | 
| 342 |  |         // values, we should not prune too rapidly. | 
| 343 |  |         // So when pruning in IBD, increase the buffer to avoid a re-prune too soon. | 
| 344 | 0 |         const auto chain_tip_height = chain.m_chain.Height(); | 
| 345 | 0 |         if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) { | 
| 346 |  |             // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average | 
| 347 | 0 |             static constexpr uint64_t average_block_size = 1000000;  /* 1 MB */ | 
| 348 | 0 |             const uint64_t remaining_blocks = target_sync_height - chain_tip_height; | 
| 349 | 0 |             nBuffer += average_block_size * remaining_blocks; | 
| 350 | 0 |         } | 
| 351 |  |  | 
| 352 | 0 |         for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) { | 
| 353 | 0 |             const auto& fileinfo = m_blockfile_info[fileNumber]; | 
| 354 | 0 |             nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize; | 
| 355 |  | 
| 355 |  |  | 
| 356 | 0 |             if (fileinfo.nSize == 0) { | 
| 357 | 0 |                 continue; | 
| 358 | 0 |             } | 
| 359 |  |  | 
| 360 | 0 |             if (nCurrentUsage + nBuffer < target) { // are we below our target? | 
| 361 | 0 |                 break; | 
| 362 | 0 |             } | 
| 363 |  |  | 
| 364 |  |             // don't prune files that could have a block that's not within the allowable | 
| 365 |  |             // prune range for the chain being pruned. | 
| 366 | 0 |             if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { | 
| 367 | 0 |                 continue; | 
| 368 | 0 |             } | 
| 369 |  |  | 
| 370 | 0 |             PruneOneBlockFile(fileNumber); | 
| 371 |  |             // Queue up the files for removal | 
| 372 | 0 |             setFilesToPrune.insert(fileNumber); | 
| 373 | 0 |             nCurrentUsage -= nBytesToPrune; | 
| 374 | 0 |             count++; | 
| 375 | 0 |         } | 
| 376 | 0 |     } | 
| 377 |  |  | 
| 378 | 0 |     LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n", | 
| 379 | 0 |              chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024, | 
| 380 | 0 |              (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024, | 
| 381 | 0 |              min_block_to_prune, last_block_can_prune, count); | 
| 382 | 0 | } | 
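The buffer arithmetic above can be made concrete with a rough worked example. The numbers below are made up and the chunk sizes are assumptions for illustration; the point is that the IBD-inflated buffer makes a single prune event free enough space that the node does not immediately need another prune-and-flush cycle.

```cpp
// Worked sketch of the prune-buffer arithmetic above, using made-up numbers:
// block files are pruned only while usage plus the buffer still exceeds the
// target, and the buffer is inflated during IBD so one prune event frees
// enough room to avoid another flush-triggering prune soon afterwards.
#include <cstdint>
#include <iostream>

int main()
{
    constexpr uint64_t MiB = 1024 * 1024;
    const uint64_t target = 10ull * 1024 * MiB;          // e.g. -prune=10240
    uint64_t buffer = 16 * MiB + 1 * MiB;                // blk + undo chunk sizes (assumed values)

    // During IBD, assume ~1 MB per block still to be downloaded and connected.
    const uint64_t tip_height = 895'000, best_header_height = 900'000;
    buffer += 1'000'000ull * (best_header_height - tip_height);

    uint64_t usage = 12ull * 1024 * MiB;                 // current blk*/rev* disk usage
    const uint64_t file_pair_size = 130 * MiB;           // one blk file plus its rev file

    int removed = 0;
    while (usage + buffer >= target && usage >= file_pair_size) {
        usage -= file_pair_size;                         // PruneOneBlockFile + queue for unlink
        ++removed;
    }
    std::cout << "removed " << removed << " blk/rev pairs, usage now "
              << usage / MiB << " MiB\n";
}
```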
| 383 |  |  | 
| 384 | 0 | void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) { | 
| 385 | 0 |     AssertLockHeld(::cs_main); | 
| 386 | 0 |     m_prune_locks[name] = lock_info; | 
| 387 | 0 | } | 
| 388 |  |  | 
| 389 |  | CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash) | 
| 390 | 20.6M | { | 
| 391 | 20.6M |     AssertLockHeld(cs_main); | 
| 392 |  |  | 
| 393 | 20.6M |     if (hash.IsNull()) { | 
| 394 | 51.2k |         return nullptr; | 
| 395 | 51.2k |     } | 
| 396 |  |  | 
| 397 | 20.5M |     const auto [mi, inserted]{m_block_index.try_emplace(hash)}; | 
| 398 | 20.5M |     CBlockIndex* pindex = &(*mi).second; | 
| 399 | 20.5M |     if (inserted) { | 
| 400 | 10.3M |         pindex->phashBlock = &((*mi).first); | 
| 401 | 10.3M |     } | 
| 402 | 20.5M |     return pindex; | 
| 403 | 20.6M | } | 
| 404 |  |  | 
| 405 |  | bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash) | 
| 406 | 51.2k | { | 
| 407 | 51.2k |     if (!m_block_tree_db->LoadBlockIndexGuts( | 
| 408 | 20.6M |             GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) { | 
| 409 | 0 |         return false; | 
| 410 | 0 |     } | 
| 411 |  |  | 
| 412 | 51.2k |     if (snapshot_blockhash) { | 
| 413 | 0 |         const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash); | 
| 414 | 0 |         if (!maybe_au_data) { | 
| 415 | 0 |             m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString())); | 
| 416 | 0 |             return false; | 
| 417 | 0 |         } | 
| 418 | 0 |         const AssumeutxoData& au_data = *Assert(maybe_au_data); | 
| 419 | 0 |         m_snapshot_height = au_data.height; | 
| 420 | 0 |         CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)}; | 
| 421 |  |  | 
| 422 |  |         // Since m_chain_tx_count (responsible for estimated progress) isn't persisted | 
| 423 |  |         // to disk, we must bootstrap the value for assumedvalid chainstates | 
| 424 |  |         // from the hardcoded assumeutxo chainparams. | 
| 425 | 0 |         base->m_chain_tx_count = au_data.m_chain_tx_count; | 
| 426 | 0 |         LogInfo("[snapshot] set m_chain_tx_count=%d for %s", au_data.m_chain_tx_count, snapshot_blockhash->ToString()); | 
| 427 | 51.2k |     } else { | 
| 428 |  |         // If this isn't called with a snapshot blockhash, make sure the cached snapshot height | 
| 429 |  |         // is null. This is relevant during snapshot completion, when the blockman may be loaded | 
| 430 |  |         // with a height that then needs to be cleared after the snapshot is fully validated. | 
| 431 | 51.2k |         m_snapshot_height.reset(); | 
| 432 | 51.2k |     } | 
| 433 |  |  | 
| 434 | 51.2k |     Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value()); | 
| 435 |  |  | 
| 436 |  |     // Calculate nChainWork | 
| 437 | 51.2k |     std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()}; | 
| 438 | 51.2k |     std::sort(vSortedByHeight.begin(), vSortedByHeight.end(), | 
| 439 | 51.2k |               CBlockIndexHeightOnlyComparator()); | 
| 440 |  |  | 
| 441 | 51.2k |     CBlockIndex* previous_index{nullptr}; | 
| 442 | 10.3M |     for (CBlockIndex* pindex : vSortedByHeight) { | 
| 443 | 10.3M |         if (m_interrupt) return false; | 
| 444 | 10.3M |         if (previous_index && pindex->nHeight > previous_index->nHeight + 1) { | 
| 445 | 0 |             LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1); | 
| 446 | 0 |             return false; | 
| 447 | 0 |         } | 
| 448 | 10.3M |         previous_index = pindex; | 
| 449 | 10.3M |         pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); | 
| 450 | 10.3M |         pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime); | 
| 451 |  |  | 
| 452 |  |         // We can link the chain of blocks for which we've received transactions at some point, or | 
| 453 |  |         // blocks that are assumed-valid on the basis of snapshot load (see | 
| 454 |  |         // PopulateAndValidateSnapshot()). | 
| 455 |  |         // Pruned nodes may have deleted the block. | 
| 456 | 10.3M |         if (pindex->nTx > 0) { | 
| 457 | 10.3M |             if (pindex->pprev) { | 
| 458 | 10.2M |                 if (m_snapshot_height && pindex->nHeight == *m_snapshot_height && | 
| 459 | 10.2M |                         pindex->GetBlockHash() == *snapshot_blockhash) { | 
| 460 |  |                     // Should have been set above; don't disturb it with code below. | 
| 461 | 0 |                     Assert(pindex->m_chain_tx_count > 0); | 
| 462 | 10.2M |                 } else if (pindex->pprev->m_chain_tx_count > 0) { | 
| 463 | 10.2M |                     pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx; | 
| 464 | 10.2M |                 } else { | 
| 465 | 0 |                     pindex->m_chain_tx_count = 0; | 
| 466 | 0 |                     m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex)); | 
| 467 | 0 |                 } | 
| 468 | 10.2M |             } else { | 
| 469 | 51.2k |                 pindex->m_chain_tx_count = pindex->nTx; | 
| 470 | 51.2k |             } | 
| 471 | 10.3M |         } | 
| 472 | 10.3M |         if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) { | 
| 473 | 0 |             pindex->nStatus |= BLOCK_FAILED_CHILD; | 
| 474 | 0 |             m_dirty_blockindex.insert(pindex); | 
| 475 | 0 |         } | 
| 476 | 10.3M |         if (pindex->pprev) { | 
| 477 | 10.2M |             pindex->BuildSkip(); | 
| 478 | 10.2M |         } | 
| 479 | 10.3M |     } | 
| 480 |  |  | 
| 481 | 51.2k |     return true; | 
| 482 | 51.2k | } | 
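The height-ordered pass above accumulates m_chain_tx_count as the parent's cumulative count plus the block's own nTx, falling back to m_blocks_unlinked when the parent's count is unknown. A toy sketch of the accumulation, using a hypothetical ToyIndex type and omitting the unlinked case:

```cpp
// Toy sketch of the m_chain_tx_count accumulation above: walking blocks in
// height order, each block's cumulative transaction count is its parent's
// cumulative count plus its own nTx, provided the parent's count is known.
#include <cstdint>
#include <iostream>
#include <vector>

struct ToyIndex {
    const ToyIndex* prev{nullptr};
    uint64_t n_tx{0};           // transactions in this block
    uint64_t chain_tx_count{0}; // cumulative count up to and including this block
};

int main()
{
    std::vector<ToyIndex> chain(4);
    const uint64_t txs[] = {1, 3, 2, 5}; // genesis has 1 coinbase tx, etc.
    for (size_t height = 0; height < chain.size(); ++height) {
        chain[height].n_tx = txs[height];
        chain[height].prev = height ? &chain[height - 1] : nullptr;
    }

    // Same order as LoadBlockIndex: lowest height first, so parents are done first.
    for (auto& index : chain) {
        index.chain_tx_count = (index.prev ? index.prev->chain_tx_count : 0) + index.n_tx;
    }
    std::cout << "chain_tx_count at tip: " << chain.back().chain_tx_count << '\n'; // prints 11
}
```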
| 483 |  |  | 
| 484 |  | bool BlockManager::WriteBlockIndexDB() | 
| 485 | 4.58k | { | 
| 486 | 4.58k |     AssertLockHeld(::cs_main); | 
| 487 | 4.58k |     std::vector<std::pair<int, const CBlockFileInfo*>> vFiles; | 
| 488 | 4.58k |     vFiles.reserve(m_dirty_fileinfo.size()); | 
| 489 | 6.48k |     for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) { | 
| 490 | 1.89k |         vFiles.emplace_back(*it, &m_blockfile_info[*it]); | 
| 491 | 1.89k |         m_dirty_fileinfo.erase(it++); | 
| 492 | 1.89k |     } | 
| 493 | 4.58k |     std::vector<const CBlockIndex*> vBlocks; | 
| 494 | 4.58k |     vBlocks.reserve(m_dirty_blockindex.size()); | 
| 495 | 15.0k |     for (std::set<CBlockIndex*, CBlockIndexBlockHashComparator>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) { | 
| 496 | 10.4k |         vBlocks.push_back(*it); | 
| 497 | 10.4k |         m_dirty_blockindex.erase(it++); | 
| 498 | 10.4k |     } | 
| 499 | 4.58k |     int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum()); | 
| 500 | 4.58k |     if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) { | 
| 501 | 0 |         return false; | 
| 502 | 0 |     } | 
| 503 | 4.58k |     return true; | 
| 504 | 4.58k | } | 
| 505 |  |  | 
| 506 |  | bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash) | 
| 507 | 51.2k | { | 
| 508 | 51.2k |     if (!LoadBlockIndex(snapshot_blockhash)) { | 
| 509 | 0 |         return false; | 
| 510 | 0 |     } | 
| 511 | 51.2k |     int max_blockfile_num{0}; | 
| 512 |  |  | 
| 513 |  |     // Load block file info | 
| 514 | 51.2k |     m_block_tree_db->ReadLastBlockFile(max_blockfile_num); | 
| 515 | 51.2k |     m_blockfile_info.resize(max_blockfile_num + 1); | 
| 516 | 51.2k |     LogInfo("Loading block index db: last block file = %i", max_blockfile_num); | 
| 517 | 102k |     for (int nFile = 0; nFile <= max_blockfile_num; nFile++) { | 
| 518 | 51.2k |         m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]); | 
| 519 | 51.2k |     } | 
| 520 | 51.2k |     LogInfo("Loading block index db: last block file info: %s", m_blockfile_info[max_blockfile_num].ToString()); | 
| 521 | 51.2k |     for (int nFile = max_blockfile_num + 1; true; nFile++) { | 
| 522 | 51.2k |         CBlockFileInfo info; | 
| 523 | 51.2k |         if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) { | 
| 524 | 0 |             m_blockfile_info.push_back(info); | 
| 525 | 51.2k |         } else { | 
| 526 | 51.2k |             break; | 
| 527 | 51.2k |         } | 
| 528 | 51.2k |     } | 
| 529 |  |  | 
| 530 |  |     // Check presence of blk files | 
| 531 | 51.2k |     LogInfo("Checking all blk files are present..."); | 
| 532 | 51.2k |     std::set<int> setBlkDataFiles; | 
| 533 | 10.3M |     for (const auto& [_, block_index] : m_block_index) { | 
| 534 | 10.3M |         if (block_index.nStatus & BLOCK_HAVE_DATA) { | 
| 535 | 10.3M |             setBlkDataFiles.insert(block_index.nFile); | 
| 536 | 10.3M |         } | 
| 537 | 10.3M |     } | 
| 538 | 102k |     for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) { | 
| 539 | 51.2k |         FlatFilePos pos(*it, 0); | 
| 540 | 51.2k |         if (OpenBlockFile(pos, /*fReadOnly=*/true).IsNull()) { | 
| 541 | 0 |             return false; | 
| 542 | 0 |         } | 
| 543 | 51.2k |     } | 
| 544 |  |  | 
| 545 | 51.2k |     { | 
| 546 |  |         // Initialize the blockfile cursors. | 
| 547 | 51.2k |         LOCK(cs_LastBlockFile); | 
| 548 | 102k |         for (size_t i = 0; i < m_blockfile_info.size(); ++i) { | 
| 549 | 51.2k |             const auto last_height_in_file = m_blockfile_info[i].nHeightLast; | 
| 550 | 51.2k |             m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0}; | 
| 551 | 51.2k |         } | 
| 552 | 51.2k |     } | 
| 553 |  |  | 
| 554 |  |     // Check whether we have ever pruned block & undo files | 
| 555 | 51.2k |     m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned); | 
| 556 | 51.2k |     if (m_have_pruned) { | 
| 557 | 0 |         LogInfo("Loading block index db: Block files have previously been pruned"); | 
| 558 | 0 |     } | 
| 559 |  |  | 
| 560 |  |     // Check whether we need to continue reindexing | 
| 561 | 51.2k |     bool fReindexing = false; | 
| 562 | 51.2k |     m_block_tree_db->ReadReindexing(fReindexing); | 
| 563 | 51.2k |     if (fReindexing) m_blockfiles_indexed = false; | 
| 564 |  |  | 
| 565 | 51.2k |     return true; | 
| 566 | 51.2k | } | 
| 567 |  |  | 
| 568 |  | void BlockManager::ScanAndUnlinkAlreadyPrunedFiles() | 
| 569 | 51.2k | { | 
| 570 | 51.2k |     AssertLockHeld(::cs_main); | 
| 571 | 51.2k |     int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum()); | 
| 572 | 51.2k |     if (!m_have_pruned) { | 
| 573 | 51.2k |         return; | 
| 574 | 51.2k |     } | 
| 575 |  |  | 
| 576 | 0 |     std::set<int> block_files_to_prune; | 
| 577 | 0 |     for (int file_number = 0; file_number < max_blockfile; file_number++) { | 
| 578 | 0 |         if (m_blockfile_info[file_number].nSize == 0) { | 
| 579 | 0 |             block_files_to_prune.insert(file_number); | 
| 580 | 0 |         } | 
| 581 | 0 |     } | 
| 582 |  |  | 
| 583 | 0 |     UnlinkPrunedFiles(block_files_to_prune); | 
| 584 | 0 | } | 
| 585 |  |  | 
| 586 |  | bool BlockManager::IsBlockPruned(const CBlockIndex& block) const | 
| 587 | 0 | { | 
| 588 | 0 |     AssertLockHeld(::cs_main); | 
| 589 | 0 |     return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0); | 
| 590 | 0 | } | 
| 591 |  |  | 
| 592 |  | const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const | 
| 593 | 0 | { | 
| 594 | 0 |     AssertLockHeld(::cs_main); | 
| 595 | 0 |     const CBlockIndex* last_block = &upper_block; | 
| 596 | 0 |     assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask | 
| 597 | 0 |     while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) { | 
| 598 | 0 |         if (lower_block) { | 
| 599 |  |             // Return if we reached the lower_block | 
| 600 | 0 |             if (last_block == lower_block) return lower_block; | 
| 601 |  |             // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain | 
| 602 |  |             // and so far this is not allowed. | 
| 603 | 0 |             assert(last_block->nHeight >= lower_block->nHeight); | 
| 604 | 0 |         } | 
| 605 | 0 |         last_block = last_block->pprev; | 
| 606 | 0 |     } | 
| 607 | 0 |     assert(last_block != nullptr); | 
| 608 | 0 |     return last_block; | 
| 609 | 0 | } | 
| 610 |  |  | 
| 611 |  | bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block) | 
| 612 | 0 | { | 
| 613 | 0 |     if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false; | 
| 614 | 0 |     return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block; | 
| 615 | 0 | } | 
| 616 |  |  | 
| 617 |  | // If we're using -prune with -reindex, then delete block files that will be ignored by the | 
| 618 |  | // reindex.  Since reindexing works by starting at block file 0 and looping until a blockfile | 
| 619 |  | // is missing, do the same here to delete any later block files after a gap.  Also delete all | 
| 620 |  | // rev files since they'll be rewritten by the reindex anyway.  This ensures that m_blockfile_info | 
| 621 |  | // is in sync with what's actually on disk by the time we start downloading, so that pruning | 
| 622 |  | // works correctly. | 
| 623 |  | void BlockManager::CleanupBlockRevFiles() const | 
| 624 | 0 | { | 
| 625 | 0 |     std::map<std::string, fs::path> mapBlockFiles; | 
| 626 |  |  | 
| 627 |  |     // Glob all blk?????.dat and rev?????.dat files from the blocks directory. | 
| 628 |  |     // Remove the rev files immediately and insert the blk file paths into an | 
| 629 |  |     // ordered map keyed by block file index. | 
| 630 | 0 |     LogInfo("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune"); | 
| 631 | 0 |     for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) { | 
| 632 | 0 |         const std::string path = fs::PathToString(it->path().filename()); | 
| 633 | 0 |         if (fs::is_regular_file(*it) && | 
| 634 | 0 |             path.length() == 12 && | 
| 635 | 0 |             path.ends_with(".dat")) | 
| 636 | 0 |         { | 
| 637 | 0 |             if (path.starts_with("blk")) { | 
| 638 | 0 |                 mapBlockFiles[path.substr(3, 5)] = it->path(); | 
| 639 | 0 |             } else if (path.starts_with("rev")) { | 
| 640 | 0 |                 remove(it->path()); | 
| 641 | 0 |             } | 
| 642 | 0 |         } | 
| 643 | 0 |     } | 
| 644 |  |  | 
| 645 |  |     // Remove all block files that aren't part of a contiguous set starting at | 
| 646 |  |     // zero by walking the ordered map (keys are block file indices) by | 
| 647 |  |     // keeping a separate counter.  Once we hit a gap (or if 0 doesn't exist) | 
| 648 |  |     // start removing block files. | 
| 649 | 0 |     int nContigCounter = 0; | 
| 650 | 0 |     for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) { | 
| 651 | 0 |         if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) { | 
| 652 | 0 |             nContigCounter++; | 
| 653 | 0 |             continue; | 
| 654 | 0 |         } | 
| 655 | 0 |         remove(item.second); | 
| 656 | 0 |     } | 
| 657 | 0 | } | 
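The walk above keeps only block files whose indices form an unbroken run starting at zero and removes everything after the first gap, since reindexing stops at the first missing file. A self-contained sketch of the counter walk, with toy data for illustration only:

```cpp
// Standalone sketch of the contiguity walk above: keep blk files whose numeric
// suffixes form an unbroken run starting at 0, and drop everything after the
// first gap (those files would be ignored by the reindex anyway).
#include <iostream>
#include <map>
#include <string>

int main()
{
    // Keys mimic the 5-digit suffix of blk?????.dat, as parsed in the real code.
    std::map<std::string, std::string> block_files{
        {"00000", "blk00000.dat"}, {"00001", "blk00001.dat"}, {"00003", "blk00003.dat"}};

    int contiguous = 0;
    for (const auto& [suffix, path] : block_files) {
        if (std::stoi(suffix) == contiguous) {
            ++contiguous;           // still part of the run starting at zero
            continue;
        }
        std::cout << "would remove " << path << '\n'; // blk00003.dat: comes after the gap
    }
}
```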
| 658 |  |  | 
| 659 |  | CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n) | 
| 660 | 0 | { | 
| 661 | 0 |     LOCK(cs_LastBlockFile); | 
| 662 |  |  | 
| 663 | 0 |     return &m_blockfile_info.at(n); | 
| 664 | 0 | } | 
| 665 |  |  | 
| 666 |  | bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const | 
| 667 | 615k | { | 
| 668 | 615k |     const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())}; | 
| 669 |  |  | 
| 670 |  |     // Open history file to read | 
| 671 | 615k |     AutoFile file{OpenUndoFile(pos, true)}; | 
| 672 | 615k |     if (file.IsNull()) { | 
| 673 | 0 |         LogError("OpenUndoFile failed for %s while reading block undo", pos.ToString()); | 
| 674 | 0 |         return false; | 
| 675 | 0 |     } | 
| 676 | 615k |     BufferedReader filein{std::move(file)}; | 
| 677 |  |  | 
| 678 | 615k |     try { | 
| 679 |  |         // Read block | 
| 680 | 615k |         HashVerifier verifier{filein}; // Use HashVerifier, as reserializing may lose data, c.f. commit d3424243 | 
| 681 |  |  | 
| 682 | 615k |         verifier << index.pprev->GetBlockHash(); | 
| 683 | 615k |         verifier >> blockundo; | 
| 684 |  |  | 
| 685 | 615k |         uint256 hashChecksum; | 
| 686 | 615k |         filein >> hashChecksum; | 
| 687 |  |  | 
| 688 |  |         // Verify checksum | 
| 689 | 615k |         if (hashChecksum != verifier.GetHash()) { | 
| 690 | 0 |             LogError("Checksum mismatch at %s while reading block undo", pos.ToString()); | 
| 691 | 0 |             return false; | 
| 692 | 0 |         } | 
| 693 | 615k |     } catch (const std::exception& e) { | 
| 694 | 0 |         LogError("Deserialize or I/O error - %s at %s while reading block undo", e.what(), pos.ToString()); | 
| 695 | 0 |         return false; | 
| 696 | 0 |     } | 
| 697 |  |  | 
| 698 | 615k |     return true; | 
| 699 | 615k | } | 
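ReadBlockUndo above recomputes a checksum over the parent block hash followed by the undo payload and compares it with the checksum stored after the undo data in the rev file. A minimal sketch of that write/read symmetry, with a toy FNV-1a checksum standing in for the real double-SHA256 HashVerifier:

```cpp
// Minimal sketch of the undo checksum scheme above, with a toy checksum standing
// in for the real hash: the writer stores checksum(parent_hash || undo_bytes)
// after the undo data, and the reader recomputes it the same way.
#include <cstdint>
#include <iostream>
#include <string>

static uint32_t ToyChecksum(const std::string& bytes) // stand-in, not the real hash
{
    uint32_t acc = 2166136261u;
    for (unsigned char c : bytes) acc = (acc ^ c) * 16777619u; // FNV-1a
    return acc;
}

int main()
{
    const std::string parent_hash = "0000...parent";   // index.pprev->GetBlockHash()
    const std::string undo_bytes = "<serialized CBlockUndo>";

    // Writer side: checksum covers the parent hash followed by the undo payload.
    const uint32_t stored_checksum = ToyChecksum(parent_hash + undo_bytes);

    // Reader side (as in ReadBlockUndo): seed with the parent hash, feed the
    // payload, then compare against the checksum read back from the rev file.
    const uint32_t recomputed = ToyChecksum(parent_hash + undo_bytes);
    std::cout << (recomputed == stored_checksum ? "undo data OK" : "checksum mismatch") << '\n';
}
```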
| 700 |  |  | 
| 701 |  | bool BlockManager::FlushUndoFile(int block_file, bool finalize) | 
| 702 | 4.58k | { | 
| 703 | 4.58k |     FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize); | 
| 704 | 4.58k |     if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) { | 
| 705 | 0 |         m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error.")); | 
| 706 | 0 |         return false; | 
| 707 | 0 |     } | 
| 708 | 4.58k |     return true; | 
| 709 | 4.58k | } | 
| 710 |  |  | 
| 711 |  | bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo) | 
| 712 | 4.58k | { | 
| 713 | 4.58k |     bool success = true; | 
| 714 | 4.58k |     LOCK(cs_LastBlockFile); | 
| 715 |  |  | 
| 716 | 4.58k |     if (m_blockfile_info.size() < 1) { | 
| 717 |  |         // Return if we haven't loaded any blockfiles yet. This happens during | 
| 718 |  |         // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which | 
| 719 |  |         // then calls FlushStateToDisk()), resulting in a call to this function before we | 
| 720 |  |         // have populated `m_blockfile_info` via LoadBlockIndexDB(). | 
| 721 | 0 |         return true; | 
| 722 | 0 |     } | 
| 723 | 4.58k |     assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num); | 
| 724 |  |  | 
| 725 | 4.58k |     FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize); | 
| 726 | 4.58k |     if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) { | 
| 727 | 0 |         m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error.")); | 
| 728 | 0 |         success = false; | 
| 729 | 0 |     } | 
| 730 |  |     // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks, | 
| 731 |  |     // e.g. during IBD or a sync after a node going offline | 
| 732 | 4.58k |     if (!fFinalize || finalize_undo) { | 
| 733 | 4.58k |         if (!FlushUndoFile(blockfile_num, finalize_undo)) { | 
| 734 | 0 |             success = false; | 
| 735 | 0 |         } | 
| 736 | 4.58k |     } | 
| 737 | 4.58k |     return success; | 
| 738 | 4.58k | } | 
| 739 |  |  | 
| 740 |  | BlockfileType BlockManager::BlockfileTypeForHeight(int height) | 
| 741 | 105k | { | 
| 742 | 105k |     if (!m_snapshot_height) { | 
| 743 | 105k |         return BlockfileType::NORMAL; | 
| 744 | 105k |     } | 
| 745 | 0 |     return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL; | 
| 746 | 105k | } | 
| 747 |  |  | 
| 748 |  | bool BlockManager::FlushChainstateBlockFile(int tip_height) | 
| 749 | 4.58k | { | 
| 750 | 4.58k |     LOCK(cs_LastBlockFile); | 
| 751 | 4.58k |     auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)]; | 
| 752 |  |     // If the cursor does not exist, it means an assumeutxo snapshot is loaded, | 
| 753 |  |     // but no blocks past the snapshot height have been written yet, so there | 
| 754 |  |     // is no data associated with the chainstate, and it is safe not to flush. | 
| 755 | 4.58k |     if (cursor) { | 
| 756 | 4.58k |         return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false); | 
| 757 | 4.58k |     } | 
| 758 |  |     // No need to log warnings in this case. | 
| 759 | 0 |     return true; | 
| 760 | 4.58k | } | 
| 761 |  |  | 
| 762 |  | uint64_t BlockManager::CalculateCurrentUsage() | 
| 763 | 0 | { | 
| 764 | 0 |     LOCK(cs_LastBlockFile); | 
| 765 |  |  | 
| 766 | 0 |     uint64_t retval = 0; | 
| 767 | 0 |     for (const CBlockFileInfo& file : m_blockfile_info) { | 
| 768 | 0 |         retval += file.nSize + file.nUndoSize; | 
| 769 | 0 |     } | 
| 770 | 0 |     return retval; | 
| 771 | 0 | } | 
| 772 |  |  | 
| 773 |  | void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const | 
| 774 | 0 | { | 
| 775 | 0 |     std::error_code ec; | 
| 776 | 0 |     for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) { | 
| 777 | 0 |         FlatFilePos pos(*it, 0); | 
| 778 | 0 |         const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)}; | 
| 779 | 0 |         const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)}; | 
| 780 | 0 |         if (removed_blockfile || removed_undofile) { | 
| 781 | 0 |             LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it); | 
| 782 | 0 |         } | 
| 783 | 0 |     } | 
| 784 | 0 | } | 
| 785 |  |  | 
| 786 |  | AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const | 
| 787 | 387k | { | 
| 788 | 387k |     return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_obfuscation}; | 
| 789 | 387k | } | 
| 790 |  |  | 
| 791 |  | /** Open an undo file (rev?????.dat) */ | 
| 792 |  | AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const | 
| 793 | 637k | { | 
| 794 | 637k |     return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_obfuscation}; | 
| 795 | 637k | } | 
| 796 |  |  | 
| 797 |  | fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const | 
| 798 | 0 | { | 
| 799 | 0 |     return m_block_file_seq.FileName(pos); | 
| 800 | 0 | } | 
| 801 |  |  | 
| 802 |  | FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime) | 
| 803 | 28.2k | { | 
| 804 | 28.2k |     LOCK(cs_LastBlockFile); | 
| 805 |  |  | 
| 806 | 28.2k |     const BlockfileType chain_type = BlockfileTypeForHeight(nHeight); | 
| 807 |  |  | 
| 808 | 28.2k |     if (!m_blockfile_cursors[chain_type]) { | 
| 809 |  |         // If a snapshot is loaded during runtime, we may not have initialized this cursor yet. | 
| 810 | 0 |         assert(chain_type == BlockfileType::ASSUMED); | 
| 811 | 0 |         const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1}; | 
| 812 | 0 |         m_blockfile_cursors[chain_type] = new_cursor; | 
| 813 | 0 |         LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor); | 
| 814 | 0 |     } | 
| 815 | 28.2k |     const int last_blockfile = m_blockfile_cursors[chain_type]->file_num; | 
| 816 |  |  | 
| 817 | 28.2k |     int nFile = last_blockfile; | 
| 818 | 28.2k |     if (static_cast<int>(m_blockfile_info.size()) <= nFile) { | 
| 819 | 0 |         m_blockfile_info.resize(nFile + 1); | 
| 820 | 0 |     } | 
| 821 |  |  | 
| 822 | 28.2k |     bool finalize_undo = false; | 
| 823 | 28.2k |     unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE}; | 
| 824 |  |     // Use smaller blockfiles in test-only -fastprune mode - but avoid | 
| 825 |  |     // the possibility of having a block not fit into the block file. | 
| 826 | 28.2k |     if (m_opts.fast_prune) { | 
| 827 | 0 |         max_blockfile_size = 0x10000; // 64kiB | 
| 828 | 0 |         if (nAddSize >= max_blockfile_size) { | 
| 829 |  |             // dynamically adjust the blockfile size to be larger than the added size | 
| 830 | 0 |             max_blockfile_size = nAddSize + 1; | 
| 831 | 0 |         } | 
| 832 | 0 |     } | 
| 833 | 28.2k |     assert(nAddSize < max_blockfile_size); | 
| 834 |  |  | 
| 835 | 28.2k |     while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) { | 
| 836 |  |         // when the undo file is keeping up with the block file, we want to flush it explicitly | 
| 837 |  |         // when it is lagging behind (more blocks arrive than are being connected), we let the | 
| 838 |  |         // undo block write case handle it | 
| 839 | 0 |         finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) == | 
| 840 | 0 |                          Assert(m_blockfile_cursors[chain_type])->undo_height); | 
| 841 |  |  | 
| 842 |  |         // Try the next unclaimed blockfile number | 
| 843 | 0 |         nFile = this->MaxBlockfileNum() + 1; | 
| 844 |  |         // Set to increment MaxBlockfileNum() for next iteration | 
| 845 | 0 |         m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; | 
| 846 |  | 
 | 
| 847 | 0 |         if (static_cast<int>(m_blockfile_info.size()) <= nFile) { | 
| 848 | 0 |             m_blockfile_info.resize(nFile + 1); | 
| 849 | 0 |         } | 
| 850 | 0 |     } | 
| 851 | 28.2k |     FlatFilePos pos; | 
| 852 | 28.2k |     pos.nFile = nFile; | 
| 853 | 28.2k |     pos.nPos = m_blockfile_info[nFile].nSize; | 
| 854 |  |  | 
| 855 | 28.2k |     if (nFile != last_blockfile) { | 
| 856 | 0 |         LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n", | 
| 857 | 0 |                  last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight); | 
| 858 |  |  | 
| 859 |  |         // Do not propagate the return code. The flush concerns a previous block | 
| 860 |  |         // and undo file that has already been written to. If a flush fails | 
| 861 |  |         // here, and we crash, there is no expected additional block data | 
| 862 |  |         // inconsistency arising from the flush failure here. However, the undo | 
| 863 |  |         // data may be inconsistent after a crash if the flush is called during | 
| 864 |  |         // a reindex. A flush error might also leave some of the data files | 
| 865 |  |         // untrimmed. | 
| 866 | 0 |         if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) { | 
| 867 | 0 |             LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, | 
| 868 | 0 |                           "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n", | 
| 869 | 0 |                           last_blockfile, finalize_undo, nFile); | 
| 870 | 0 |         } | 
| 871 |  |         // No undo data yet in the new file, so reset our undo-height tracking. | 
| 872 | 0 |         m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; | 
| 873 | 0 |     } | 
| 874 |  |  | 
| 875 | 28.2k |     m_blockfile_info[nFile].AddBlock(nHeight, nTime); | 
| 876 | 28.2k |     m_blockfile_info[nFile].nSize += nAddSize; | 
| 877 |  |  | 
| 878 | 28.2k |     bool out_of_space; | 
| 879 | 28.2k |     size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space); | 
| 880 | 28.2k |     if (out_of_space) { | 
| 881 | 0 |         m_opts.notifications.fatalError(_("Disk space is too low!")); | 
| 882 | 0 |         return {}; | 
| 883 | 0 |     } | 
| 884 | 28.2k |     if (bytes_allocated != 0 && IsPruneMode()) { | 
| 885 | 0 |         m_check_for_pruning = true; | 
| 886 | 0 |     } | 
| 887 |  |  | 
| 888 | 28.2k |     m_dirty_fileinfo.insert(nFile); | 
| 889 | 28.2k |     return pos; | 
| 890 | 28.2k | } | 
| 891 |  |  | 
| 892 |  | void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos) | 
| 893 | 0 | { | 
| 894 | 0 |     LOCK(cs_LastBlockFile); | 
| 895 |  |  | 
| 896 |  |     // Update the cursor so it points to the last file. | 
| 897 | 0 |     const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)}; | 
| 898 | 0 |     auto& cursor{m_blockfile_cursors[chain_type]}; | 
| 899 | 0 |     if (!cursor || cursor->file_num < pos.nFile) { | 
| 900 | 0 |         m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile}; | 
| 901 | 0 |     } | 
| 902 |  |  | 
| 903 |  |     // Update the file information with the current block. | 
| 904 | 0 |     const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block)); | 
| 905 | 0 |     const int nFile = pos.nFile; | 
| 906 | 0 |     if (static_cast<int>(m_blockfile_info.size()) <= nFile) { | 
| 907 | 0 |         m_blockfile_info.resize(nFile + 1); | 
| 908 | 0 |     } | 
| 909 | 0 |     m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime()); | 
| 910 | 0 |     m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize); | 
| 911 | 0 |     m_dirty_fileinfo.insert(nFile); | 
| 912 | 0 | } | 
| 913 |  |  | 
| 914 |  | bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize) | 
| 915 | 21.7k | { | 
| 916 | 21.7k |     pos.nFile = nFile; | 
| 917 |  |  | 
| 918 | 21.7k |     LOCK(cs_LastBlockFile); | 
| 919 |  |  | 
| 920 | 21.7k |     pos.nPos = m_blockfile_info[nFile].nUndoSize; | 
| 921 | 21.7k |     m_blockfile_info[nFile].nUndoSize += nAddSize; | 
| 922 | 21.7k |     m_dirty_fileinfo.insert(nFile); | 
| 923 |  |  | 
| 924 | 21.7k |     bool out_of_space; | 
| 925 | 21.7k |     size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space); | 
| 926 | 21.7k |     if (out_of_space) { | 
| 927 | 0 |         return FatalError(m_opts.notifications, state, _("Disk space is too low!")); | 
| 928 | 0 |     } | 
| 929 | 21.7k |     if (bytes_allocated != 0 && IsPruneMode()) { | 
| 930 | 0 |         m_check_for_pruning = true; | 
| 931 | 0 |     } | 
| 932 |  |  | 
| 933 | 21.7k |     return true; | 
| 934 | 21.7k | } | 
| 935 |  |  | 
| 936 |  | bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) | 
| 937 | 21.7k | { | 
| 938 | 21.7k |     AssertLockHeld(::cs_main); | 
| 939 | 21.7k |     const BlockfileType type = BlockfileTypeForHeight(block.nHeight); | 
| 940 | 21.7k |     auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type])); | 
| 941 |  |  | 
| 942 |  |     // Write undo information to disk | 
| 943 | 21.7k |     if (block.GetUndoPos().IsNull()) { | 
| 944 | 21.7k |         FlatFilePos pos; | 
| 945 | 21.7k |         const auto blockundo_size{static_cast<uint32_t>(GetSerializeSize(blockundo))}; | 
| 946 | 21.7k |         if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) { | 
| 947 | 0 |             LogError("FindUndoPos failed for %s while writing block undo", pos.ToString()); | 
| 948 | 0 |             return false; | 
| 949 | 0 |         } | 
| 950 |  |  | 
| 951 |  |         // Open history file to append | 
| 952 | 21.7k |         AutoFile file{OpenUndoFile(pos)}; | 
| 953 | 21.7k |         if (file.IsNull()) { | 
| 954 | 0 |             LogError("OpenUndoFile failed for %s while writing block undo", pos.ToString()); | 
| 955 | 0 |             return FatalError(m_opts.notifications, state, _("Failed to write undo data.")); | 
| 956 | 0 |         } | 
| 957 | 21.7k |         { | 
| 958 | 21.7k |             BufferedWriter fileout{file}; | 
| 959 |  |  | 
| 960 |  |             // Write index header | 
| 961 | 21.7k |             fileout << GetParams().MessageStart() << blockundo_size; | 
| 962 | 21.7k |             pos.nPos += STORAGE_HEADER_BYTES; | 
| 963 | 21.7k |             { | 
| 964 |  |                 // Calculate checksum | 
| 965 | 21.7k |                 HashWriter hasher{}; | 
| 966 | 21.7k |                 hasher << block.pprev->GetBlockHash() << blockundo; | 
| 967 |  |                 // Write undo data & checksum | 
| 968 | 21.7k |                 fileout << blockundo << hasher.GetHash(); | 
| 969 | 21.7k |             } | 
| 970 |  |             // BufferedWriter will flush pending data to file when fileout goes out of scope. | 
| 971 | 21.7k |         } | 
| 972 |  |  | 
| 973 |  |         // Make sure that the file is closed before we call `FlushUndoFile`. | 
| 974 | 21.7k |         if (file.fclose() != 0) { | 
| 975 | 0 |             LogError("Failed to close block undo file %s: %s", pos.ToString(), SysErrorString(errno)); | 
| 976 | 0 |             return FatalError(m_opts.notifications, state, _("Failed to close block undo file.")); | 
| 977 | 0 |         } | 
| 978 |  |  | 
| 979 |  |         // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order) | 
| 980 |  |         // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height | 
| 981 |  |         // in the block file info as below; note that this does not catch the case where the undo writes are keeping up | 
| 982 |  |         // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in | 
| 983 |  |         // the FindNextBlockPos function | 
| 984 | 21.7k |         if (pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) { | 
| 985 |  |             // Do not propagate the return code, a failed flush here should not | 
| 986 |  |             // be an indication for a failed write. If it were propagated here, | 
| 987 |  |             // the caller would assume the undo data not to be written, when in | 
| 988 |  |             // fact it is. Note though, that a failed flush might leave the data | 
| 989 |  |             // file untrimmed. | 
| 990 | 0 |             if (!FlushUndoFile(pos.nFile, true)) { | 
| 991 | 0 |                 LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile); | 
| 992 | 0 |             } | 
| 993 | 21.7k |         } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) { | 
| 994 | 21.7k |             cursor.undo_height = block.nHeight; | 
| 995 | 21.7k |         } | 
| 996 |  |         // update nUndoPos in block index | 
| 997 | 21.7k |         block.nUndoPos = pos.nPos; | 
| 998 | 21.7k |         block.nStatus |= BLOCK_HAVE_UNDO; | 
| 999 | 21.7k |         m_dirty_blockindex.insert(&block); | 
| 1000 | 21.7k |     } | 
| 1001 |  |  | 
| 1002 | 21.7k |     return true; | 
| 1003 | 21.7k | } | 
| 1004 |  |  | 
| 1005 |  | bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos, const std::optional<uint256>& expected_hash) const | 
| 1006 | 308k | { | 
| 1007 | 308k |     block.SetNull(); | 
| 1008 |  |  | 
| 1009 |  |     // Open history file to read | 
| 1010 | 308k |     std::vector<std::byte> block_data; | 
| 1011 | 308k |     if (!ReadRawBlock(block_data, pos)) { | 
| 1012 | 0 |         return false; | 
| 1013 | 0 |     } | 
| 1014 |  |  | 
| 1015 | 308k |     try { | 
| 1016 |  |         // Read block | 
| 1017 | 308k |         SpanReader{block_data} >> TX_WITH_WITNESS(block); | 
| 1018 | 308k |     } catch (const std::exception& e) { | 
| 1019 | 0 |         LogError("Deserialize or I/O error - %s at %s while reading block", e.what(), pos.ToString()); | 
| 1020 | 0 |         return false; | 
| 1021 | 0 |     } | 
| 1022 |  |  | 
| 1023 | 308k |     const auto block_hash{block.GetHash()}; | 
| 1024 |  |  | 
| 1025 |  |     // Check the header | 
| 1026 | 308k |     if (!CheckProofOfWork(block_hash, block.nBits, GetConsensus())) { | 
| 1027 | 0 |         LogError("Errors in block header at %s while reading block", pos.ToString()); | 
| 1028 | 0 |         return false; | 
| 1029 | 0 |     } | 
| 1030 |  |  | 
| 1031 |  |     // Signet only: check block solution | 
| 1032 | 308k |     if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) { | 
| 1033 | 0 |         LogError("Errors in block solution at %s while reading block", pos.ToString()); | 
| 1034 | 0 |         return false; | 
| 1035 | 0 |     } | 
| 1036 |  |  | 
| 1037 | 308k |     if (expected_hash && block_hash != *expected_hash) { | 
| 1038 | 0 |         LogError("GetHash() doesn't match index at %s while reading block (%s != %s)", | 
| 1039 | 0 |                  pos.ToString(), block_hash.ToString(), expected_hash->ToString()); | 
| 1040 | 0 |         return false; | 
| 1041 | 0 |     } | 
| 1042 |  |  | 
| 1043 | 308k |     return true; | 
| 1044 | 308k | } | 
| 1045 |  |  | 
| 1046 |  | bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const | 
| 1047 | 308k | { | 
| 1048 | 308k |     const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())}; | 
| 1049 | 308k |     return ReadBlock(block, block_pos, index.GetBlockHash()); | 
| 1050 | 308k | } | 
| 1051 |  |  | 
| 1052 |  | bool BlockManager::ReadRawBlock(std::vector<std::byte>& block, const FlatFilePos& pos) const | 
| 1053 | 308k | { | 
| 1054 | 308k |     if (pos.nPos < STORAGE_HEADER_BYTES) { | 
| 1055 |  |         // If nPos is less than STORAGE_HEADER_BYTES, we can't read the header that precedes the block data | 
| 1056 |  |         // This would cause an unsigned integer underflow when trying to position the file cursor | 
| 1057 |  |         // This can happen after pruning or default constructed positions | 
| 1058 | 0 |         LogError("Failed for %s while reading raw block storage header", pos.ToString()); | 
| 1059 | 0 |         return false; | 
| 1060 | 0 |     } | 
| 1061 | 308k |     AutoFile filein{OpenBlockFile({pos.nFile, pos.nPos - STORAGE_HEADER_BYTES}, /*fReadOnly=*/true)}; | 
| 1062 | 308k |     if (filein.IsNull()) { | 
| 1063 | 0 |         LogError("OpenBlockFile failed for %s while reading raw block", pos.ToString()); | 
| 1064 | 0 |         return false; | 
| 1065 | 0 |     } | 
| 1066 |  |  | 
| 1067 | 308k |     try { | 
| 1068 | 308k |         MessageStartChars blk_start; | 
| 1069 | 308k |         unsigned int blk_size; | 
| 1070 |  |  | 
| 1071 | 308k |         filein >> blk_start >> blk_size; | 
| 1072 |  |  | 
| 1073 | 308k |         if (blk_start != GetParams().MessageStart()) { | 
| 1074 | 0 |             LogError("Block magic mismatch for %s: %s versus expected %s while reading raw block", | 
| 1075 | 0 |                 pos.ToString(), HexStr(blk_start), HexStr(GetParams().MessageStart())); | 
| 1076 | 0 |             return false; | 
| 1077 | 0 |         } | 
| 1078 |  |  | 
| 1079 | 308k |         if (blk_size > MAX_SIZE) { | 
| 1080 | 0 |             LogError("Block data is larger than maximum deserialization size for %s: %s versus %s while reading raw block", | 
| 1081 | 0 |                 pos.ToString(), blk_size, MAX_SIZE); | 
| 1082 | 0 |             return false; | 
| 1083 | 0 |         } | 
| 1084 |  |  | 
| 1085 | 308k |         block.resize(blk_size); // Zeroing of memory is intentional here | 
| 1086 | 308k |         filein.read(block); | 
| 1087 | 308k |     } catch (const std::exception& e) { | 
| 1088 | 0 |         LogError("Read from block file failed: %s for %s while reading raw block", e.what(), pos.ToString()); | 
| 1089 | 0 |         return false; | 
| 1090 | 0 |     } | 
| 1091 |  |  | 
| 1092 | 308k |     return true; | 
| 1093 | 308k | } | 
| 1094 |  |  | 
| 1095 |  | FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight) | 
| 1096 | 28.2k | { | 
| 1097 | 28.2k |     const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))}; | 
| 1098 | 28.2k |     FlatFilePos pos{FindNextBlockPos(block_size + STORAGE_HEADER_BYTES, nHeight, block.GetBlockTime())}; | 
| 1099 | 28.2k |     if (pos.IsNull()) { | 
| 1100 | 0 |         LogError("FindNextBlockPos failed for %s while writing block", pos.ToString()); | 
| 1101 | 0 |         return FlatFilePos(); | 
| 1102 | 0 |     } | 
| 1103 | 28.2k |     AutoFile file{OpenBlockFile(pos, /*fReadOnly=*/false)}; | 
| 1104 | 28.2k |     if (file.IsNull()) { | 
| 1105 | 0 |         LogError("OpenBlockFile failed for %s while writing block", pos.ToString()); | 
| 1106 | 0 |         m_opts.notifications.fatalError(_("Failed to write block.")); | 
| 1107 | 0 |         return FlatFilePos(); | 
| 1108 | 0 |     } | 
| 1109 | 28.2k |     { | 
| 1110 | 28.2k |         BufferedWriter fileout{file}; | 
| 1111 |  |  | 
| 1112 |  |         // Write index header | 
| 1113 | 28.2k |         fileout << GetParams().MessageStart() << block_size; | 
| 1114 | 28.2k |         pos.nPos += STORAGE_HEADER_BYTES; | 
| 1115 |  |         // Write block | 
| 1116 | 28.2k |         fileout << TX_WITH_WITNESS(block); | 
| 1117 | 28.2k |     } | 
| 1118 |  |  | 
| 1119 | 28.2k |     if (file.fclose() != 0) { | 
| 1120 | 0 |         LogError("Failed to close block file %s: %s", pos.ToString(), SysErrorString(errno)); | 
| 1121 | 0 |         m_opts.notifications.fatalError(_("Failed to close file when writing block.")); | 
| 1122 | 0 |         return FlatFilePos(); | 
| 1123 | 0 |     } | 
| 1124 |  |  | 
| 1125 | 28.2k |     return pos; | 
| 1126 | 28.2k | } | 
| 1127 |  |  | 
| 1128 |  | static auto InitBlocksdirXorKey(const BlockManager::Options& opts) | 
| 1129 | 51.2k | { | 
| 1130 |  |     // Bytes are serialized without length indicator, so this is also the exact | 
| 1131 |  |     // size of the XOR-key file. | 
| 1132 | 51.2k |     std::array<std::byte, Obfuscation::KEY_SIZE> obfuscation{}; | 
| 1133 |  |  | 
| 1134 |  |     // Consider this to be the first run if the blocksdir contains only hidden | 
| 1135 |  |     // files (those which start with a .). Checking for a fully-empty dir would | 
| 1136 |  |     // be too aggressive as a .lock file may have already been written. | 
| 1137 | 51.2k |     bool first_run = true; | 
| 1138 | 51.2k |     for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) { | 
| 1139 | 51.2k |         const std::string path = fs::PathToString(entry.path().filename()); | 
| 1140 | 51.2k |         if (!entry.is_regular_file() || !path.starts_with('.')) { | 
| 1141 | 51.2k |             first_run = false; | 
| 1142 | 51.2k |             break; | 
| 1143 | 51.2k |         } | 
| 1144 | 51.2k |     } | 
| 1145 |  |  | 
| 1146 | 51.2k |     if (opts.use_xor && first_run) { | 
| 1147 |  |         // Only use random fresh key when the boolean option is set and on the | 
| 1148 |  |         // very first start of the program. | 
| 1149 | 0 |         FastRandomContext{}.fillrand(obfuscation); | 
| 1150 | 0 |     } | 
| 1151 |  |  | 
| 1152 | 51.2k |     const fs::path xor_key_path{opts.blocks_dir / "xor.dat"}; | 
| 1153 | 51.2k |     if (fs::exists(xor_key_path)) { | 
| 1154 |  |         // A pre-existing xor key file has priority. | 
| 1155 | 51.2k |         AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")}; | 
| 1156 | 51.2k |         xor_key_file >> obfuscation; | 
| 1157 | 51.2k |     } else { | 
| 1158 |  |         // Create initial or missing xor key file | 
| 1159 | 0 |         AutoFile xor_key_file{fsbridge::fopen(xor_key_path, | 
| 1160 |  | #ifdef __MINGW64__ | 
| 1161 |  |             "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210 | 
| 1162 |  | #else | 
| 1163 | 0 |             "wbx" | 
| 1164 | 0 | #endif | 
| 1165 | 0 |         )}; | 
| 1166 | 0 |         xor_key_file << obfuscation; | 
| 1167 | 0 |         if (xor_key_file.fclose() != 0) { | 
| 1168 | 0 |             throw std::runtime_error{strprintf("Error closing XOR key file %s: %s", | 
| 1169 | 0 |                                                fs::PathToString(xor_key_path), | 
| 1170 | 0 |                                                SysErrorString(errno))}; | 
| 1171 | 0 |         } | 
| 1172 | 0 |     } | 
| 1173 |  |     // If the user disabled the key, it must be zero. | 
| 1174 | 51.2k |     if (!opts.use_xor && obfuscation != decltype(obfuscation){}) { | 
| 1175 | 0 |         throw std::runtime_error{ | 
| 1176 | 0 |             strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! " | 
| 1177 | 0 |                       "Stored key: '%s', stored path: '%s'.", | 
| 1178 | 0 |                       HexStr(obfuscation), fs::PathToString(xor_key_path)), | 
| 1179 | 0 |         }; | 
| 1180 | 0 |     } | 
| 1181 | 51.2k |     LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(obfuscation)); | 
| 1182 | 51.2k |     return Obfuscation{obfuscation}; | 
| 1183 | 51.2k | } | 
| 1184 |  |  | 
| 1185 |  | BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts) | 
| 1186 | 51.2k |     : m_prune_mode{opts.prune_target > 0}, | 
| 1187 | 51.2k |       m_obfuscation{InitBlocksdirXorKey(opts)}, | 
| 1188 | 51.2k |       m_opts{std::move(opts)}, | 
| 1189 | 51.2k |       m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}}, | 
| 1190 | 51.2k |       m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}}, | 
| 1191 | 51.2k |       m_interrupt{interrupt} | 
| 1192 | 51.2k | { | 
| 1193 | 51.2k |     m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params); | 
| 1194 |  |  | 
| 1195 | 51.2k |     if (m_opts.block_tree_db_params.wipe_data) { | 
| 1196 | 0 |         m_block_tree_db->WriteReindexing(true); | 
| 1197 | 0 |         m_blockfiles_indexed = false; | 
| 1198 |  |         // If we're reindexing in prune mode, wipe away unusable block files and all undo data files | 
| 1199 | 0 |         if (m_prune_mode) { | 
| 1200 | 0 |             CleanupBlockRevFiles(); | 
| 1201 | 0 |         } | 
| 1202 | 0 |     } | 
| 1203 | 51.2k | } | 
| 1204 |  |  | 
| 1205 |  | class ImportingNow | 
| 1206 |  | { | 
| 1207 |  |     std::atomic<bool>& m_importing; | 
| 1208 |  |  | 
| 1209 |  | public: | 
| 1210 | 0 |     ImportingNow(std::atomic<bool>& importing) : m_importing{importing} | 
| 1211 | 0 |     { | 
| 1212 | 0 |         assert(m_importing == false); | 
| 1213 | 0 |         m_importing = true; | 
| 1214 | 0 |     } | 
| 1215 |  |     ~ImportingNow() | 
| 1216 | 0 |     { | 
| 1217 | 0 |         assert(m_importing == true); | 
| 1218 | 0 |         m_importing = false; | 
| 1219 | 0 |     } | 
| 1220 |  | }; | 
| 1221 |  |  | 
| 1222 |  | void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths) | 
| 1223 | 0 | { | 
| 1224 | 0 |     ImportingNow imp{chainman.m_blockman.m_importing}; | 
| 1225 |  |  | 
| 1226 |  |     // -reindex | 
| 1227 | 0 |     if (!chainman.m_blockman.m_blockfiles_indexed) { | 
| 1228 | 0 |         int nFile = 0; | 
| 1229 |  |         // Map of disk positions for blocks with unknown parent (only used for reindex); | 
| 1230 |  |         // parent hash -> child disk position, multiple children can have the same parent. | 
| 1231 | 0 |         std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent; | 
| 1232 | 0 |         while (true) { | 
| 1233 | 0 |             FlatFilePos pos(nFile, 0); | 
| 1234 | 0 |             if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) { | 
| 1235 | 0 |                 break; // No block files left to reindex | 
| 1236 | 0 |             } | 
| 1237 | 0 |             AutoFile file{chainman.m_blockman.OpenBlockFile(pos, /*fReadOnly=*/true)}; | 
| 1238 | 0 |             if (file.IsNull()) { | 
| 1239 | 0 |                 break; // This error is logged in OpenBlockFile | 
| 1240 | 0 |             } | 
| 1241 | 0 |             LogInfo("Reindexing block file blk%05u.dat...", (unsigned int)nFile); | 
| 1242 | 0 |             chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent); | 
| 1243 | 0 |             if (chainman.m_interrupt) { | 
| 1244 | 0 |                 LogInfo("Interrupt requested. Exit reindexing."); | 
| 1245 | 0 |                 return; | 
| 1246 | 0 |             } | 
| 1247 | 0 |             nFile++; | 
| 1248 | 0 |         } | 
| 1249 | 0 |         WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false)); | 
| 1250 | 0 |         chainman.m_blockman.m_blockfiles_indexed = true; | 
| 1251 | 0 |         LogInfo("Reindexing finished"); | 
| 1252 |  |         // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked): | 
| 1253 | 0 |         chainman.ActiveChainstate().LoadGenesisBlock(); | 
| 1254 | 0 |     } | 
| 1255 |  |  | 
| 1256 |  |     // -loadblock= | 
| 1257 | 0 |     for (const fs::path& path : import_paths) { | 
| 1258 | 0 |         AutoFile file{fsbridge::fopen(path, "rb")}; | 
| 1259 | 0 |         if (!file.IsNull()) { | 
| 1260 | 0 |             LogInfo("Importing blocks file %s...", fs::PathToString(path)); | 
| 1261 | 0 |             chainman.LoadExternalBlockFile(file); | 
| 1262 | 0 |             if (chainman.m_interrupt) { | 
| 1263 | 0 |                 LogInfo("Interrupt requested. Exit block importing."); | 
| 1264 | 0 |                 return; | 
| 1265 | 0 |             } | 
| 1266 | 0 |         } else { | 
| 1267 | 0 |             LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path)); | 
| 1268 | 0 |         } | 
| 1269 | 0 |     } | 
| 1270 |  |  | 
| 1271 |  |     // scan for better chains in the block chain database, that are not yet connected in the active best chain | 
| 1272 |  |  | 
| 1273 |  |     // We can't hold cs_main during ActivateBestChain even though we're accessing | 
| 1274 |  |     // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve | 
| 1275 |  |     // the relevant pointers before the ABC call. | 
| 1276 | 0 |     for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) { | 
| 1277 | 0 |         BlockValidationState state; | 
| 1278 | 0 |         if (!chainstate->ActivateBestChain(state, nullptr)) { | 
| 1279 | 0 |             chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString())); | 
| 1280 | 0 |             return; | 
| 1281 | 0 |         } | 
| 1282 | 0 |     } | 
| 1283 |  |     // End scope of ImportingNow | 
| 1284 | 0 | } | 
| 1285 |  |  | 
| 1286 | 0 | std::ostream& operator<<(std::ostream& os, const BlockfileType& type) { | 
| 1287 | 0 |     switch(type) { | 
| 1288 | 0 |         case BlockfileType::NORMAL: os << "normal"; break; | 
| 1289 | 0 |         case BlockfileType::ASSUMED: os << "assumed"; break; | 
| 1290 | 0 |         default: os.setstate(std::ios_base::failbit); | 
| 1291 | 0 |     } | 
| 1292 | 0 |     return os; | 
| 1293 | 0 | } | 
| 1294 |  |  | 
| 1295 | 0 | std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) { | 
| 1296 | 0 |     os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height); | 
| 1297 | 0 |     return os; | 
| 1298 | 0 | } | 
| 1299 |  | } // namespace node |