/Users/eugenesiegel/btc/bitcoin/src/validation.cpp
| Line | Count | Source | 
| 1 |  | // Copyright (c) 2009-2010 Satoshi Nakamoto | 
| 2 |  | // Copyright (c) 2009-present The Bitcoin Core developers | 
| 3 |  | // Distributed under the MIT software license, see the accompanying | 
| 4 |  | // file COPYING or http://www.opensource.org/licenses/mit-license.php. | 
| 5 |  |  | 
| 6 |  | #include <bitcoin-build-config.h> // IWYU pragma: keep | 
| 7 |  |  | 
| 8 |  | #include <validation.h> | 
| 9 |  |  | 
| 10 |  | #include <arith_uint256.h> | 
| 11 |  | #include <chain.h> | 
| 12 |  | #include <checkqueue.h> | 
| 13 |  | #include <clientversion.h> | 
| 14 |  | #include <consensus/amount.h> | 
| 15 |  | #include <consensus/consensus.h> | 
| 16 |  | #include <consensus/merkle.h> | 
| 17 |  | #include <consensus/tx_check.h> | 
| 18 |  | #include <consensus/tx_verify.h> | 
| 19 |  | #include <consensus/validation.h> | 
| 20 |  | #include <cuckoocache.h> | 
| 21 |  | #include <flatfile.h> | 
| 22 |  | #include <hash.h> | 
| 23 |  | #include <kernel/chain.h> | 
| 24 |  | #include <kernel/chainparams.h> | 
| 25 |  | #include <kernel/coinstats.h> | 
| 26 |  | #include <kernel/disconnected_transactions.h> | 
| 27 |  | #include <kernel/mempool_entry.h> | 
| 28 |  | #include <kernel/messagestartchars.h> | 
| 29 |  | #include <kernel/notifications_interface.h> | 
| 30 |  | #include <kernel/warning.h> | 
| 31 |  | #include <logging.h> | 
| 32 |  | #include <logging/timer.h> | 
| 33 |  | #include <node/blockstorage.h> | 
| 34 |  | #include <node/utxo_snapshot.h> | 
| 35 |  | #include <policy/ephemeral_policy.h> | 
| 36 |  | #include <policy/policy.h> | 
| 37 |  | #include <policy/rbf.h> | 
| 38 |  | #include <policy/settings.h> | 
| 39 |  | #include <policy/truc_policy.h> | 
| 40 |  | #include <pow.h> | 
| 41 |  | #include <primitives/block.h> | 
| 42 |  | #include <primitives/transaction.h> | 
| 43 |  | #include <random.h> | 
| 44 |  | #include <script/script.h> | 
| 45 |  | #include <script/sigcache.h> | 
| 46 |  | #include <signet.h> | 
| 47 |  | #include <tinyformat.h> | 
| 48 |  | #include <txdb.h> | 
| 49 |  | #include <txmempool.h> | 
| 50 |  | #include <uint256.h> | 
| 51 |  | #include <undo.h> | 
| 52 |  | #include <util/check.h> | 
| 53 |  | #include <util/fs.h> | 
| 54 |  | #include <util/fs_helpers.h> | 
| 55 |  | #include <util/hasher.h> | 
| 56 |  | #include <util/moneystr.h> | 
| 57 |  | #include <util/rbf.h> | 
| 58 |  | #include <util/result.h> | 
| 59 |  | #include <util/signalinterrupt.h> | 
| 60 |  | #include <util/strencodings.h> | 
| 61 |  | #include <util/string.h> | 
| 62 |  | #include <util/time.h> | 
| 63 |  | #include <util/trace.h> | 
| 64 |  | #include <util/translation.h> | 
| 65 |  | #include <validationinterface.h> | 
| 66 |  |  | 
| 67 |  | #include <algorithm> | 
| 68 |  | #include <cassert> | 
| 69 |  | #include <chrono> | 
| 70 |  | #include <deque> | 
| 71 |  | #include <numeric> | 
| 72 |  | #include <optional> | 
| 73 |  | #include <ranges> | 
| 74 |  | #include <span> | 
| 75 |  | #include <string> | 
| 76 |  | #include <tuple> | 
| 77 |  | #include <utility> | 
| 78 |  |  | 
| 79 |  | using kernel::CCoinsStats; | 
| 80 |  | using kernel::CoinStatsHashType; | 
| 81 |  | using kernel::ComputeUTXOStats; | 
| 82 |  | using kernel::Notifications; | 
| 83 |  |  | 
| 84 |  | using fsbridge::FopenFn; | 
| 85 |  | using node::BlockManager; | 
| 86 |  | using node::BlockMap; | 
| 87 |  | using node::CBlockIndexHeightOnlyComparator; | 
| 88 |  | using node::CBlockIndexWorkComparator; | 
| 89 |  | using node::SnapshotMetadata; | 
| 90 |  |  | 
| 91 |  | /** Size threshold for warning about slow UTXO set flush to disk. */ | 
| 92 |  | static constexpr size_t WARN_FLUSH_COINS_SIZE = 1 << 30; // 1 GiB | 
| 93 |  | /** Time window to wait between writing blocks/block index and chainstate to disk. | 
| 94 |  |  *  Randomize writing time inside the window to prevent a situation where the | 
| 95 |  |  *  network over time settles into a few cohorts of synchronized writers. | 
| 96 |  | */ | 
| 97 |  | static constexpr auto DATABASE_WRITE_INTERVAL_MIN{50min}; | 
| 98 |  | static constexpr auto DATABASE_WRITE_INTERVAL_MAX{70min}; | 
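
The randomized write window above can be illustrated with a small, self-contained sketch: the next write time is drawn uniformly from [DATABASE_WRITE_INTERVAL_MIN, DATABASE_WRITE_INTERVAL_MAX] so nodes do not converge on synchronized write times. The std::mt19937_64-based helper and its name are illustrative assumptions only; the scheduling code in this file uses Bitcoin Core's own RNG.

    #include <chrono>
    #include <cstdint>
    #include <random>

    using namespace std::chrono_literals;

    // Illustrative sketch: pick the next chainstate-write time uniformly inside
    // the 50-70 minute window so peers do not settle into synchronized cohorts.
    std::chrono::steady_clock::time_point NextWriteTimeSketch(std::chrono::steady_clock::time_point now)
    {
        static std::mt19937_64 rng{std::random_device{}()};
        std::uniform_int_distribution<int64_t> dist(
            std::chrono::duration_cast<std::chrono::seconds>(50min).count(),
            std::chrono::duration_cast<std::chrono::seconds>(70min).count());
        return now + std::chrono::seconds{dist(rng)};
    }
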
| 99 |  | /** Maximum age of our tip for us to be considered current for fee estimation */ | 
| 100 |  | static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3}; | 
| 101 |  | const std::vector<std::string> CHECKLEVEL_DOC { | 
| 102 |  |     "level 0 reads the blocks from disk", | 
| 103 |  |     "level 1 verifies block validity", | 
| 104 |  |     "level 2 verifies undo data", | 
| 105 |  |     "level 3 checks disconnection of tip blocks", | 
| 106 |  |     "level 4 tries to reconnect the blocks", | 
| 107 |  |     "each level includes the checks of the previous levels", | 
| 108 |  | }; | 
| 109 |  | /** The number of blocks to keep below the deepest prune lock. | 
| 110 |  |  *  There is nothing special about this number. It is higher than what we | 
| 111 |  |  *  expect to see in regular mainnet reorgs, but not so high that it would | 
| 112 |  |  *  noticeably interfere with the pruning mechanism. | 
| 113 |  |  * */ | 
| 114 |  | static constexpr int PRUNE_LOCK_BUFFER{10}; | 
| 115 |  |  | 
| 116 |  | TRACEPOINT_SEMAPHORE(validation, block_connected); | 
| 117 |  | TRACEPOINT_SEMAPHORE(utxocache, flush); | 
| 118 |  | TRACEPOINT_SEMAPHORE(mempool, replaced); | 
| 119 |  | TRACEPOINT_SEMAPHORE(mempool, rejected); | 
| 120 |  |  | 
| 121 |  | const CBlockIndex* Chainstate::FindForkInGlobalIndex(const CBlockLocator& locator) const | 
| 122 | 0 | { | 
| 123 | 0 |     AssertLockHeld(cs_main); | 
| 124 |  |  | 
| 125 |  |     // Find the latest block common to locator and chain - we expect that | 
| 126 |  |     // locator.vHave is sorted descending by height. | 
| 127 | 0 |     for (const uint256& hash : locator.vHave) { | 
| 128 | 0 |         const CBlockIndex* pindex{m_blockman.LookupBlockIndex(hash)}; | 
| 129 | 0 |         if (pindex) { | 
| 130 | 0 |             if (m_chain.Contains(pindex)) { | 
| 131 | 0 |                 return pindex; | 
| 132 | 0 |             } | 
| 133 | 0 |             if (pindex->GetAncestor(m_chain.Height()) == m_chain.Tip()) { | 
| 134 | 0 |                 return m_chain.Tip(); | 
| 135 | 0 |             } | 
| 136 | 0 |         } | 
| 137 | 0 |     } | 
| 138 | 0 |     return m_chain.Genesis(); | 
| 139 | 0 | } | 
| 140 |  |  | 
| 141 |  | bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, | 
| 142 |  |                        const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, | 
| 143 |  |                        bool cacheFullScriptStore, PrecomputedTransactionData& txdata, | 
| 144 |  |                        ValidationCache& validation_cache, | 
| 145 |  |                        std::vector<CScriptCheck>* pvChecks = nullptr) | 
| 146 |  |                        EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 147 |  |  | 
| 148 |  | bool CheckFinalTxAtTip(const CBlockIndex& active_chain_tip, const CTransaction& tx) | 
| 149 | 719k | { | 
| 150 | 719k |     AssertLockHeld(cs_main); | 
| 151 |  |  | 
| 152 |  |     // CheckFinalTxAtTip() uses active_chain_tip.Height()+1 to evaluate | 
| 153 |  |     // nLockTime because when IsFinalTx() is called within | 
| 154 |  |     // AcceptBlock(), the height of the block *being* | 
| 155 |  |     // evaluated is what is used. Thus if we want to know if a | 
| 156 |  |     // transaction can be part of the *next* block, we need to call | 
| 157 |  |     // IsFinalTx() with one more than active_chain_tip.Height(). | 
| 158 | 719k |     const int nBlockHeight = active_chain_tip.nHeight + 1; | 
| 159 |  |  | 
| 160 |  |     // BIP113 requires that time-locked transactions have nLockTime set to | 
| 161 |  |     // less than the median time of the previous block they're contained in. | 
| 162 |  |     // When the next block is created its previous block will be the current | 
| 163 |  |     // chain tip, so we use that to calculate the median time passed to | 
| 164 |  |     // IsFinalTx(). | 
| 165 | 719k |     const int64_t nBlockTime{active_chain_tip.GetMedianTimePast()}; | 
| 166 |  |  | 
| 167 | 719k |     return IsFinalTx(tx, nBlockHeight, nBlockTime); | 
| 168 | 719k | } | 
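
CheckFinalTxAtTip() therefore asks whether the transaction would be final in a block at height tip+1 whose BIP113 cutoff is the current tip's median time past. The underlying nLockTime rule can be sketched with simplified stand-in types; the structs and names below are illustrative, not Bitcoin Core's CTransaction.

    #include <cstdint>
    #include <vector>

    // Simplified stand-ins for the consensus rule that IsFinalTx() applies.
    static constexpr uint32_t LOCKTIME_THRESHOLD_SKETCH{500000000}; // below: height lock, at/above: unix-time lock
    static constexpr uint32_t SEQUENCE_FINAL_SKETCH{0xffffffff};

    struct TxInSketch { uint32_t nSequence; };
    struct TxSketch { uint32_t nLockTime; std::vector<TxInSketch> vin; };

    // next_block_height is tip height + 1 and cutoff_time is the tip's median
    // time past, matching the arguments CheckFinalTxAtTip() passes above.
    bool IsFinalSketch(const TxSketch& tx, int next_block_height, int64_t cutoff_time)
    {
        if (tx.nLockTime == 0) return true;
        const int64_t cutoff = tx.nLockTime < LOCKTIME_THRESHOLD_SKETCH ? next_block_height : cutoff_time;
        if (int64_t{tx.nLockTime} < cutoff) return true;
        // A still-locked nLockTime is ignored only if every input opted out by
        // setting its sequence to the final value.
        for (const auto& in : tx.vin) {
            if (in.nSequence != SEQUENCE_FINAL_SKETCH) return false;
        }
        return true;
    }
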
| 169 |  |  | 
| 170 |  | namespace { | 
| 171 |  | /** | 
| 172 |  |  * A helper which calculates heights of inputs of a given transaction. | 
| 173 |  |  * | 
| 174 |  |  * @param[in] tip    The current chain tip. If an input belongs to a mempool | 
| 175 |  |  *                   transaction, we assume it will be confirmed in the next block. | 
| 176 |  |  * @param[in] coins  Any CCoinsView that provides access to the relevant coins. | 
| 177 |  |  * @param[in] tx     The transaction being evaluated. | 
| 178 |  |  * | 
| 179 |  |  * @returns A vector of input heights or nullopt, in case of an error. | 
| 180 |  |  */ | 
| 181 |  | std::optional<std::vector<int>> CalculatePrevHeights( | 
| 182 |  |     const CBlockIndex& tip, | 
| 183 |  |     const CCoinsView& coins, | 
| 184 |  |     const CTransaction& tx) | 
| 185 | 654k | { | 
| 186 | 654k |     std::vector<int> prev_heights; | 
| 187 | 654k |     prev_heights.resize(tx.vin.size()); | 
| 188 | 1.30M |     for (size_t i = 0; i < tx.vin.size(); ++i) { | 
| 189 | 654k |         if (auto coin{coins.GetCoin(tx.vin[i].prevout)}) { | 
| 190 | 654k |             prev_heights[i] = coin->nHeight == MEMPOOL_HEIGHT | 
| 191 | 654k |                               ? tip.nHeight + 1 // Assume all mempool transaction confirm in the next block. | 
| 192 | 654k |                               : coin->nHeight; | 
| 193 | 654k |         } else { | 
| 194 | 0 |             LogPrintf("ERROR: %s: Missing input %d in transaction \'%s\'\n", __func__, i, tx.GetHash().GetHex()); | 
| 195 | 0 |             return std::nullopt; | 
| 196 | 0 |         } | 
| 197 | 654k |     } | 
| 198 | 654k |     return prev_heights; | 
| 199 | 654k | } | 
| 200 |  | } // namespace | 
| 201 |  |  | 
| 202 |  | std::optional<LockPoints> CalculateLockPointsAtTip( | 
| 203 |  |     CBlockIndex* tip, | 
| 204 |  |     const CCoinsView& coins_view, | 
| 205 |  |     const CTransaction& tx) | 
| 206 | 654k | { | 
| 207 | 654k |     assert(tip); | 
| 208 |  |  | 
| 209 | 654k |     auto prev_heights{CalculatePrevHeights(*tip, coins_view, tx)}; | 
| 210 | 654k |     if (!prev_heights.has_value()) return std::nullopt; | 
| 211 |  |  | 
| 212 | 654k |     CBlockIndex next_tip; | 
| 213 | 654k |     next_tip.pprev = tip; | 
| 214 |  |     // When SequenceLocks() is called within ConnectBlock(), the height | 
| 215 |  |     // of the block *being* evaluated is what is used. | 
| 216 |  |     // Thus if we want to know if a transaction can be part of the | 
| 217 |  |     // *next* block, we need to use one more than active_chainstate.m_chain.Height() | 
| 218 | 654k |     next_tip.nHeight = tip->nHeight + 1; | 
| 219 | 654k |     const auto [min_height, min_time] = CalculateSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, prev_heights.value(), next_tip); | 
| 220 |  |  | 
| 221 |  |     // Also store the hash of the block with the highest height of | 
| 222 |  |     // all the blocks which have sequence locked prevouts. | 
| 223 |  |     // This hash needs to still be on the chain | 
| 224 |  |     // for these LockPoint calculations to be valid | 
| 225 |  |     // Note: It is impossible to correctly calculate a maxInputBlock | 
| 226 |  |     // if any of the sequence locked inputs depend on unconfirmed txs, | 
| 227 |  |     // except in the special case where the relative lock time/height | 
| 228 |  |     // is 0, which is equivalent to no sequence lock. Since we assume | 
| 229 |  |     // input height of tip+1 for mempool txs and test the resulting | 
| 230 |  |     // min_height and min_time from CalculateSequenceLocks against tip+1. | 
| 231 | 654k |     int max_input_height{0}; | 
| 232 | 654k |     for (const int height : prev_heights.value()) { | 
| 233 |  |         // Can ignore mempool inputs since we'll fail if they had non-zero locks | 
| 234 | 654k |         if (height != next_tip.nHeight) { | 
| 235 | 640k |             max_input_height = std::max(max_input_height, height); | 
| 236 | 640k |         } | 
| 237 | 654k |     } | 
| 238 |  |  | 
| 239 |  |     // tip->GetAncestor(max_input_height) should never return a nullptr | 
| 240 |  |     // because max_input_height is always less than the tip height. | 
| 241 |  |     // It would, however, be a bad bug to continue execution, since a | 
| 242 |  |     // LockPoints object with the maxInputBlock member set to nullptr | 
| 243 |  |     // signifies no relative lock time. | 
| 244 | 654k |     return LockPoints{min_height, min_time, Assert(tip->GetAncestor(max_input_height))}; | 
| 245 | 654k | } | 
| 246 |  |  | 
| 247 |  | bool CheckSequenceLocksAtTip(CBlockIndex* tip, | 
| 248 |  |                              const LockPoints& lock_points) | 
| 249 | 654k | { | 
| 250 | 654k |     assert(tip != nullptr); | 
| 251 |  |  | 
| 252 | 654k |     CBlockIndex index; | 
| 253 | 654k |     index.pprev = tip; | 
| 254 |  |     // CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to evaluate | 
| 255 |  |     // height based locks because when SequenceLocks() is called within | 
| 256 |  |     // ConnectBlock(), the height of the block *being* | 
| 257 |  |     // evaluated is what is used. | 
| 258 |  |     // Thus if we want to know if a transaction can be part of the | 
| 259 |  |     // *next* block, we need to use one more than active_chainstate.m_chain.Height() | 
| 260 | 654k |     index.nHeight = tip->nHeight + 1; | 
| 261 |  |  | 
| 262 | 654k |     return EvaluateSequenceLocks(index, {lock_points.height, lock_points.time}); | 
| 263 | 654k | } | 
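
Taken together, CalculateLockPointsAtTip() collapses the BIP68 relative locks into an absolute (minimum height, minimum time) pair, and CheckSequenceLocksAtTip() tests that pair against a hypothetical block at tip height + 1. The test itself reduces to two comparisons, sketched below with plain integers standing in for CBlockIndex data; the names are illustrative and this is not the EvaluateSequenceLocks() signature.

    #include <cstdint>

    // The lockpoints are satisfied only if both the height lock and the time
    // lock are strictly below the values of the block that would include the
    // transaction (tip height + 1, and the corresponding median time past).
    bool SequenceLocksSatisfiedSketch(int lock_min_height, int64_t lock_min_time,
                                      int next_block_height, int64_t next_block_mtp)
    {
        if (lock_min_height >= next_block_height) return false; // height-based lock not yet met
        if (lock_min_time >= next_block_mtp) return false;      // time-based lock not yet met
        return true;
    }
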
| 264 |  |  | 
| 265 |  | // Returns the script flags which should be checked for a given block | 
| 266 |  | static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const ChainstateManager& chainman); | 
| 267 |  |  | 
| 268 |  | static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache) | 
| 269 |  |     EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs) | 
| 270 | 493k | { | 
| 271 | 493k |     AssertLockHeld(::cs_main); | 
| 272 | 493k |     AssertLockHeld(pool.cs); | 
| 273 | 493k |     int expired = pool.Expire(GetTime<std::chrono::seconds>() - pool.m_opts.expiry); | 
| 274 | 493k |     if (expired != 0) { | 
| 275 | 3.63k |         LogDebug(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); | 
| 276 | 3.63k |     } | 
| 277 |  |  | 
| 278 | 493k |     std::vector<COutPoint> vNoSpendsRemaining; | 
| 279 | 493k |     pool.TrimToSize(pool.m_opts.max_size_bytes, &vNoSpendsRemaining); | 
| 280 | 493k |     for (const COutPoint& removed : vNoSpendsRemaining) | 
| 281 | 0 |         coins_cache.Uncache(removed); | 
| 282 | 493k | } | 
| 283 |  |  | 
| 284 |  | static bool IsCurrentForFeeEstimation(Chainstate& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main) | 
| 285 | 491k | { | 
| 286 | 491k |     AssertLockHeld(cs_main); | 
| 287 | 491k |     if (active_chainstate.m_chainman.IsInitialBlockDownload()) { | 
| 288 | 0 |         return false; | 
| 289 | 0 |     } | 
| 290 | 491k |     if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE)) | 
| 291 | 6.77k |         return false; | 
| 292 | 484k |     if (active_chainstate.m_chain.Height() < active_chainstate.m_chainman.m_best_header->nHeight - 1) { | 
| 293 | 392 |         return false; | 
| 294 | 392 |     } | 
| 295 | 483k |     return true; | 
| 296 | 484k | } | 
| 297 |  |  | 
| 298 |  | void Chainstate::MaybeUpdateMempoolForReorg( | 
| 299 |  |     DisconnectedBlockTransactions& disconnectpool, | 
| 300 |  |     bool fAddToMempool) | 
| 301 | 78 | { | 
| 302 | 78 |     if (!m_mempool) return; | 
| 303 |  |  | 
| 304 | 78 |     AssertLockHeld(cs_main); | 
| 305 | 78 |     AssertLockHeld(m_mempool->cs); | 
| 306 | 78 |     std::vector<Txid> vHashUpdate; | 
| 307 | 78 |     { | 
| 308 |  |         // disconnectpool is ordered so that the front is the most recently-confirmed | 
| 309 |  |         // transaction (the last tx of the block at the tip) in the disconnected chain. | 
| 310 |  |         // Iterate disconnectpool in reverse, so that we add transactions | 
| 311 |  |         // back to the mempool starting with the earliest transaction that had | 
| 312 |  |         // been previously seen in a block. | 
| 313 | 78 |         const auto queuedTx = disconnectpool.take(); | 
| 314 | 78 |         auto it = queuedTx.rbegin(); | 
| 315 | 156 |         while (it != queuedTx.rend()) { | 
| 316 |  |             // ignore validation errors in resurrected transactions | 
| 317 | 78 |             if (!fAddToMempool || (*it)->IsCoinBase() || | 
| 318 | 78 |                 AcceptToMemoryPool(*this, *it, GetTime(), | 
| 319 | 0 |                     /*bypass_limits=*/true, /*test_accept=*/false).m_result_type != | 
| 320 | 78 |                         MempoolAcceptResult::ResultType::VALID) { | 
| 321 |  |                 // If the transaction doesn't make it in to the mempool, remove any | 
| 322 |  |                 // transactions that depend on it (which would now be orphans). | 
| 323 | 78 |                 m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG); | 
| 324 | 78 |             } else if (m_mempool->exists((*it)->GetHash())) { | 
| 325 | 0 |                 vHashUpdate.push_back((*it)->GetHash()); | 
| 326 | 0 |             } | 
| 327 | 78 |             ++it; | 
| 328 | 78 |         } | 
| 329 | 78 |     } | 
| 330 |  |  | 
| 331 |  |     // AcceptToMemoryPool/addNewTransaction all assume that new mempool entries have | 
| 332 |  |     // no in-mempool children, which is generally not true when adding | 
| 333 |  |     // previously-confirmed transactions back to the mempool. | 
| 334 |  |     // UpdateTransactionsFromBlock finds descendants of any transactions in | 
| 335 |  |     // the disconnectpool that were added back and cleans up the mempool state. | 
| 336 | 78 |     m_mempool->UpdateTransactionsFromBlock(vHashUpdate); | 
| 337 |  |  | 
| 338 |  |     // Predicate to use for filtering transactions in removeForReorg. | 
| 339 |  |     // Checks whether the transaction is still final and, if it spends a coinbase output, mature. | 
| 340 |  |     // Also updates valid entries' cached LockPoints if needed. | 
| 341 |  |     // If false, the tx is still valid and its lockpoints are updated. | 
| 342 |  |     // If true, the tx would be invalid in the next block; remove this entry and all of its descendants. | 
| 343 |  |     // Note that TRUC rules are not applied here, so reorgs may cause violations of TRUC inheritance or | 
| 344 |  |     // topology restrictions. | 
| 345 | 78 |     const auto filter_final_and_mature = [&](CTxMemPool::txiter it) | 
| 346 | 78 |         EXCLUSIVE_LOCKS_REQUIRED(m_mempool->cs, ::cs_main) { | 
| 347 | 0 |         AssertLockHeld(m_mempool->cs); | 
| 348 | 0 |         AssertLockHeld(::cs_main); | 
| 349 | 0 |         const CTransaction& tx = it->GetTx(); | 
| 350 |  |  | 
| 351 |  |         // The transaction must be final. | 
| 352 | 0 |         if (!CheckFinalTxAtTip(*Assert(m_chain.Tip()), tx)) return true; | 
| 353 |  |  | 
| 354 | 0 |         const LockPoints& lp = it->GetLockPoints(); | 
| 355 |  |         // CheckSequenceLocksAtTip checks if the transaction will be final in the next block to be | 
| 356 |  |         // created on top of the new chain. | 
| 357 | 0 |         if (TestLockPointValidity(m_chain, lp)) { | 
| 358 | 0 |             if (!CheckSequenceLocksAtTip(m_chain.Tip(), lp)) { | 
| 359 | 0 |                 return true; | 
| 360 | 0 |             } | 
| 361 | 0 |         } else { | 
| 362 | 0 |             const CCoinsViewMemPool view_mempool{&CoinsTip(), *m_mempool}; | 
| 363 | 0 |             const std::optional<LockPoints> new_lock_points{CalculateLockPointsAtTip(m_chain.Tip(), view_mempool, tx)}; | 
| 364 | 0 |             if (new_lock_points.has_value() && CheckSequenceLocksAtTip(m_chain.Tip(), *new_lock_points)) { | 
| 365 |  |                 // Now update the mempool entry lockpoints as well. | 
| 366 | 0 |                 it->UpdateLockPoints(*new_lock_points); | 
| 367 | 0 |             } else { | 
| 368 | 0 |                 return true; | 
| 369 | 0 |             } | 
| 370 | 0 |         } | 
| 371 |  |  | 
| 372 |  |         // If the transaction spends any coinbase outputs, it must be mature. | 
| 373 | 0 |         if (it->GetSpendsCoinbase()) { | 
| 374 | 0 |             for (const CTxIn& txin : tx.vin) { | 
| 375 | 0 |                 if (m_mempool->exists(txin.prevout.hash)) continue; | 
| 376 | 0 |                 const Coin& coin{CoinsTip().AccessCoin(txin.prevout)}; | 
| 377 | 0 |                 assert(!coin.IsSpent()); | 
| 378 | 0 |                 const auto mempool_spend_height{m_chain.Tip()->nHeight + 1}; | 
| 379 | 0 |                 if (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY) { | 
| 380 | 0 |                     return true; | 
| 381 | 0 |                 } | 
| 382 | 0 |             } | 
| 383 | 0 |         } | 
| 384 |  |         // Transaction is still valid and cached LockPoints are updated. | 
| 385 | 0 |         return false; | 
| 386 | 0 |     }; | 
| 387 |  |  | 
| 388 |  |     // We also need to remove any now-immature transactions | 
| 389 | 78 |     m_mempool->removeForReorg(m_chain, filter_final_and_mature); | 
| 390 |  |     // Re-limit mempool size, in case we added any transactions | 
| 391 | 78 |     LimitMempoolSize(*m_mempool, this->CoinsTip()); | 
| 392 | 78 | } | 
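
The re-add loop above relies only on the ordering guarantee documented in its comment: disconnectpool keeps the most recently confirmed transaction at the front, so walking it with reverse iterators resubmits the earliest-confirmed (parent) transactions first. A minimal, illustrative sketch of that ordering with a std::deque (not the real DisconnectedBlockTransactions type):

    #include <deque>
    #include <string>
    #include <vector>

    // front() is the last tx of the disconnected tip block; rbegin()..rend()
    // therefore yields transactions oldest-first, so parents come back before
    // any children that spend them.
    std::vector<std::string> ResubmissionOrderSketch(const std::deque<std::string>& disconnected)
    {
        std::vector<std::string> order;
        for (auto it = disconnected.rbegin(); it != disconnected.rend(); ++it) {
            order.push_back(*it);
        }
        return order;
    }
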
| 393 |  |  | 
| 394 |  | /** | 
| 395 |  | * Checks to avoid mempool polluting consensus critical paths since cached | 
| 396 |  | * signature and script validity results will be reused if we validate this | 
| 397 |  | * transaction again during block validation. | 
| 398 |  | * */ | 
| 399 |  | static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, | 
| 400 |  |                 const CCoinsViewCache& view, const CTxMemPool& pool, | 
| 401 |  |                 unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip, | 
| 402 |  |                 ValidationCache& validation_cache) | 
| 403 |  |                 EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) | 
| 404 | 493k | { | 
| 405 | 493k |     AssertLockHeld(cs_main); | 
| 406 | 493k |     AssertLockHeld(pool.cs); | 
| 407 |  |  | 
| 408 | 493k |     assert(!tx.IsCoinBase()); | 
| 409 | 493k |     for (const CTxIn& txin : tx.vin) { | 
| 410 | 493k |         const Coin& coin = view.AccessCoin(txin.prevout); | 
| 411 |  |  | 
| 412 |  |         // This coin was checked in PreChecks and MemPoolAccept | 
| 413 |  |         // has been holding cs_main since then. | 
| 414 | 493k |         Assume(!coin.IsSpent()); | 
| 415 | 493k |         if (coin.IsSpent()) return false; | 
| 416 |  |  | 
| 417 |  |         // If the Coin is available, there are 2 possibilities: | 
| 418 |  |         // it is available in our current ChainstateActive UTXO set, | 
| 419 |  |         // or it's a UTXO provided by a transaction in our mempool. | 
| 420 |  |         // Ensure the scriptPubKeys in Coins from CoinsView are correct. | 
| 421 | 493k |         const CTransactionRef& txFrom = pool.get(txin.prevout.hash); | 
| 422 | 493k |         if (txFrom) { | 
| 423 | 319k |             assert(txFrom->GetHash() == txin.prevout.hash); | 
| 424 | 319k |             assert(txFrom->vout.size() > txin.prevout.n); | 
| 425 | 319k |             assert(txFrom->vout[txin.prevout.n] == coin.out); | 
| 426 | 319k |         } else { | 
| 427 | 173k |             const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout); | 
| 428 | 173k |             assert(!coinFromUTXOSet.IsSpent()); | 
| 429 | 173k |             assert(coinFromUTXOSet.out == coin.out); | 
| 430 | 173k |         } | 
| 431 | 493k |     } | 
| 432 |  |  | 
| 433 |  |     // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules. | 
| 434 | 493k |     return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata, validation_cache); | 
| 435 | 493k | } | 
| 436 |  |  | 
| 437 |  | namespace { | 
| 438 |  |  | 
| 439 |  | class MemPoolAccept | 
| 440 |  | { | 
| 441 |  | public: | 
| 442 |  |     explicit MemPoolAccept(CTxMemPool& mempool, Chainstate& active_chainstate) : | 
| 443 | 719k |         m_pool(mempool), | 
| 444 | 719k |         m_view(&m_dummy), | 
| 445 | 719k |         m_viewmempool(&active_chainstate.CoinsTip(), m_pool), | 
| 446 | 719k |         m_active_chainstate(active_chainstate) | 
| 447 | 719k |     { | 
| 448 | 719k |     } | 
| 449 |  |  | 
| 450 |  |     // We put the arguments we're handed into a struct, so we can pass them | 
| 451 |  |     // around easier. | 
| 452 |  |     struct ATMPArgs { | 
| 453 |  |         const CChainParams& m_chainparams; | 
| 454 |  |         const int64_t m_accept_time; | 
| 455 |  |         const bool m_bypass_limits; | 
| 456 |  |         /* | 
| 457 |  |          * Return any outpoints which were not previously present in the coins | 
| 458 |  |          * cache, but were added as a result of validating the tx for mempool | 
| 459 |  |          * acceptance. This allows the caller to optionally remove the cache | 
| 460 |  |          * additions if the associated transaction ends up being rejected by | 
| 461 |  |          * the mempool. | 
| 462 |  |          */ | 
| 463 |  |         std::vector<COutPoint>& m_coins_to_uncache; | 
| 464 |  |         /** When true, the transaction or package will not be submitted to the mempool. */ | 
| 465 |  |         const bool m_test_accept; | 
| 466 |  |         /** Whether we allow transactions to replace mempool transactions. If false, | 
| 467 |  |          * any transaction spending the same inputs as a transaction in the mempool is considered | 
| 468 |  |          * a conflict. */ | 
| 469 |  |         const bool m_allow_replacement; | 
| 470 |  |         /** When true, allow sibling eviction. This only occurs in single transaction package settings. */ | 
| 471 |  |         const bool m_allow_sibling_eviction; | 
| 472 |  |         /** Used to skip the LimitMempoolSize() call within AcceptSingleTransaction(). This should be used when multiple | 
| 473 |  |          * AcceptSubPackage calls are expected and the mempool will be trimmed at the end of AcceptPackage(). */ | 
| 474 |  |         const bool m_package_submission; | 
| 475 |  |         /** When true, use package feerates instead of individual transaction feerates for fee-based | 
| 476 |  |          * policies such as mempool min fee and min relay fee. | 
| 477 |  |          */ | 
| 478 |  |         const bool m_package_feerates; | 
| 479 |  |         /** Used for local submission of transactions to catch "absurd" fees | 
| 480 |  |          * due to fee miscalculation by wallets. std::nullopt implies unset, allowing any feerates. | 
| 481 |  |          * Any individual transaction failing this check causes immediate failure. | 
| 482 |  |          */ | 
| 483 |  |         const std::optional<CFeeRate> m_client_maxfeerate; | 
| 484 |  |  | 
| 485 |  |         /** Whether CPFP carveout and RBF carveout are granted. */ | 
| 486 |  |         const bool m_allow_carveouts; | 
| 487 |  |  | 
| 488 |  |         /** Parameters for single transaction mempool validation. */ | 
| 489 |  |         static ATMPArgs SingleAccept(const CChainParams& chainparams, int64_t accept_time, | 
| 490 |  |                                      bool bypass_limits, std::vector<COutPoint>& coins_to_uncache, | 
| 491 | 719k |                                      bool test_accept) { | 
| 492 | 719k |             return ATMPArgs{/* m_chainparams */ chainparams, | 
| 493 | 719k |                             /* m_accept_time */ accept_time, | 
| 494 | 719k |                             /* m_bypass_limits */ bypass_limits, | 
| 495 | 719k |                             /* m_coins_to_uncache */ coins_to_uncache, | 
| 496 | 719k |                             /* m_test_accept */ test_accept, | 
| 497 | 719k |                             /* m_allow_replacement */ true, | 
| 498 | 719k |                             /* m_allow_sibling_eviction */ true, | 
| 499 | 719k |                             /* m_package_submission */ false, | 
| 500 | 719k |                             /* m_package_feerates */ false, | 
| 501 | 719k |                             /* m_client_maxfeerate */ {}, // checked by caller | 
| 502 | 719k |                             /* m_allow_carveouts */ true, | 
| 503 | 719k |             }; | 
| 504 | 719k |         } | 
| 505 |  |  | 
| 506 |  |         /** Parameters for test package mempool validation through testmempoolaccept. */ | 
| 507 |  |         static ATMPArgs PackageTestAccept(const CChainParams& chainparams, int64_t accept_time, | 
| 508 | 0 |                                           std::vector<COutPoint>& coins_to_uncache) { | 
| 509 | 0 |             return ATMPArgs{/* m_chainparams */ chainparams, | 
| 510 | 0 |                             /* m_accept_time */ accept_time, | 
| 511 | 0 |                             /* m_bypass_limits */ false, | 
| 512 | 0 |                             /* m_coins_to_uncache */ coins_to_uncache, | 
| 513 | 0 |                             /* m_test_accept */ true, | 
| 514 | 0 |                             /* m_allow_replacement */ false, | 
| 515 | 0 |                             /* m_allow_sibling_eviction */ false, | 
| 516 | 0 |                             /* m_package_submission */ false, // not submitting to mempool | 
| 517 | 0 |                             /* m_package_feerates */ false, | 
| 518 | 0 |                             /* m_client_maxfeerate */ {}, // checked by caller | 
| 519 | 0 |                             /* m_allow_carveouts */ false, | 
| 520 | 0 |             }; | 
| 521 | 0 |         } | 
| 522 |  |  | 
| 523 |  |         /** Parameters for child-with-parents package validation. */ | 
| 524 |  |         static ATMPArgs PackageChildWithParents(const CChainParams& chainparams, int64_t accept_time, | 
| 525 | 0 |                                                 std::vector<COutPoint>& coins_to_uncache, const std::optional<CFeeRate>& client_maxfeerate) { | 
| 526 | 0 |             return ATMPArgs{/* m_chainparams */ chainparams, | 
| 527 | 0 |                             /* m_accept_time */ accept_time, | 
| 528 | 0 |                             /* m_bypass_limits */ false, | 
| 529 | 0 |                             /* m_coins_to_uncache */ coins_to_uncache, | 
| 530 | 0 |                             /* m_test_accept */ false, | 
| 531 | 0 |                             /* m_allow_replacement */ true, | 
| 532 | 0 |                             /* m_allow_sibling_eviction */ false, | 
| 533 | 0 |                             /* m_package_submission */ true, | 
| 534 | 0 |                             /* m_package_feerates */ true, | 
| 535 | 0 |                             /* m_client_maxfeerate */ client_maxfeerate, | 
| 536 | 0 |                             /* m_allow_carveouts */ false, | 
| 537 | 0 |             }; | 
| 538 | 0 |         } | 
| 539 |  |  | 
| 540 |  |         /** Parameters for a single transaction within a package. */ | 
| 541 | 0 |         static ATMPArgs SingleInPackageAccept(const ATMPArgs& package_args) { | 
| 542 | 0 |             return ATMPArgs{/* m_chainparams */ package_args.m_chainparams, | 
| 543 | 0 |                             /* m_accept_time */ package_args.m_accept_time, | 
| 544 | 0 |                             /* m_bypass_limits */ false, | 
| 545 | 0 |                             /* m_coins_to_uncache */ package_args.m_coins_to_uncache, | 
| 546 | 0 |                             /* m_test_accept */ package_args.m_test_accept, | 
| 547 | 0 |                             /* m_allow_replacement */ true, | 
| 548 | 0 |                             /* m_allow_sibling_eviction */ true, | 
| 549 | 0 |                             /* m_package_submission */ true, // trim at the end of AcceptPackage() | 
| 550 | 0 |                             /* m_package_feerates */ false, // only 1 transaction | 
| 551 | 0 |                             /* m_client_maxfeerate */ package_args.m_client_maxfeerate, | 
| 552 | 0 |                             /* m_allow_carveouts */ false, | 
| 553 | 0 |             }; | 
| 554 | 0 |         } | 
| 555 |  |  | 
| 556 |  |     private: | 
| 557 |  |         // Private ctor to avoid exposing details to clients and allowing the possibility of | 
| 558 |  |         // mixing up the order of the arguments. Use static functions above instead. | 
| 559 |  |         ATMPArgs(const CChainParams& chainparams, | 
| 560 |  |                  int64_t accept_time, | 
| 561 |  |                  bool bypass_limits, | 
| 562 |  |                  std::vector<COutPoint>& coins_to_uncache, | 
| 563 |  |                  bool test_accept, | 
| 564 |  |                  bool allow_replacement, | 
| 565 |  |                  bool allow_sibling_eviction, | 
| 566 |  |                  bool package_submission, | 
| 567 |  |                  bool package_feerates, | 
| 568 |  |                  std::optional<CFeeRate> client_maxfeerate, | 
| 569 |  |                  bool allow_carveouts) | 
| 570 | 719k |             : m_chainparams{chainparams}, | 
| 571 | 719k |               m_accept_time{accept_time}, | 
| 572 | 719k |               m_bypass_limits{bypass_limits}, | 
| 573 | 719k |               m_coins_to_uncache{coins_to_uncache}, | 
| 574 | 719k |               m_test_accept{test_accept}, | 
| 575 | 719k |               m_allow_replacement{allow_replacement}, | 
| 576 | 719k |               m_allow_sibling_eviction{allow_sibling_eviction}, | 
| 577 | 719k |               m_package_submission{package_submission}, | 
| 578 | 719k |               m_package_feerates{package_feerates}, | 
| 579 | 719k |               m_client_maxfeerate{client_maxfeerate}, | 
| 580 | 719k |               m_allow_carveouts{allow_carveouts} | 
| 581 | 719k |         { | 
| 582 |  |             // If we are using package feerates, we must be doing package submission. | 
| 583 |  |             // It also means carveouts and sibling eviction are not permitted. | 
| 584 | 719k |             if (m_package_feerates) { | 
| 585 | 0 |                 Assume(m_package_submission); | 
| 586 | 0 |                 Assume(!m_allow_carveouts); | 
| 587 | 0 |                 Assume(!m_allow_sibling_eviction); | 
| 588 | 0 |             } | 
| 589 | 719k |             if (m_allow_sibling_eviction) Assume(m_allow_replacement); | 
| 590 | 719k |         } | 
| 591 |  |     }; | 
| 592 |  |  | 
| 593 |  |     /** Clean up all non-chainstate coins from m_view and m_viewmempool. */ | 
| 594 |  |     void CleanupTemporaryCoins() EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 595 |  |  | 
| 596 |  |     // Single transaction acceptance | 
| 597 |  |     MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 598 |  |  | 
| 599 |  |     /** | 
| 600 |  |     * Multiple transaction acceptance. Transactions may or may not be interdependent, but must not | 
| 601 |  |     * conflict with each other, and the transactions cannot already be in the mempool. Parents must | 
| 602 |  |     * come before children if any dependencies exist. | 
| 603 |  |     */ | 
| 604 |  |     PackageMempoolAcceptResult AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 605 |  |  | 
| 606 |  |     /** | 
| 607 |  |      * Submission of a subpackage. | 
| 608 |  |      * If subpackage size == 1, calls AcceptSingleTransaction() with adjusted ATMPArgs to avoid | 
| 609 |  |      * package policy restrictions like no CPFP carve out (PackageMempoolChecks) | 
| 610 |  |      * and creates a PackageMempoolAcceptResult wrapping the result. | 
| 611 |  |      * | 
| 612 |  |      * If subpackage size > 1, calls AcceptMultipleTransactions() with the provided ATMPArgs. | 
| 613 |  |      * | 
| 614 |  |      * Also cleans up all non-chainstate coins from m_view at the end. | 
| 615 |  |     */ | 
| 616 |  |     PackageMempoolAcceptResult AcceptSubPackage(const std::vector<CTransactionRef>& subpackage, ATMPArgs& args) | 
| 617 |  |         EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 618 |  |  | 
| 619 |  |     /** | 
| 620 |  |      * Package (more specific than just multiple transactions) acceptance. Package must be a child | 
| 621 |  |      * with all of its unconfirmed parents, and topologically sorted. | 
| 622 |  |      */ | 
| 623 |  |     PackageMempoolAcceptResult AcceptPackage(const Package& package, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 624 |  |  | 
| 625 |  | private: | 
| 626 |  |     // All the intermediate state that gets passed between the various levels | 
| 627 |  |     // of checking a given transaction. | 
| 628 |  |     struct Workspace { | 
| 629 | 719k |         explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {} | 
| 630 |  |         /** Txids of mempool transactions that this transaction directly conflicts with or may | 
| 631 |  |          * replace via sibling eviction. */ | 
| 632 |  |         std::set<Txid> m_conflicts; | 
| 633 |  |         /** Iterators to mempool entries that this transaction directly conflicts with or may | 
| 634 |  |          * replace via sibling eviction. */ | 
| 635 |  |         CTxMemPool::setEntries m_iters_conflicting; | 
| 636 |  |         /** All mempool ancestors of this transaction. */ | 
| 637 |  |         CTxMemPool::setEntries m_ancestors; | 
| 638 |  |         /* Handle to the tx in the changeset */ | 
| 639 |  |         CTxMemPool::ChangeSet::TxHandle m_tx_handle; | 
| 640 |  |         /** Whether RBF-related data structures (m_conflicts, m_iters_conflicting, | 
| 641 |  |          * m_replaced_transactions) include a sibling in addition to txns with conflicting inputs. */ | 
| 642 |  |         bool m_sibling_eviction{false}; | 
| 643 |  |  | 
| 644 |  |         /** Virtual size of the transaction as used by the mempool, calculated using serialized size | 
| 645 |  |          * of the transaction and sigops. */ | 
| 646 |  |         int64_t m_vsize; | 
| 647 |  |         /** Fees paid by this transaction: total input amounts subtracted by total output amounts. */ | 
| 648 |  |         CAmount m_base_fees; | 
| 649 |  |         /** Base fees + any fee delta set by the user with prioritisetransaction. */ | 
| 650 |  |         CAmount m_modified_fees; | 
| 651 |  |  | 
| 652 |  |         /** If we're doing package validation (i.e. m_package_feerates=true), the "effective" | 
| 653 |  |          * package feerate of this transaction is the total fees divided by the total size of | 
| 654 |  |          * transactions (which may include its ancestors and/or descendants). */ | 
| 655 |  |         CFeeRate m_package_feerate{0}; | 
| 656 |  |  | 
| 657 |  |         const CTransactionRef& m_ptx; | 
| 658 |  |         /** Txid. */ | 
| 659 |  |         const Txid& m_hash; | 
| 660 |  |         TxValidationState m_state; | 
| 661 |  |         /** A temporary cache containing serialized transaction data for signature verification. | 
| 662 |  |          * Reused across PolicyScriptChecks and ConsensusScriptChecks. */ | 
| 663 |  |         PrecomputedTransactionData m_precomputed_txdata; | 
| 664 |  |     }; | 
| 665 |  |  | 
| 666 |  |     // Run the policy checks on a given transaction, excluding any script checks. | 
| 667 |  |     // Looks up inputs, calculates feerate, considers replacement, evaluates | 
| 668 |  |     // package limits, etc. As this function can be invoked for "free" by a peer, | 
| 669 |  |     // only tests that are fast should be done here (to avoid CPU DoS). | 
| 670 |  |     bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 671 |  |  | 
| 672 |  |     // Run checks for mempool replace-by-fee, only used in AcceptSingleTransaction. | 
| 673 |  |     bool ReplacementChecks(Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 674 |  |  | 
| 675 |  |     // Enforce package mempool ancestor/descendant limits (distinct from individual | 
| 676 |  |     // ancestor/descendant limits done in PreChecks) and run Package RBF checks. | 
| 677 |  |     bool PackageMempoolChecks(const std::vector<CTransactionRef>& txns, | 
| 678 |  |                               std::vector<Workspace>& workspaces, | 
| 679 |  |                               int64_t total_vsize, | 
| 680 |  |                               PackageValidationState& package_state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 681 |  |  | 
| 682 |  |     // Run the script checks using our policy flags. As this can be slow, we should | 
| 683 |  |     // only invoke this on transactions that have otherwise passed policy checks. | 
| 684 |  |     bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 685 |  |  | 
| 686 |  |     // Re-run the script checks, using consensus flags, and try to cache the | 
| 687 |  |     // result in the scriptcache. This should be done after | 
| 688 |  |     // PolicyScriptChecks(). This requires that all inputs either be in our | 
| 689 |  |     // utxo set or in the mempool. | 
| 690 |  |     bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 691 |  |  | 
| 692 |  |     // Try to add the transaction to the mempool, removing any conflicts first. | 
| 693 |  |     void FinalizeSubpackage(const ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 694 |  |  | 
| 695 |  |     // Submit all transactions to the mempool and call ConsensusScriptChecks to add to the script | 
| 696 |  |     // cache - should only be called after successful validation of all transactions in the package. | 
| 697 |  |     // Does not call LimitMempoolSize(), so mempool max_size_bytes may be temporarily exceeded. | 
| 698 |  |     bool SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state, | 
| 699 |  |                        std::map<Wtxid, MempoolAcceptResult>& results) | 
| 700 |  |          EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); | 
| 701 |  |  | 
| 702 |  |     // Compare a package's feerate against minimum allowed. | 
| 703 |  |     bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_pool.cs) | 
| 704 | 638k |     { | 
| 705 | 638k |         AssertLockHeld(::cs_main); | 
| 706 | 638k |         AssertLockHeld(m_pool.cs); | 
| 707 | 638k |         CAmount mempoolRejectFee = m_pool.GetMinFee().GetFee(package_size); | 
| 708 | 638k |         if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) { | 
| 709 | 0 |             return state.Invalid(TxValidationResult::TX_RECONSIDERABLE, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee)); | 
| 710 | 0 |         } | 
| 711 |  |  | 
| 712 | 638k |         if (package_fee < m_pool.m_opts.min_relay_feerate.GetFee(package_size)) { | 
| 713 | 0 |             return state.Invalid(TxValidationResult::TX_RECONSIDERABLE, "min relay fee not met", | 
| 714 | 0 |                                  strprintf("%d < %d", package_fee, m_pool.m_opts.min_relay_feerate.GetFee(package_size))); | 
| 715 | 0 |         } | 
| 716 | 638k |         return true; | 
| 717 | 638k |     } | 
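
CheckFeeRate() applies two floors to the (sub)package fee: the dynamic mempool minimum fee (only when non-zero) and the static minimum relay feerate, each scaled to the package's virtual size. The scaling is roughly satoshis-per-kvB times vsize divided by 1000; the sketch below uses that approximation with illustrative names and glosses over CFeeRate's round-up-to-one-satoshi detail.

    #include <cstdint>

    // Approximate CFeeRate::GetFee(): a rate in satoshis per 1000 virtual bytes
    // scaled to a given virtual size.
    int64_t RequiredFeeSketch(int64_t sat_per_kvb, int64_t vsize)
    {
        return sat_per_kvb * vsize / 1000;
    }

    // Mirrors the two rejections above: "mempool min fee not met" and
    // "min relay fee not met".
    bool PassesFeeFloorsSketch(int64_t package_fee, int64_t package_vsize,
                               int64_t mempool_min_sat_per_kvb, int64_t min_relay_sat_per_kvb)
    {
        const int64_t mempool_floor = RequiredFeeSketch(mempool_min_sat_per_kvb, package_vsize);
        if (mempool_floor > 0 && package_fee < mempool_floor) return false;
        if (package_fee < RequiredFeeSketch(min_relay_sat_per_kvb, package_vsize)) return false;
        return true;
    }
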
| 718 |  |  | 
| 719 |  |     ValidationCache& GetValidationCache() | 
| 720 | 987k |     { | 
| 721 | 987k |         return m_active_chainstate.m_chainman.m_validation_cache; | 
| 722 | 987k |     } | 
| 723 |  |  | 
| 724 |  | private: | 
| 725 |  |     CTxMemPool& m_pool; | 
| 726 |  |  | 
| 727 |  |     /** Holds a cached view of available coins from the UTXO set, mempool, and artificial temporary coins (to enable package validation). | 
| 728 |  |      * The view doesn't track whether a coin previously existed but has now been spent. We detect conflicts in other ways: | 
| 729 |  |      * - conflicts within a transaction are checked in CheckTransaction (bad-txns-inputs-duplicate) | 
| 730 |  |      * - conflicts within a package are checked in IsWellFormedPackage (conflict-in-package) | 
| 731 |  |      * - conflicts with an existing mempool transaction are found in CTxMemPool::GetConflictTx and replacements are allowed | 
| 732 |  |      * The temporary coins should persist between individual transaction checks so that package validation is possible, | 
| 733 |  |      * but must be cleaned up when we finish validating a subpackage, whether accepted or rejected. The cache must also | 
| 734 |  |      * be cleared when mempool contents change (when a changeset is applied or when the mempool trims itself) because it | 
| 735 |  |      * can return cached coins that no longer exist in the backend. Use CleanupTemporaryCoins() anytime you are finished | 
| 736 |  |      * with a SubPackageState or call LimitMempoolSize(). | 
| 737 |  |      */ | 
| 738 |  |     CCoinsViewCache m_view; | 
| 739 |  |  | 
| 740 |  |     // These are the two possible backends for m_view. | 
| 741 |  |     /** When m_view is connected to m_viewmempool as its backend, it can pull coins from the mempool and from the UTXO | 
| 742 |  |      * set. This is also where temporary coins are stored. */ | 
| 743 |  |     CCoinsViewMemPool m_viewmempool; | 
| 744 |  |     /** When m_view is connected to m_dummy, it can no longer look up coins from the mempool or UTXO set (meaning no disk | 
| 745 |  |      * operations happen), but can still return coins it accessed previously. Useful for keeping track of which coins | 
| 746 |  |      * were pulled from disk. */ | 
| 747 |  |     CCoinsView m_dummy; | 
| 748 |  |  | 
| 749 |  |     Chainstate& m_active_chainstate; | 
| 750 |  |  | 
| 751 |  |     // Fields below are per *sub*package state and must be reset prior to subsequent | 
| 752 |  |     // AcceptSingleTransaction and AcceptMultipleTransactions invocations | 
| 753 |  |     struct SubPackageState { | 
| 754 |  |         /** Aggregated modified fees of all transactions, used to calculate package feerate. */ | 
| 755 |  |         CAmount m_total_modified_fees{0}; | 
| 756 |  |         /** Aggregated virtual size of all transactions, used to calculate package feerate. */ | 
| 757 |  |         int64_t m_total_vsize{0}; | 
| 758 |  |  | 
| 759 |  |         // RBF-related members | 
| 760 |  |         /** Whether the transaction(s) would replace any mempool transactions and/or evict any siblings. | 
| 761 |  |          * If so, RBF rules apply. */ | 
| 762 |  |         bool m_rbf{false}; | 
| 763 |  |         /** Mempool transactions that were replaced. */ | 
| 764 |  |         std::list<CTransactionRef> m_replaced_transactions; | 
| 765 |  |         /* Changeset representing adding transactions and removing their conflicts. */ | 
| 766 |  |         std::unique_ptr<CTxMemPool::ChangeSet> m_changeset; | 
| 767 |  |  | 
| 768 |  |         /** Total modified fees of mempool transactions being replaced. */ | 
| 769 |  |         CAmount m_conflicting_fees{0}; | 
| 770 |  |         /** Total size (in virtual bytes) of mempool transactions being replaced. */ | 
| 771 |  |         size_t m_conflicting_size{0}; | 
| 772 |  |     }; | 
| 773 |  |  | 
| 774 |  |     struct SubPackageState m_subpackage; | 
| 775 |  |  | 
| 776 |  |     /** Re-set sub-package state to not leak between evaluations */ | 
| 777 |  |     void ClearSubPackageState() EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs) | 
| 778 | 0 |     { | 
| 779 | 0 |         m_subpackage = SubPackageState{}; | 
| 780 |  |  | 
| 781 |  |         // And clean coins while at it | 
| 782 | 0 |         CleanupTemporaryCoins(); | 
| 783 | 0 |     } | 
| 784 |  | }; | 
| 785 |  |  | 
| 786 |  | bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) | 
| 787 | 719k | { | 
| 788 | 719k |     AssertLockHeld(cs_main); | 
| 789 | 719k |     AssertLockHeld(m_pool.cs); | 
| 790 | 719k |     const CTransactionRef& ptx = ws.m_ptx; | 
| 791 | 719k |     const CTransaction& tx = *ws.m_ptx; | 
| 792 | 719k |     const Txid& hash = ws.m_hash; | 
| 793 |  |  | 
| 794 |  |     // Copy/alias what we need out of args | 
| 795 | 719k |     const int64_t nAcceptTime = args.m_accept_time; | 
| 796 | 719k |     const bool bypass_limits = args.m_bypass_limits; | 
| 797 | 719k |     std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache; | 
| 798 |  |  | 
| 799 |  |     // Alias what we need out of ws | 
| 800 | 719k |     TxValidationState& state = ws.m_state; | 
| 801 |  |  | 
| 802 | 719k |     if (!CheckTransaction(tx, state)) { | 
| 803 | 0 |         return false; // state filled in by CheckTransaction | 
| 804 | 0 |     } | 
| 805 |  |  | 
| 806 |  |     // Coinbase is only valid in a block, not as a loose transaction | 
| 807 | 719k |     if (tx.IsCoinBase()) | 
| 808 | 0 |         return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase"); | 
| 809 |  |  | 
| 810 |  |     // Rather not work on nonstandard transactions (unless -testnet/-regtest) | 
| 811 | 719k |     std::string reason; | 
| 812 | 719k |     if (m_pool.m_opts.require_standard && !IsStandardTx(tx, m_pool.m_opts.max_datacarrier_bytes, m_pool.m_opts.permit_bare_multisig, m_pool.m_opts.dust_relay_feerate, reason)) { | 
| 813 | 0 |         return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason); | 
| 814 | 0 |     } | 
| 815 |  |  | 
| 816 |  |     // Transactions smaller than 65 non-witness bytes are not relayed to mitigate CVE-2017-12842. | 
| 817 | 719k |     if (::GetSerializeSize(TX_NO_WITNESS(tx)) < MIN_STANDARD_TX_NONWITNESS_SIZE) | 
| 818 | 0 |         return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small"); | 
| 819 |  |  | 
| 820 |  |     // Only accept nLockTime-using transactions that can be mined in the next | 
| 821 |  |     // block; we don't want our mempool filled up with transactions that can't | 
| 822 |  |     // be mined yet. | 
| 823 | 719k |     if (!CheckFinalTxAtTip(*Assert(m_active_chainstate.m_chain.Tip()), tx)) { | 
| 824 | 64.7k |         return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final"); | 
| 825 | 64.7k |     } | 
| 826 |  |  | 
| 827 | 654k |     if (m_pool.exists(tx.GetWitnessHash())) { | 
| 828 |  |         // Exact transaction already exists in the mempool. | 
| 829 | 0 |         return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool"); | 
| 830 | 654k |     } else if (m_pool.exists(tx.GetHash())) { | 
| 831 |  |         // Transaction with the same non-witness data but different witness (same txid, different | 
| 832 |  |         // wtxid) already exists in the mempool. | 
| 833 | 0 |         return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-same-nonwitness-data-in-mempool"); | 
| 834 | 0 |     } | 
| 835 |  |  | 
| 836 |  |     // Check for conflicts with in-memory transactions | 
| 837 | 654k |     for (const CTxIn &txin : tx.vin) | 
| 838 | 654k |     { | 
| 839 | 654k |         const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout); | 
| 840 | 654k |         if (ptxConflicting) { | 
| 841 | 156k |             if (!args.m_allow_replacement) { | 
| 842 |  |                 // Transaction conflicts with a mempool tx, but we're not allowing replacements in this context. | 
| 843 | 0 |                 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "bip125-replacement-disallowed"); | 
| 844 | 0 |             } | 
| 845 | 156k |             ws.m_conflicts.insert(ptxConflicting->GetHash()); | 
| 846 | 156k |         } | 
| 847 | 654k |     } | 
| 848 |  |  | 
| 849 | 654k |     m_view.SetBackend(m_viewmempool); | 
| 850 |  |  | 
| 851 | 654k |     const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip(); | 
| 852 |  |     // do all inputs exist? | 
| 853 | 654k |     for (const CTxIn& txin : tx.vin) { | 
| 854 | 654k |         if (!coins_cache.HaveCoinInCache(txin.prevout)) { | 
| 855 | 478k |             coins_to_uncache.push_back(txin.prevout); | 
| 856 | 478k |         } | 
| 857 |  |  | 
| 858 |  |         // Note: this call may add txin.prevout to the coins cache | 
| 859 |  |         // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed | 
| 860 |  |         // later (via coins_to_uncache) if this tx turns out to be invalid. | 
| 861 | 654k |         if (!m_view.HaveCoin(txin.prevout)) { | 
| 862 |  |             // Are inputs missing because we already have the tx? | 
| 863 | 0 |             for (size_t out = 0; out < tx.vout.size(); out++) { | 
| 864 |  |                 // Optimistically just do efficient check of cache for outputs | 
| 865 | 0 |                 if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) { | 
| 866 | 0 |                     return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known"); | 
| 867 | 0 |                 } | 
| 868 | 0 |             } | 
| 869 |  |             // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet | 
| 870 | 0 |             return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent"); | 
| 871 | 0 |         } | 
| 872 | 654k |     } | 
| 873 |  |  | 
| 874 |  |     // This is const, but calls into the back end CoinsViews. The CCoinsViewDB at the bottom of the | 
| 875 |  |     // hierarchy brings the best block into scope. See CCoinsViewDB::GetBestBlock(). | 
| 876 | 654k |     m_view.GetBestBlock(); | 
| 877 |  |  | 
| 878 |  |     // we have all inputs cached now, so switch back to dummy (to protect | 
| 879 |  |     // against bugs where we pull more inputs from disk that miss being added | 
| 880 |  |     // to coins_to_uncache) | 
| 881 | 654k |     m_view.SetBackend(m_dummy); | 
| 882 |  |  | 
| 883 | 654k |     assert(m_active_chainstate.m_blockman.LookupBlockIndex(m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip()); | 
| 884 |  |  | 
| 885 |  |     // Only accept BIP68 sequence locked transactions that can be mined in the next | 
| 886 |  |     // block; we don't want our mempool filled up with transactions that can't | 
| 887 |  |     // be mined yet. | 
| 888 |  |     // Pass in m_view which has all of the relevant inputs cached. Note that, since m_view's | 
| 889 |  |     // backend was removed, it no longer pulls coins from the mempool. | 
| 890 | 654k |     const std::optional<LockPoints> lock_points{CalculateLockPointsAtTip(m_active_chainstate.m_chain.Tip(), m_view, tx)}; | 
| 891 | 654k |     if (!lock_points.has_value() || !CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(), *lock_points)) { | 
| 892 | 15.5k |         return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final"); | 
| 893 | 15.5k |     } | 
| 894 |  |  | 
| 895 |  |     // The mempool holds txs for the next block, so pass height+1 to CheckTxInputs | 
| 896 | 638k |     if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_chain.Height() + 1, ws.m_base_fees)) { | 
| 897 | 0 |         return false; // state filled in by CheckTxInputs | 
| 898 | 0 |     } | 
| 899 |  |  | 
| 900 | 638k |     if (m_pool.m_opts.require_standard && !AreInputsStandard(tx, m_view)) { | 
| 901 | 0 |         return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs"); | 
| 902 | 0 |     } | 
| 903 |  |  | 
| 904 |  |     // Check for non-standard witnesses. | 
| 905 | 638k |     if (tx.HasWitness() && m_pool.m_opts.require_standard && !IsWitnessStandard(tx, m_view)) { | 
| 906 | 0 |         return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard"); | 
| 907 | 0 |     } | 
| 908 |  |  | 
| 909 | 638k |     int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS); | 
| 910 |  |  | 
| 911 |  |     // Keep track of transactions that spend a coinbase, which we re-scan | 
| 912 |  |     // during reorgs to ensure COINBASE_MATURITY is still met. | 
| 913 | 638k |     bool fSpendsCoinbase = false; | 
| 914 | 638k |     for (const CTxIn &txin : tx.vin) { | 
| 915 | 638k |         const Coin &coin = m_view.AccessCoin(txin.prevout); | 
| 916 | 638k |         if (coin.IsCoinBase()) { | 
| 917 | 173k |             fSpendsCoinbase = true; | 
| 918 | 173k |             break; | 
| 919 | 173k |         } | 
| 920 | 638k |     } | 
| 921 |  |  | 
| 922 |  |     // Set entry_sequence to 0 when bypass_limits is used; this allows txs from a block | 
| 923 |  |     // reorg to be marked earlier than any child txs that were already in the mempool. | 
| 924 | 638k |     const uint64_t entry_sequence = bypass_limits ? 0 : m_pool.GetSequence(); | 
| 925 | 638k |     if (!m_subpackage.m_changeset) { | 
| 926 | 638k |         m_subpackage.m_changeset = m_pool.GetChangeSet(); | 
| 927 | 638k |     } | 
| 928 | 638k |     ws.m_tx_handle = m_subpackage.m_changeset->StageAddition(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(), entry_sequence, fSpendsCoinbase, nSigOpsCost, lock_points.value()); | 
| 929 |  |  | 
| 930 |  |     // ws.m_modified_fees includes any fee deltas from PrioritiseTransaction | 
| 931 | 638k |     ws.m_modified_fees = ws.m_tx_handle->GetModifiedFee(); | 
| 932 |  |  | 
| 933 | 638k |     ws.m_vsize = ws.m_tx_handle->GetTxSize(); | 
| 934 |  |  | 
| 935 |  |     // Enforces 0-fee for dust transactions, no incentive to be mined alone | 
| 936 | 638k |     if (m_pool.m_opts.require_standard) { | 
| 937 | 638k |         if (!PreCheckEphemeralTx(*ptx, m_pool.m_opts.dust_relay_feerate, ws.m_base_fees, ws.m_modified_fees, state)) { | 
| 938 | 0 |             return false; // state filled in by PreCheckEphemeralTx | 
| 939 | 0 |         } | 
| 940 | 638k |     } | 
| 941 |  |  | 
| 942 | 638k |     if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST) | 
| 943 | 0 |         return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops", | 
| 944 | 0 |                 strprintf("%d", nSigOpsCost)); | 
| 945 |  |  | 
| 946 |  |     // No individual transactions are allowed below the min relay feerate except from disconnected blocks. | 
| 947 |  |     // This requirement, unlike CheckFeeRate, cannot be bypassed using m_package_feerates because, | 
| 948 |  |     // while a tx could be package CPFP'd when entering the mempool, we do not have a DoS-resistant | 
| 949 |  |     // method of ensuring the tx remains bumped. For example, the fee-bumping child could disappear | 
| 950 |  |     // due to a replacement. | 
| 951 |  |     // The only exception is TRUC transactions. | 
| 952 | 638k |     if (!bypass_limits && ws.m_ptx->version != TRUC_VERSION && ws.m_modified_fees < m_pool.m_opts.min_relay_feerate.GetFee(ws.m_vsize)) { | 
| 953 |  |         // Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not | 
| 954 |  |         // TX_RECONSIDERABLE, because it cannot be bypassed using package validation. | 
| 955 | 0 |         return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", | 
| 956 | 0 |                              strprintf("%d < %d", ws.m_modified_fees, m_pool.m_opts.min_relay_feerate.GetFee(ws.m_vsize))); | 
| 957 | 0 |     } | 
| 958 |  |     // No individual transactions are allowed below the mempool min feerate except from disconnected | 
| 959 |  |     // blocks and transactions in a package. Package transactions will be checked using package | 
| 960 |  |     // feerate later. | 
| 961 | 638k |     if (!bypass_limits && !args.m_package_feerates && !CheckFeeRate(ws.m_vsize, ws.m_modified_fees, state)) return false; | 
| 962 |  |  | 
| 963 | 638k |     ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts); | 
| 964 |  |  | 
| 965 |  |     // Note that these modifications are only applicable to single transaction scenarios; | 
| 966 |  |     // carve-outs are disabled for multi-transaction evaluations. | 
| 967 | 638k |     CTxMemPool::Limits maybe_rbf_limits = m_pool.m_opts.limits; | 
| 968 |  |  | 
| 969 |  |     // Calculate in-mempool ancestors, up to a limit. | 
| 970 | 638k |     if (ws.m_conflicts.size() == 1 && args.m_allow_carveouts) { | 
| 971 |  |         // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we | 
| 972 |  |         // would meet the chain limits after the conflicts have been removed. However, there isn't a practical | 
| 973 |  |         // way to do this short of calculating the ancestor and descendant sets with an overlay cache of | 
| 974 |  |         // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't | 
| 975 |  |         // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool | 
| 976 |  |         // conflicts here. Importantly, we need to ensure that some transactions which were accepted using | 
| 977 |  |         // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides | 
| 978 |  |         // for off-chain contract systems (see link in the comment below). | 
| 979 |  |         // | 
| 980 |  |         // Specifically, the subset of RBF transactions which we allow despite chain limits are those which | 
| 981 |  |         // conflict directly with exactly one other transaction (but may evict children of said transaction), | 
| 982 |  |         // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies" | 
| 983 |  |         // check is accomplished later, so we don't bother doing anything about it here, but if our | 
| 984 |  |         // policy changes, we may need to move that check to here instead of removing it wholesale. | 
| 985 |  |         // | 
| 986 |  |         // Such transactions are clearly not merging any existing packages, so we are only concerned with | 
| 987 |  |         // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are | 
| 988 |  |         // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed | 
| 989 |  |         // to. | 
| 990 |  |         // | 
| 991 |  |         // To check these we first check if we meet the RBF criteria, above, and increment the descendant | 
| 992 |  |         // limits by the direct conflict and its descendants (as these are recalculated in | 
| 993 |  |         // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no | 
| 994 |  |         // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as | 
| 995 |  |         // the ancestor limits should be the same for both our new transaction and any conflicts). | 
| 996 |  |         // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes | 
| 997 |  |         // into force here (as we're only adding a single transaction). | 
| 998 | 145k |         assert(ws.m_iters_conflicting.size() == 1); | 
| 999 | 145k |         CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin(); | 
| 1000 |  |  | 
| 1001 | 145k |         maybe_rbf_limits.descendant_count += 1; | 
| 1002 | 145k |         maybe_rbf_limits.descendant_size_vbytes += conflict->GetSizeWithDescendants(); | 
| 1003 | 145k |     } | 
| 1004 |  |  | 
| 1005 | 638k |     if (auto ancestors{m_subpackage.m_changeset->CalculateMemPoolAncestors(ws.m_tx_handle, maybe_rbf_limits)}) { | 
| 1006 | 638k |         ws.m_ancestors = std::move(*ancestors); | 
| 1007 | 638k |     } else { | 
| 1008 |  |         // If CalculateMemPoolAncestors fails second time, we want the original error string. | 
| 1009 | 0 |         const auto error_message{util::ErrorString(ancestors).original}; | 
| 1010 |  |  | 
| 1011 |  |         // Carve-out is not allowed in this context; fail | 
| 1012 | 0 |         if (!args.m_allow_carveouts) { | 
| 1013 | 0 |             return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message); | 
| 1014 | 0 |         } | 
| 1015 |  |  | 
| 1016 |  |         // Contracting/payment channels CPFP carve-out: | 
| 1017 |  |         // If the new transaction is relatively small (up to 40k weight) | 
| 1018 |  |         // and has at most one ancestor (ie ancestor limit of 2, including | 
| 1019 |  |         // the new transaction), allow it if its parent has exactly the | 
| 1020 |  |         // descendant limit descendants. The transaction also cannot be TRUC, | 
| 1021 |  |         // as its topology restrictions do not allow a second child. | 
| 1022 |  |         // | 
| 1023 |  |         // This allows protocols which rely on distrusting counterparties | 
| 1024 |  |         // being able to broadcast descendants of an unconfirmed transaction | 
| 1025 |  |         // to be secure by simply only having two immediately-spendable | 
| 1026 |  |         // outputs - one for each counterparty. For more info on the uses for | 
| 1027 |  |         // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html | 
| 1028 | 0 |         CTxMemPool::Limits cpfp_carve_out_limits{ | 
| 1029 | 0 |             .ancestor_count = 2, | 
| 1030 | 0 |             .ancestor_size_vbytes = maybe_rbf_limits.ancestor_size_vbytes, | 
| 1031 | 0 |             .descendant_count = maybe_rbf_limits.descendant_count + 1, | 
| 1032 | 0 |             .descendant_size_vbytes = maybe_rbf_limits.descendant_size_vbytes + EXTRA_DESCENDANT_TX_SIZE_LIMIT, | 
| 1033 | 0 |         }; | 
| 1034 | 0 |         if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT || ws.m_ptx->version == TRUC_VERSION) { | 
| 1035 | 0 |             return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message); | 
| 1036 | 0 |         } | 
| 1037 | 0 |         if (auto ancestors_retry{m_subpackage.m_changeset->CalculateMemPoolAncestors(ws.m_tx_handle, cpfp_carve_out_limits)}) { | 
| 1038 | 0 |             ws.m_ancestors = std::move(*ancestors_retry); | 
| 1039 | 0 |         } else { | 
| 1040 | 0 |             return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message); | 
| 1041 | 0 |         } | 
| 1042 | 0 |     } | 
| 1043 |  |  | 
| 1044 |  |     // Even though just checking direct mempool parents for inheritance would be sufficient, we | 
| 1045 |  |     // check using the full ancestor set here because it's more convenient to use what we have | 
| 1046 |  |     // already calculated. | 
| 1047 | 638k |     if (!args.m_bypass_limits) { | 
| 1048 | 638k |         if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) { | 
| 1049 |  |             // Single transaction contexts only. | 
| 1050 | 0 |             if (args.m_allow_sibling_eviction && err->second != nullptr) { | 
| 1051 |  |                 // We should only be considering where replacement is considered valid as well. | 
| 1052 | 0 |                 Assume(args.m_allow_replacement); | 
| 1053 |  |  | 
| 1054 |  |                 // Potential sibling eviction. Add the sibling to our list of mempool conflicts to be | 
| 1055 |  |                 // included in RBF checks. | 
| 1056 | 0 |                 ws.m_conflicts.insert(err->second->GetHash()); | 
| 1057 |  |                 // Adding the sibling to m_iters_conflicting here means that it doesn't count towards | 
| 1058 |  |                 // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from | 
| 1059 |  |                 // the descendant count is done separately in SingleTRUCChecks for TRUC transactions. | 
| 1060 | 0 |                 ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value()); | 
| 1061 | 0 |                 ws.m_sibling_eviction = true; | 
| 1062 |  |                 // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks. | 
| 1063 |  |                 // Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC | 
| 1064 |  |                 // (which is normally done in PreChecks). However, the only way a TRUC transaction can | 
| 1065 |  |                 // have a non-TRUC and non-BIP125 descendant is due to a reorg. | 
| 1066 | 0 |             } else { | 
| 1067 | 0 |                 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first); | 
| 1068 | 0 |             } | 
| 1069 | 0 |         } | 
| 1070 | 638k |     } | 
| 1071 |  |  | 
| 1072 |  |     // A transaction that spends outputs that would be replaced by it is invalid. Now | 
| 1073 |  |     // that we have the set of all ancestors we can detect this | 
| 1074 |  |     // pathological case by making sure ws.m_conflicts and ws.m_ancestors don't | 
| 1075 |  |     // intersect. | 
| 1076 | 638k |     if (const auto err_string{EntriesAndTxidsDisjoint(ws.m_ancestors, ws.m_conflicts, hash)}) { | 
| 1077 |  |         // We classify this as a consensus error because a transaction depending on something it | 
| 1078 |  |         // conflicts with would be inconsistent. | 
| 1079 | 0 |         return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx", *err_string); | 
| 1080 | 0 |     } | 
| 1081 |  |  | 
| 1082 |  |     // We want to detect conflicts in any tx in a package to trigger package RBF logic | 
| 1083 | 638k |     m_subpackage.m_rbf |= !ws.m_conflicts.empty(); | 
| 1084 | 638k |     return true; | 
| 1085 | 638k | } | 
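The two fee gates applied in PreChecks above (the static min relay feerate and the dynamic mempool minimum checked via CheckFeeRate) both come down to multiplying a feerate by the transaction's virtual size and comparing against the modified fee. A minimal standalone sketch of the first gate, with hypothetical helper names and CFeeRate::GetFee approximated as a plain floor of a sat/kvB product:

```cpp
#include <cstdint>
#include <optional>
#include <string>

// Hypothetical stand-in for CFeeRate::GetFee: the fee (sats) that a feerate in
// sat/kvB requires for a given vsize in vbytes (the real class additionally
// rounds up to at least 1 sat for a nonzero size).
int64_t FeeForRate(int64_t sat_per_kvb, int64_t vsize)
{
    return sat_per_kvb * vsize / 1000;
}

// Mirrors the "min relay fee not met" rejection: the *modified* fee (base fee
// plus any PrioritiseTransaction delta) must reach the floor. TRUC (v3)
// transactions and disconnected-block resubmissions skip this gate.
std::optional<std::string> CheckMinRelayFee(int64_t modified_fee, int64_t vsize,
                                            int64_t min_relay_sat_per_kvb)
{
    const int64_t required{FeeForRate(min_relay_sat_per_kvb, vsize)};
    if (modified_fee < required) {
        return "min relay fee not met: " + std::to_string(modified_fee) +
               " < " + std::to_string(required);
    }
    return std::nullopt;
}
```

For example, at the default 1000 sat/kvB floor a 200 vB transaction needs at least 200 sats of modified fee; the same arithmetic, evaluated against the current mempool minimum feerate, is what the CheckFeeRate call performs.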
| 1086 |  |  | 
| 1087 |  | bool MemPoolAccept::ReplacementChecks(Workspace& ws) | 
| 1088 | 145k | { | 
| 1089 | 145k |     AssertLockHeld(cs_main); | 
| 1090 | 145k |     AssertLockHeld(m_pool.cs); | 
| 1091 |  |  | 
| 1092 | 145k |     const CTransaction& tx = *ws.m_ptx; | 
| 1093 | 145k |     const Txid& hash = ws.m_hash; | 
| 1094 | 145k |     TxValidationState& state = ws.m_state; | 
| 1095 |  |  | 
| 1096 | 145k |     CFeeRate newFeeRate(ws.m_modified_fees, ws.m_vsize); | 
| 1097 |  |     // Enforce Rule #6. The replacement transaction must have a higher feerate than its direct conflicts. | 
| 1098 |  |     // - The motivation for this check is to ensure that the replacement transaction is preferable for | 
| 1099 |  |     //   block-inclusion, compared to what would be removed from the mempool. | 
| 1100 |  |     // - This logic predates ancestor feerate-based transaction selection, which is why it doesn't | 
| 1101 |  |     //   consider feerates of descendants. | 
| 1102 |  |     // - Note: Ancestor feerate-based transaction selection has made this comparison insufficient to | 
| 1103 |  |     //   guarantee that this is incentive-compatible for miners, because it is possible for a | 
| 1104 |  |     //   descendant transaction of a direct conflict to pay a higher feerate than the transaction that | 
| 1105 |  |     //   might replace them, under these rules. | 
| 1106 | 145k |     if (const auto err_string{PaysMoreThanConflicts(ws.m_iters_conflicting, newFeeRate, hash)}) { | 
| 1107 |  |         // This fee-related failure is TX_RECONSIDERABLE because validating in a package may change | 
| 1108 |  |         // the result. | 
| 1109 | 145k |         return state.Invalid(TxValidationResult::TX_RECONSIDERABLE, | 
| 1110 | 145k |                              strprintf("insufficient fee%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string); | 
| 1111 | 145k |     } | 
| 1112 |  |  | 
| 1113 | 0 |     CTxMemPool::setEntries all_conflicts; | 
| 1114 |  |  | 
| 1115 |  |     // Calculate all conflicting entries and enforce Rule #5. | 
| 1116 | 0 |     if (const auto err_string{GetEntriesForConflicts(tx, m_pool, ws.m_iters_conflicting, all_conflicts)}) { | 
| 1117 | 0 |         return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, | 
| 1118 | 0 |                              strprintf("too many potential replacements%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string); | 
| 1119 | 0 |     } | 
| 1120 |  |     // Enforce Rule #2. | 
| 1121 | 0 |     if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, all_conflicts)}) { | 
| 1122 |  |         // Sibling eviction is only done for TRUC transactions, which cannot have multiple ancestors. | 
| 1123 | 0 |         Assume(!ws.m_sibling_eviction); | 
| 1124 | 0 |         return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, | 
| 1125 | 0 |                              strprintf("replacement-adds-unconfirmed%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string); | 
| 1126 | 0 |     } | 
| 1127 |  |  | 
| 1128 |  |     // Check if it's economically rational to mine this transaction rather than the ones it | 
| 1129 |  |     // replaces and pays for its own relay fees. Enforce Rules #3 and #4. | 
| 1130 | 0 |     for (CTxMemPool::txiter it : all_conflicts) { | 
| 1131 | 0 |         m_subpackage.m_conflicting_fees += it->GetModifiedFee(); | 
| 1132 | 0 |         m_subpackage.m_conflicting_size += it->GetTxSize(); | 
| 1133 | 0 |     } | 
| 1134 | 0 |     if (const auto err_string{PaysForRBF(m_subpackage.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize, | 
| 1135 | 0 |                                          m_pool.m_opts.incremental_relay_feerate, hash)}) { | 
| 1136 |  |         // Result may change in a package context | 
| 1137 | 0 |         return state.Invalid(TxValidationResult::TX_RECONSIDERABLE, | 
| 1138 | 0 |                              strprintf("insufficient fee%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string); | 
| 1139 | 0 |     } | 
| 1140 |  |  | 
| 1141 |  |     // Add all the to-be-removed transactions to the changeset. | 
| 1142 | 0 |     for (auto it : all_conflicts) { | 
| 1143 | 0 |         m_subpackage.m_changeset->StageRemoval(it); | 
| 1144 | 0 |     } | 
| 1145 | 0 |     return true; | 
| 1146 | 0 | } | 
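The PaysForRBF step above enforces BIP125 Rules #3 and #4: the replacement must pay at least the absolute fees of everything it evicts, plus enough on top to cover its own relay bandwidth at the incremental relay feerate. A hedged standalone sketch of that arithmetic (hypothetical names; fees in sats, vsize in vbytes, feerate in sat/kvB):

```cpp
#include <cstdint>
#include <optional>
#include <string>

// Rule #3: the replacement must pay at least the absolute fees of everything
// it would evict. Rule #4: on top of that, it must pay for its own relay at
// the incremental relay feerate. Hypothetical standalone check.
std::optional<std::string> PaysForReplacement(int64_t original_fees,
                                              int64_t replacement_fees,
                                              int64_t replacement_vsize,
                                              int64_t incremental_sat_per_kvb)
{
    if (replacement_fees < original_fees) {
        return "does not pay the absolute fees of the replaced transactions";
    }
    const int64_t additional_needed{incremental_sat_per_kvb * replacement_vsize / 1000};
    if (replacement_fees - original_fees < additional_needed) {
        return "does not pay enough extra to cover its own relay bandwidth";
    }
    return std::nullopt;
}
```

So evicting 500 sats of conflicts with a 300 vB replacement at the default 1000 sat/kvB incremental feerate requires at least 500 + 300 = 800 sats in total.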
| 1147 |  |  | 
| 1148 |  | bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txns, | 
| 1149 |  |                                          std::vector<Workspace>& workspaces, | 
| 1150 |  |                                          const int64_t total_vsize, | 
| 1151 |  |                                          PackageValidationState& package_state) | 
| 1152 | 0 | { | 
| 1153 | 0 |     AssertLockHeld(cs_main); | 
| 1154 | 0 |     AssertLockHeld(m_pool.cs); | 
| 1155 |  |  | 
| 1156 |  |     // CheckPackageLimits expects the package transactions to not already be in the mempool. | 
| 1157 | 0 |     assert(std::all_of(txns.cbegin(), txns.cend(), [this](const auto& tx) { return !m_pool.exists(tx->GetHash()); })); | 
| 1158 |  |  | 
| 1159 | 0 |     assert(txns.size() == workspaces.size()); | 
| 1160 |  |  | 
| 1161 | 0 |     auto result = m_pool.CheckPackageLimits(txns, total_vsize); | 
| 1162 | 0 |     if (!result) { | 
| 1163 |  |         // This is a package-wide error, separate from an individual transaction error. | 
| 1164 | 0 |         return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", util::ErrorString(result).original); | 
| 1165 | 0 |     } | 
| 1166 |  |  | 
| 1167 |  |     // No conflicts means we're finished. Further checks are all RBF-only. | 
| 1168 | 0 |     if (!m_subpackage.m_rbf) return true; | 
| 1169 |  |  | 
| 1170 |  |     // We're in package RBF context; replacement proposal must be size 2 | 
| 1171 | 0 |     if (workspaces.size() != 2 || !Assume(IsChildWithParents(txns))) { | 
| 1172 | 0 |         return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package RBF failed: package must be 1-parent-1-child"); | 
| 1173 | 0 |     } | 
| 1174 |  |  | 
| 1175 |  |     // If the package has in-mempool ancestors, we won't consider a package RBF | 
| 1176 |  |     // since it would result in a cluster larger than 2. | 
| 1177 |  |     // N.B. To relax this constraint we will need to revisit how CCoinsViewMemPool::PackageAddTransaction | 
| 1178 |  |     // is being used inside AcceptMultipleTransactions to track available inputs while processing a package. | 
| 1179 | 0 |     for (const auto& ws : workspaces) { | 
| 1180 | 0 |         if (!ws.m_ancestors.empty()) { | 
| 1181 | 0 |             return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package RBF failed: new transaction cannot have mempool ancestors"); | 
| 1182 | 0 |         } | 
| 1183 | 0 |     } | 
| 1184 |  |  | 
| 1185 |  |     // Aggregate all conflicts into one set. | 
| 1186 | 0 |     CTxMemPool::setEntries direct_conflict_iters; | 
| 1187 | 0 |     for (Workspace& ws : workspaces) { | 
| 1188 |  |         // Aggregate all conflicts into one set. | 
| 1189 | 0 |         direct_conflict_iters.merge(ws.m_iters_conflicting); | 
| 1190 | 0 |     } | 
| 1191 |  |  | 
| 1192 | 0 |     const auto& parent_ws = workspaces[0]; | 
| 1193 | 0 |     const auto& child_ws = workspaces[1]; | 
| 1194 |  |  | 
| 1195 |  |     // Don't consider replacements that would cause us to remove a large number of mempool entries. | 
| 1196 |  |     // This limit is not increased in a package RBF. Use the aggregate number of transactions. | 
| 1197 | 0 |     CTxMemPool::setEntries all_conflicts; | 
| 1198 | 0 |     if (const auto err_string{GetEntriesForConflicts(*child_ws.m_ptx, m_pool, direct_conflict_iters, | 
| 1199 | 0 |                                                      all_conflicts)}) { | 
| 1200 | 0 |         return package_state.Invalid(PackageValidationResult::PCKG_POLICY, | 
| 1201 | 0 |                                      "package RBF failed: too many potential replacements", *err_string); | 
| 1202 | 0 |     } | 
| 1203 |  |  | 
| 1204 |  |  | 
| 1205 | 0 |     for (CTxMemPool::txiter it : all_conflicts) { | 
| 1206 | 0 |         m_subpackage.m_changeset->StageRemoval(it); | 
| 1207 | 0 |         m_subpackage.m_conflicting_fees += it->GetModifiedFee(); | 
| 1208 | 0 |         m_subpackage.m_conflicting_size += it->GetTxSize(); | 
| 1209 | 0 |     } | 
| 1210 |  |  | 
| 1211 |  |     // Use the child as the transaction for attributing errors to. | 
| 1212 | 0 |     const Txid& child_hash = child_ws.m_ptx->GetHash(); | 
| 1213 | 0 |     if (const auto err_string{PaysForRBF(/*original_fees=*/m_subpackage.m_conflicting_fees, | 
| 1214 | 0 |                                          /*replacement_fees=*/m_subpackage.m_total_modified_fees, | 
| 1215 | 0 |                                          /*replacement_vsize=*/m_subpackage.m_total_vsize, | 
| 1216 | 0 |                                          m_pool.m_opts.incremental_relay_feerate, child_hash)}) { | 
| 1217 | 0 |         return package_state.Invalid(PackageValidationResult::PCKG_POLICY, | 
| 1218 | 0 |                                      "package RBF failed: insufficient anti-DoS fees", *err_string); | 
| 1219 | 0 |     } | 
| 1220 |  |  | 
| 1221 |  |     // Ensure this two transaction package is a "chunk" on its own; we don't want the child | 
| 1222 |  |     // to be only paying anti-DoS fees | 
| 1223 | 0 |     const CFeeRate parent_feerate(parent_ws.m_modified_fees, parent_ws.m_vsize); | 
| 1224 | 0 |     const CFeeRate package_feerate(m_subpackage.m_total_modified_fees, m_subpackage.m_total_vsize); | 
| 1225 | 0 |     if (package_feerate <= parent_feerate) { | 
| 1226 | 0 |         return package_state.Invalid(PackageValidationResult::PCKG_POLICY, | 
| 1227 | 0 |                                      "package RBF failed: package feerate is less than or equal to parent feerate", | 
| 1228 | 0 |                                      strprintf("package feerate %s <= parent feerate is %s", package_feerate.ToString(), parent_feerate.ToString())); | 
| 1229 | 0 |     } | 
| 1230 |  |  | 
| 1231 |  |     // Check if it's economically rational to mine this package rather than the ones it replaces. | 
| 1232 |  |     // This takes the place of ReplacementChecks()'s PaysMoreThanConflicts() in the package RBF setting. | 
| 1233 | 0 |     if (const auto err_tup{ImprovesFeerateDiagram(*m_subpackage.m_changeset)}) { | 
| 1234 | 0 |         return package_state.Invalid(PackageValidationResult::PCKG_POLICY, | 
| 1235 | 0 |                                      "package RBF failed: " + err_tup.value().second, ""); | 
| 1236 | 0 |     } | 
| 1237 |  |  | 
| 1238 | 0 |     LogDebug(BCLog::TXPACKAGES, "package RBF checks passed: parent %s (wtxid=%s), child %s (wtxid=%s), package hash (%s)\n", | 
| 1239 | 0 |         txns.front()->GetHash().ToString(), txns.front()->GetWitnessHash().ToString(), | 
| 1240 | 0 |         txns.back()->GetHash().ToString(), txns.back()->GetWitnessHash().ToString(), | 
| 1241 | 0 |         GetPackageHash(txns).ToString()); | 
| 1242 |  |  | 
| 1243 |  |  | 
| 1244 | 0 |     return true; | 
| 1245 | 0 | } | 
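The parent/package feerate comparison above is what keeps the child from merely paying anti-DoS fees: the two-transaction package only qualifies as a replacement if its aggregate feerate strictly exceeds the parent's own feerate. A sketch of the comparison with hypothetical names; cross-multiplication sidesteps the sat/kvB quantization a real CFeeRate comparison goes through:

```cpp
#include <cstdint>

// True when total_fee / total_vsize > parent_fee / parent_vsize, evaluated
// without division. Fees in sats, sizes in vbytes; hypothetical helper.
bool PackageImprovesOnParent(int64_t parent_fee, int64_t parent_vsize,
                             int64_t total_fee, int64_t total_vsize)
{
    return total_fee * parent_vsize > parent_fee * total_vsize;
}

// Example: a parent at 100 sat / 200 vB (0.5 sat/vB) plus a child at
// 900 sat / 300 vB gives a package of 1000 sat / 500 vB (2 sat/vB), which
// strictly beats the parent feerate and so clears this check.
```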
| 1246 |  |  | 
| 1247 |  | bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) | 
| 1248 | 493k | { | 
| 1249 | 493k |     AssertLockHeld(cs_main); | 
| 1250 | 493k |     AssertLockHeld(m_pool.cs); | 
| 1251 | 493k |     const CTransaction& tx = *ws.m_ptx; | 
| 1252 | 493k |     TxValidationState& state = ws.m_state; | 
| 1253 |  |  | 
| 1254 | 493k |     constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; | 
| 1255 |  |  | 
| 1256 |  |     // Check input scripts and signatures. | 
| 1257 |  |     // This is done last to help prevent CPU exhaustion denial-of-service attacks. | 
| 1258 | 493k |     if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata, GetValidationCache())) { | 
| 1259 |  |         // Detect a failure due to a missing witness so that p2p code can handle rejection caching appropriately. | 
| 1260 | 0 |         if (!tx.HasWitness() && SpendsNonAnchorWitnessProg(tx, m_view)) { | 
| 1261 | 0 |             state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED, | 
| 1262 | 0 |                     state.GetRejectReason(), state.GetDebugMessage()); | 
| 1263 | 0 |         } | 
| 1264 | 0 |         return false; // state filled in by CheckInputScripts | 
| 1265 | 0 |     } | 
| 1266 |  |  | 
| 1267 | 493k |     return true; | 
| 1268 | 493k | } | 
| 1269 |  |  | 
| 1270 |  | bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws) | 
| 1271 | 493k | { | 
| 1272 | 493k |     AssertLockHeld(cs_main); | 
| 1273 | 493k |     AssertLockHeld(m_pool.cs); | 
| 1274 | 493k |     const CTransaction& tx = *ws.m_ptx; | 
| 1275 | 493k |     const Txid& hash = ws.m_hash; | 
| 1276 | 493k |     TxValidationState& state = ws.m_state; | 
| 1277 |  |  | 
| 1278 |  |     // Check again against the current block tip's script verification | 
| 1279 |  |     // flags to cache our script execution flags. This is, of course, | 
| 1280 |  |     // useless if the next block has different script flags from the | 
| 1281 |  |     // previous one, but because the cache tracks script flags for us it | 
| 1282 |  |     // will auto-invalidate and we'll just have a few blocks of extra | 
| 1283 |  |     // misses on soft-fork activation. | 
| 1284 |  |     // | 
| 1285 |  |     // This is also useful in case of bugs in the standard flags that cause | 
| 1286 |  |     // transactions to pass as valid when they're actually invalid. For | 
| 1287 |  |     // instance the STRICTENC flag was incorrectly allowing certain | 
| 1288 |  |     // CHECKSIG NOT scripts to pass, even though they were invalid. | 
| 1289 |  |     // | 
| 1290 |  |     // There is a similar check in CreateNewBlock() to prevent creating | 
| 1291 |  |     // invalid blocks (using TestBlockValidity), however allowing such | 
| 1292 |  |     // transactions into the mempool can be exploited as a DoS attack. | 
| 1293 | 493k |     unsigned int currentBlockScriptVerifyFlags{GetBlockScriptFlags(*m_active_chainstate.m_chain.Tip(), m_active_chainstate.m_chainman)}; | 
| 1294 | 493k |     if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, | 
| 1295 | 493k |                                         ws.m_precomputed_txdata, m_active_chainstate.CoinsTip(), GetValidationCache())) { | 
| 1296 | 0 |         LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s\n", hash.ToString(), state.ToString()); | 
| 1297 | 0 |         return Assume(false); | 
| 1298 | 0 |     } | 
| 1299 |  |  | 
| 1300 | 493k |     return true; | 
| 1301 | 493k | } | 
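The long comment in ConsensusScriptChecks relies on the script execution cache keying its entries by both the transaction and the exact verification flags, so results computed under pre-fork flags simply stop matching once GetBlockScriptFlags() returns a new mask. A toy illustration of that keying idea (hypothetical container, not the CuckooCache-backed cache validation actually uses):

```cpp
#include <cstdint>
#include <set>
#include <utility>

using ToyWtxid = uint64_t; // toy stand-in for a real wtxid

// Validity results are recorded together with the exact flag mask they were
// computed under. After a soft fork changes the flags for the next block,
// lookups use the new mask, miss, and force full re-validation instead of
// trusting results cached under the weaker rules.
struct ToyScriptCache {
    std::set<std::pair<ToyWtxid, uint32_t>> valid;

    void MarkValid(ToyWtxid wtxid, uint32_t flags) { valid.emplace(wtxid, flags); }
    bool WasValidated(ToyWtxid wtxid, uint32_t flags) const
    {
        return valid.count({wtxid, flags}) > 0;
    }
};

// An entry stored under flags 0x01 does not satisfy a lookup under 0x03, so a
// newly activated rule is still enforced on previously cached transactions.
```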
| 1302 |  |  | 
| 1303 |  | void MemPoolAccept::FinalizeSubpackage(const ATMPArgs& args) | 
| 1304 | 493k | { | 
| 1305 | 493k |     AssertLockHeld(cs_main); | 
| 1306 | 493k |     AssertLockHeld(m_pool.cs); | 
| 1307 |  |  | 
| 1308 | 493k |     if (!m_subpackage.m_changeset->GetRemovals().empty()) Assume(args.m_allow_replacement); | 
| 1309 |  |     // Remove conflicting transactions from the mempool | 
| 1310 | 493k |     for (CTxMemPool::txiter it : m_subpackage.m_changeset->GetRemovals()) | 
| 1311 | 0 |     { | 
| 1312 | 0 |         std::string log_string = strprintf("replacing mempool tx %s (wtxid=%s, fees=%s, vsize=%s). ", | 
| 1313 | 0 |                                       it->GetTx().GetHash().ToString(), | 
| 1314 | 0 |                                       it->GetTx().GetWitnessHash().ToString(), | 
| 1315 | 0 |                                       it->GetFee(), | 
| 1316 | 0 |                                       it->GetTxSize()); | 
| 1317 | 0 |         FeeFrac feerate{m_subpackage.m_total_modified_fees, int32_t(m_subpackage.m_total_vsize)}; | 
| 1318 | 0 |         uint256 tx_or_package_hash{}; | 
| 1319 | 0 |         const bool replaced_with_tx{m_subpackage.m_changeset->GetTxCount() == 1}; | 
| 1320 | 0 |         if (replaced_with_tx) { | 
| 1321 | 0 |             const CTransaction& tx = m_subpackage.m_changeset->GetAddedTxn(0); | 
| 1322 | 0 |             tx_or_package_hash = tx.GetHash().ToUint256(); | 
| 1323 | 0 |             log_string += strprintf("New tx %s (wtxid=%s, fees=%s, vsize=%s)", | 
| 1324 | 0 |                                     tx.GetHash().ToString(), | 
| 1325 | 0 |                                     tx.GetWitnessHash().ToString(), | 
| 1326 | 0 |                                     feerate.fee, | 
| 1327 | 0 |                                     feerate.size); | 
| 1328 | 0 |         } else { | 
| 1329 | 0 |             tx_or_package_hash = GetPackageHash(m_subpackage.m_changeset->GetAddedTxns()); | 
| 1330 | 0 |             log_string += strprintf("New package %s with %lu txs, fees=%s, vsize=%s", | 
| 1331 | 0 |                                     tx_or_package_hash.ToString(), | 
| 1332 | 0 |                                     m_subpackage.m_changeset->GetTxCount(), | 
| 1333 | 0 |                                     feerate.fee, | 
| 1334 | 0 |                                     feerate.size); | 
| 1335 |  |  | 
| 1336 | 0 |         } | 
| 1337 | 0 |         LogDebug(BCLog::MEMPOOL, "%s\n", log_string); | 
| 1338 | 0 |         TRACEPOINT(mempool, replaced, | 
| 1339 | 0 |                 it->GetTx().GetHash().data(), | 
| 1340 | 0 |                 it->GetTxSize(), | 
| 1341 | 0 |                 it->GetFee(), | 
| 1342 | 0 |                 std::chrono::duration_cast<std::chrono::duration<std::uint64_t>>(it->GetTime()).count(), | 
| 1343 | 0 |                 tx_or_package_hash.data(), | 
| 1344 | 0 |                 feerate.size, | 
| 1345 | 0 |                 feerate.fee, | 
| 1346 | 0 |                 replaced_with_tx | 
| 1347 | 0 |         ); | 
| 1348 | 0 |         m_subpackage.m_replaced_transactions.push_back(it->GetSharedTx()); | 
| 1349 | 0 |     } | 
| 1350 | 493k |     m_subpackage.m_changeset->Apply(); | 
| 1351 | 493k |     m_subpackage.m_changeset.reset(); | 
| 1352 | 493k | } | 
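FinalizeSubpackage shows the stage-then-apply discipline used throughout acceptance: removals and the new entries are accumulated in a changeset and only become visible to the mempool atomically at Apply(). A minimal sketch of that pattern under those assumptions (toy types, not CTxMemPool's real changeset interface):

```cpp
#include <string>
#include <utility>
#include <vector>

// Nothing touches the pool until Apply(): if validation fails at any earlier
// step, the staged removals and additions are simply dropped and the mempool
// is left exactly as it was.
struct ToyChangeSet {
    std::vector<std::string> to_remove;
    std::vector<std::string> to_add;

    void StageRemoval(std::string txid) { to_remove.push_back(std::move(txid)); }
    void StageAddition(std::string txid) { to_add.push_back(std::move(txid)); }

    void Apply(std::vector<std::string>& pool)
    {
        for (const auto& txid : to_remove) std::erase(pool, txid); // C++20
        pool.insert(pool.end(), to_add.begin(), to_add.end());
        to_remove.clear();
        to_add.clear();
    }
};
```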
| 1353 |  |  | 
| 1354 |  | bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, | 
| 1355 |  |                                   PackageValidationState& package_state, | 
| 1356 |  |                                   std::map<Wtxid, MempoolAcceptResult>& results) | 
| 1357 | 0 | { | 
| 1358 | 0 |     AssertLockHeld(cs_main); | 
| 1359 | 0 |     AssertLockHeld(m_pool.cs); | 
| 1360 |  |     // Sanity check: none of the transactions should be in the mempool, and none of the transactions | 
| 1361 |  |     // should have a same-txid-different-witness equivalent in the mempool. | 
| 1362 | 0 |     assert(std::all_of(workspaces.cbegin(), workspaces.cend(), [this](const auto& ws) { return !m_pool.exists(ws.m_ptx->GetHash()); })); | 
| 1363 |  |  | 
| 1364 | 0 |     bool all_submitted = true; | 
| 1365 | 0 |     FinalizeSubpackage(args); | 
| 1366 |  |     // ConsensusScriptChecks adds to the script cache and is therefore consensus-critical; | 
| 1367 |  |     // CheckInputsFromMempoolAndCache asserts that transactions only spend coins available from the | 
| 1368 |  |     // mempool or UTXO set. Submit each transaction to the mempool immediately after calling | 
| 1369 |  |     // ConsensusScriptChecks to make the outputs available for subsequent transactions. | 
| 1370 | 0 |     for (Workspace& ws : workspaces) { | 
| 1371 | 0 |         if (!ConsensusScriptChecks(args, ws)) { | 
| 1372 | 0 |             results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); | 
| 1373 |  |             // Since PolicyScriptChecks() passed, this should never fail. | 
| 1374 | 0 |             Assume(false); | 
| 1375 | 0 |             all_submitted = false; | 
| 1376 | 0 |             package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR, | 
| 1377 | 0 |                                   strprintf("BUG! PolicyScriptChecks succeeded but ConsensusScriptChecks failed: %s", | 
| 1378 | 0 |                                             ws.m_ptx->GetHash().ToString())); | 
| 1379 |  |             // Remove the transaction from the mempool. | 
| 1380 | 0 |             if (!m_subpackage.m_changeset) m_subpackage.m_changeset = m_pool.GetChangeSet(); | 
| 1381 | 0 |             m_subpackage.m_changeset->StageRemoval(m_pool.GetIter(ws.m_ptx->GetHash()).value()); | 
| 1382 | 0 |         } | 
| 1383 | 0 |     } | 
| 1384 | 0 |     if (!all_submitted) { | 
| 1385 | 0 |         Assume(m_subpackage.m_changeset); | 
| 1386 |  |         // This code should be unreachable; it's here as belt-and-suspenders | 
| 1387 |  |         // to try to ensure we have no consensus-invalid transactions in the | 
| 1388 |  |         // mempool. | 
| 1389 | 0 |         m_subpackage.m_changeset->Apply(); | 
| 1390 | 0 |         m_subpackage.m_changeset.reset(); | 
| 1391 | 0 |         return false; | 
| 1392 | 0 |     } | 
| 1393 |  |  | 
| 1394 | 0 |     std::vector<Wtxid> all_package_wtxids; | 
| 1395 | 0 |     all_package_wtxids.reserve(workspaces.size()); | 
| 1396 | 0 |     std::transform(workspaces.cbegin(), workspaces.cend(), std::back_inserter(all_package_wtxids), | 
| 1397 | 0 |                    [](const auto& ws) { return ws.m_ptx->GetWitnessHash(); }); | 
| 1398 |  |  | 
| 1399 | 0 |     if (!m_subpackage.m_replaced_transactions.empty()) { | 
| 1400 | 0 |         LogDebug(BCLog::MEMPOOL, "replaced %u mempool transactions with %u new one(s) for %s additional fees, %d delta bytes\n", | 
| 1401 | 0 |                  m_subpackage.m_replaced_transactions.size(), workspaces.size(), | 
| 1402 | 0 |                  m_subpackage.m_total_modified_fees - m_subpackage.m_conflicting_fees, | 
| 1403 | 0 |                  m_subpackage.m_total_vsize - static_cast<int>(m_subpackage.m_conflicting_size)); | 
| 1404 | 0 |     } | 
| 1405 |  |  | 
| 1406 |  |     // Add successful results. The returned results may change later if LimitMempoolSize() evicts them. | 
| 1407 | 0 |     for (Workspace& ws : workspaces) { | 
| 1408 | 0 |         auto iter = m_pool.GetIter(ws.m_ptx->GetHash()); | 
| 1409 | 0 |         Assume(iter.has_value()); | 
| 1410 | 0 |         const auto effective_feerate = args.m_package_feerates ? ws.m_package_feerate : | 
| 1411 | 0 |             CFeeRate{ws.m_modified_fees, static_cast<int32_t>(ws.m_vsize)}; | 
| 1412 | 0 |         const auto effective_feerate_wtxids = args.m_package_feerates ? all_package_wtxids : | 
| 1413 | 0 |             std::vector<Wtxid>{ws.m_ptx->GetWitnessHash()}; | 
| 1414 | 0 |         results.emplace(ws.m_ptx->GetWitnessHash(), | 
| 1415 | 0 |                         MempoolAcceptResult::Success(std::move(m_subpackage.m_replaced_transactions), ws.m_vsize, | 
| 1416 | 0 |                                          ws.m_base_fees, effective_feerate, effective_feerate_wtxids)); | 
| 1417 | 0 |         if (!m_pool.m_opts.signals) continue; | 
| 1418 | 0 |         const CTransaction& tx = *ws.m_ptx; | 
| 1419 | 0 |         const auto tx_info = NewMempoolTransactionInfo(ws.m_ptx, ws.m_base_fees, | 
| 1420 | 0 |                                                        ws.m_vsize, (*iter)->GetHeight(), | 
| 1421 | 0 |                                                        args.m_bypass_limits, args.m_package_submission, | 
| 1422 | 0 |                                                        IsCurrentForFeeEstimation(m_active_chainstate), | 
| 1423 | 0 |                                                        m_pool.HasNoInputsOf(tx)); | 
| 1424 | 0 |         m_pool.m_opts.signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence()); | 
| 1425 | 0 |     } | 
| 1426 | 0 |     return all_submitted; | 
| 1427 | 0 | } | 
| 1428 |  |  | 
| 1429 |  | MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) | 
| 1430 | 719k | { | 
| 1431 | 719k |     AssertLockHeld(cs_main); | 
| 1432 | 719k |     LOCK(m_pool.cs); // mempool "read lock" (held through m_pool.m_opts.signals->TransactionAddedToMempool()) | 
| 1433 |  |  | 
| 1434 | 719k |     Workspace ws(ptx); | 
| 1435 | 719k |     const std::vector<Wtxid> single_wtxid{ws.m_ptx->GetWitnessHash()}; | 
| 1436 |  |  | 
| 1437 | 719k |     if (!PreChecks(args, ws)) { | 
| 1438 | 80.2k |         if (ws.m_state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) { | 
| 1439 |  |             // Failed for fee reasons. Provide the effective feerate and which tx was included. | 
| 1440 | 0 |             return MempoolAcceptResult::FeeFailure(ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize), single_wtxid); | 
| 1441 | 0 |         } | 
| 1442 | 80.2k |         return MempoolAcceptResult::Failure(ws.m_state); | 
| 1443 | 80.2k |     } | 
| 1444 |  |  | 
| 1445 | 638k |     m_subpackage.m_total_vsize = ws.m_vsize; | 
| 1446 | 638k |     m_subpackage.m_total_modified_fees = ws.m_modified_fees; | 
| 1447 |  |  | 
| 1448 |  |     // Individual modified feerate exceeded caller-defined max; abort | 
| 1449 | 638k |     if (args.m_client_maxfeerate && CFeeRate(ws.m_modified_fees, ws.m_vsize) > args.m_client_maxfeerate.value()) { | 
| 1450 | 0 |         ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "max feerate exceeded", ""); | 
| 1451 | 0 |         return MempoolAcceptResult::Failure(ws.m_state); | 
| 1452 | 0 |     } | 
| 1453 |  |  | 
| 1454 | 638k |     if (m_pool.m_opts.require_standard) { | 
| 1455 | 638k |         Wtxid dummy_wtxid; | 
| 1456 | 638k |         if (!CheckEphemeralSpends(/*package=*/{ptx}, m_pool.m_opts.dust_relay_feerate, m_pool, ws.m_state, dummy_wtxid)) { | 
| 1457 | 0 |             return MempoolAcceptResult::Failure(ws.m_state); | 
| 1458 | 0 |         } | 
| 1459 | 638k |     } | 
| 1460 |  |  | 
| 1461 | 638k |     if (m_subpackage.m_rbf && !ReplacementChecks(ws)) { | 
| 1462 | 145k |         if (ws.m_state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) { | 
| 1463 |  |             // Failed for incentives-based fee reasons. Provide the effective feerate and which tx was included. | 
| 1464 | 145k |             return MempoolAcceptResult::FeeFailure(ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize), single_wtxid); | 
| 1465 | 145k |         } | 
| 1466 | 0 |         return MempoolAcceptResult::Failure(ws.m_state); | 
| 1467 | 145k |     } | 
| 1468 |  |  | 
| 1469 |  |     // Perform the inexpensive checks first and avoid hashing and signature verification unless | 
| 1470 |  |     // those checks pass, to mitigate CPU exhaustion denial-of-service attacks. | 
| 1471 | 493k |     if (!PolicyScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state); | 
| 1472 |  |  | 
| 1473 | 493k |     if (!ConsensusScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state); | 
| 1474 |  |  | 
| 1475 | 493k |     const CFeeRate effective_feerate{ws.m_modified_fees, static_cast<int32_t>(ws.m_vsize)}; | 
| 1476 |  |     // Tx was accepted, but not added | 
| 1477 | 493k |     if (args.m_test_accept) { | 
| 1478 | 0 |         return MempoolAcceptResult::Success(std::move(m_subpackage.m_replaced_transactions), ws.m_vsize, | 
| 1479 | 0 |                                             ws.m_base_fees, effective_feerate, single_wtxid); | 
| 1480 | 0 |     } | 
| 1481 |  |  | 
| 1482 | 493k |     FinalizeSubpackage(args); | 
| 1483 |  |  | 
| 1484 |  |     // Limit the mempool, if appropriate. | 
| 1485 | 493k |     if (!args.m_package_submission && !args.m_bypass_limits) { | 
| 1486 | 493k |         LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip()); | 
| 1487 |  |         // If mempool contents change, then the m_view cache is dirty. Given this isn't a package | 
| 1488 |  |         // submission, we won't be using the cache anymore, but clear it anyway for clarity. | 
| 1489 | 493k |         CleanupTemporaryCoins(); | 
| 1490 |  |  | 
| 1491 | 493k |         if (!m_pool.exists(ws.m_hash)) { | 
| 1492 |  |             // The tx no longer meets our (new) mempool minimum feerate but could be reconsidered in a package. | 
| 1493 | 2.56k |             ws.m_state.Invalid(TxValidationResult::TX_RECONSIDERABLE, "mempool full"); | 
| 1494 | 2.56k |             return MempoolAcceptResult::FeeFailure(ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize), {ws.m_ptx->GetWitnessHash()}); | 
| 1495 | 2.56k |         } | 
| 1496 | 493k |     } | 
| 1497 |  |  | 
| 1498 | 491k |     if (m_pool.m_opts.signals) { | 
| 1499 | 491k |         const CTransaction& tx = *ws.m_ptx; | 
| 1500 | 491k |         auto iter = m_pool.GetIter(tx.GetHash()); | 
| 1501 | 491k |         Assume(iter.has_value()); | 
| 1502 | 491k |         const auto tx_info = NewMempoolTransactionInfo(ws.m_ptx, ws.m_base_fees, | 
| 1503 | 491k |                                                        ws.m_vsize, (*iter)->GetHeight(), | 
| 1504 | 491k |                                                        args.m_bypass_limits, args.m_package_submission, | 
| 1505 | 491k |                                                        IsCurrentForFeeEstimation(m_active_chainstate), | 
| 1506 | 491k |                                                        m_pool.HasNoInputsOf(tx)); | 
| 1507 | 491k |         m_pool.m_opts.signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence()); | 
| 1508 | 491k |     } | 
| 1509 |  |  | 
| 1510 | 491k |     if (!m_subpackage.m_replaced_transactions.empty()) { | 
| 1511 | 0 |         LogDebug(BCLog::MEMPOOL, "replaced %u mempool transactions with 1 new transaction for %s additional fees, %d delta bytes\n", | 
| 1512 | 0 |                  m_subpackage.m_replaced_transactions.size(), | 
| 1513 | 0 |                  ws.m_modified_fees - m_subpackage.m_conflicting_fees, | 
| 1514 | 0 |                  ws.m_vsize - static_cast<int>(m_subpackage.m_conflicting_size)); | 
| 1515 | 0 |     } | 
| 1516 |  |  | 
| 1517 | 491k |     return MempoolAcceptResult::Success(std::move(m_subpackage.m_replaced_transactions), ws.m_vsize, ws.m_base_fees, | 
| 1518 | 491k |                                         effective_feerate, single_wtxid); | 
| 1519 | 493k | } | 
| 1520 |  |  | 
| 1521 |  | PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) | 
| 1522 | 0 | { | 
| 1523 | 0 |     AssertLockHeld(cs_main); | 
| 1524 |  |  | 
| 1525 |  |     // These context-free package limits can be done before taking the mempool lock. | 
| 1526 | 0 |     PackageValidationState package_state; | 
| 1527 | 0 |     if (!IsWellFormedPackage(txns, package_state, /*require_sorted=*/true)) return PackageMempoolAcceptResult(package_state, {}); | 
| 1528 |  |  | 
| 1529 | 0 |     std::vector<Workspace> workspaces{}; | 
| 1530 | 0 |     workspaces.reserve(txns.size()); | 
| 1531 | 0 |     std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces), | 
| 1532 | 0 |                    [](const auto& tx) { return Workspace(tx); }); | 
| 1533 | 0 |     std::map<Wtxid, MempoolAcceptResult> results; | 
| 1534 |  | 
 | 
| 1535 | 0 |     LOCK(m_pool.cs); | Line | Count | Source |  | 259 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 1536 |  |  | 
| 1537 |  |     // Do all PreChecks first and fail fast to avoid running expensive script checks when unnecessary. | 
| 1538 | 0 |     for (Workspace& ws : workspaces) { | 
| 1539 | 0 |         if (!PreChecks(args, ws)) { | 
| 1540 | 0 |             package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed"); | 
| 1541 |  |             // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished. | 
| 1542 | 0 |             results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); | 
| 1543 | 0 |             return PackageMempoolAcceptResult(package_state, std::move(results)); | 
| 1544 | 0 |         } | 
| 1545 |  |  | 
| 1546 |  |         // Individual modified feerate exceeded caller-defined max; abort | 
| 1547 |  |         // N.B. this doesn't take into account CPFPs. Chunk-aware validation may be more robust. | 
| 1548 | 0 |         if (args.m_client_maxfeerate && CFeeRate(ws.m_modified_fees, ws.m_vsize) > args.m_client_maxfeerate.value()) { | 
| 1549 |  |             // Need to set failure here both individually and at package level | 
| 1550 | 0 |             ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "max feerate exceeded", ""); | 
| 1551 | 0 |             package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed"); | 
| 1552 |  |             // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished. | 
| 1553 | 0 |             results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); | 
| 1554 | 0 |             return PackageMempoolAcceptResult(package_state, std::move(results)); | 
| 1555 | 0 |         } | 
| 1556 |  |  | 
| 1557 |  |         // Make the coins created by this transaction available for subsequent transactions in the | 
| 1558 |  |         // package to spend. If there are no conflicts within the package, no transaction can spend a coin | 
| 1559 |  |         // needed by another transaction in the package. We also need to make sure that no package | 
| 1560 |  |         // tx replaces (or replaces the ancestor of) the parent of another package tx. As long as we | 
| 1561 |  |         // check these two things, we don't need to track the coins spent. | 
| 1562 |  |         // If a package tx conflicts with a mempool tx, PackageMempoolChecks() ensures later that any package RBF attempt | 
| 1563 |  |         // has *no* in-mempool ancestors, so we don't have to worry about subsequent transactions in | 
| 1564 |  |         // same package spending the same in-mempool outpoints. This needs to be revisited for general | 
| 1565 |  |         // package RBF. | 
| 1566 | 0 |         m_viewmempool.PackageAddTransaction(ws.m_ptx); | 
| 1567 | 0 |     } | 
| 1568 |  |  | 
| 1569 |  |     // At this point we have all in-mempool ancestors, and we know every transaction's vsize. | 
| 1570 |  |     // Run the TRUC checks on the package. | 
| 1571 | 0 |     for (Workspace& ws : workspaces) { | 
| 1572 | 0 |         if (auto err{PackageTRUCChecks(ws.m_ptx, ws.m_vsize, txns, ws.m_ancestors)}) { | 
| 1573 | 0 |             package_state.Invalid(PackageValidationResult::PCKG_POLICY, "TRUC-violation", err.value()); | 
| 1574 | 0 |             return PackageMempoolAcceptResult(package_state, {}); | 
| 1575 | 0 |         } | 
| 1576 | 0 |     } | 
| 1577 |  |  | 
| 1578 |  |     // Transactions must meet two minimum feerates: the mempool minimum fee and min relay fee. | 
| 1579 |  |     // For transactions consisting of exactly one child and its parents, it suffices to use the | 
| 1580 |  |     // package feerate (total modified fees / total virtual size) to check this requirement. | 
| 1581 |  |     // Note that this is an aggregate feerate; this function has not checked that there are transactions | 
| 1582 |  |     // too low feerate to pay for themselves, or that the child transactions are higher feerate than | 
| 1583 |  |     // their parents. Using aggregate feerate may allow "parents pay for child" behavior and permit | 
| 1584 |  |     // a child that is below mempool minimum feerate. To avoid these behaviors, callers of | 
| 1585 |  |     // AcceptMultipleTransactions need to restrict txns topology (e.g. to ancestor sets) and check | 
| 1586 |  |     // the feerates of individuals and subsets. | 
| 1587 | 0 |     m_subpackage.m_total_vsize = std::accumulate(workspaces.cbegin(), workspaces.cend(), int64_t{0}, | 
| 1588 | 0 |         [](int64_t sum, auto& ws) { return sum + ws.m_vsize; }); | 
| 1589 | 0 |     m_subpackage.m_total_modified_fees = std::accumulate(workspaces.cbegin(), workspaces.cend(), CAmount{0}, | 
| 1590 | 0 |         [](CAmount sum, auto& ws) { return sum + ws.m_modified_fees; }); | 
| 1591 | 0 |     const CFeeRate package_feerate(m_subpackage.m_total_modified_fees, m_subpackage.m_total_vsize); | 
| 1592 | 0 |     std::vector<Wtxid> all_package_wtxids; | 
| 1593 | 0 |     all_package_wtxids.reserve(workspaces.size()); | 
| 1594 | 0 |     std::transform(workspaces.cbegin(), workspaces.cend(), std::back_inserter(all_package_wtxids), | 
| 1595 | 0 |                    [](const auto& ws) { return ws.m_ptx->GetWitnessHash(); }); | 
| 1596 | 0 |     TxValidationState placeholder_state; | 
| 1597 | 0 |     if (args.m_package_feerates && | 
| 1598 | 0 |         !CheckFeeRate(m_subpackage.m_total_vsize, m_subpackage.m_total_modified_fees, placeholder_state)) { | 
| 1599 | 0 |         package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed"); | 
| 1600 | 0 |         return PackageMempoolAcceptResult(package_state, {{workspaces.back().m_ptx->GetWitnessHash(), | 
| 1601 | 0 |             MempoolAcceptResult::FeeFailure(placeholder_state, CFeeRate(m_subpackage.m_total_modified_fees, m_subpackage.m_total_vsize), all_package_wtxids)}}); | 
| 1602 | 0 |     } | 
| 1603 |  |  | 
| 1604 |  |     // Apply package mempool ancestor/descendant limits. Skip if there is only one transaction, | 
| 1605 |  |     // because it's unnecessary. | 
| 1606 | 0 |     if (txns.size() > 1 && !PackageMempoolChecks(txns, workspaces, m_subpackage.m_total_vsize, package_state)) { | 
| 1607 | 0 |         return PackageMempoolAcceptResult(package_state, std::move(results)); | 
| 1608 | 0 |     } | 
| 1609 |  |  | 
| 1610 |  |     // Now that we've bounded the resulting possible ancestry count, check package for dust spends | 
| 1611 | 0 |     if (m_pool.m_opts.require_standard) { | 
| 1612 | 0 |         TxValidationState child_state; | 
| 1613 | 0 |         Wtxid child_wtxid; | 
| 1614 | 0 |         if (!CheckEphemeralSpends(txns, m_pool.m_opts.dust_relay_feerate, m_pool, child_state, child_wtxid)) { | 
| 1615 | 0 |             package_state.Invalid(PackageValidationResult::PCKG_TX, "unspent-dust"); | 
| 1616 | 0 |             results.emplace(child_wtxid, MempoolAcceptResult::Failure(child_state)); | 
| 1617 | 0 |             return PackageMempoolAcceptResult(package_state, std::move(results)); | 
| 1618 | 0 |         } | 
| 1619 | 0 |     } | 
| 1620 |  |  | 
| 1621 | 0 |     for (Workspace& ws : workspaces) { | 
| 1622 | 0 |         ws.m_package_feerate = package_feerate; | 
| 1623 | 0 |         if (!PolicyScriptChecks(args, ws)) { | 
| 1624 |  |             // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished. | 
| 1625 | 0 |             package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed"); | 
| 1626 | 0 |             results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); | 
| 1627 | 0 |             return PackageMempoolAcceptResult(package_state, std::move(results)); | 
| 1628 | 0 |         } | 
| 1629 | 0 |         if (args.m_test_accept) { | 
| 1630 | 0 |             const auto effective_feerate = args.m_package_feerates ? ws.m_package_feerate : | 
| 1631 | 0 |                 CFeeRate{ws.m_modified_fees, static_cast<int32_t>(ws.m_vsize)}; | 
| 1632 | 0 |             const auto effective_feerate_wtxids = args.m_package_feerates ? all_package_wtxids : | 
| 1633 | 0 |                 std::vector<Wtxid>{ws.m_ptx->GetWitnessHash()}; | 
| 1634 | 0 |             results.emplace(ws.m_ptx->GetWitnessHash(), | 
| 1635 | 0 |                             MempoolAcceptResult::Success(std::move(m_subpackage.m_replaced_transactions), | 
| 1636 | 0 |                                                          ws.m_vsize, ws.m_base_fees, effective_feerate, | 
| 1637 | 0 |                                                          effective_feerate_wtxids)); | 
| 1638 | 0 |         } | 
| 1639 | 0 |     } | 
| 1640 |  |  | 
| 1641 | 0 |     if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, std::move(results)); | 
| 1642 |  |  | 
| 1643 | 0 |     if (!SubmitPackage(args, workspaces, package_state, results)) { | 
| 1644 |  |         // PackageValidationState filled in by SubmitPackage(). | 
| 1645 | 0 |         return PackageMempoolAcceptResult(package_state, std::move(results)); | 
| 1646 | 0 |     } | 
| 1647 |  |  | 
| 1648 | 0 |     return PackageMempoolAcceptResult(package_state, std::move(results)); | 
| 1649 | 0 | } | 
| 1650 |  |  | 
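The aggregate package feerate computed above is simply total modified fees divided by total virtual size. A minimal worked sketch with hypothetical numbers follows; the helper name is invented for illustration, and Bitcoin Core's `CFeeRate` performs roughly the same per-kvB arithmetic:

```cpp
#include <cstdint>

// Hypothetical example: a parent paying 0 sat at 200 vB and a child paying
// 1000 sat at 100 vB give an aggregate of 1000 sat / 300 vB ~= 3333 sat/kvB,
// even though the parent pays nothing by itself. This is why callers must
// restrict the package topology and also check individual/subset feerates.
int64_t PackageFeerateSatPerKvB(int64_t total_modified_fees_sat, int64_t total_vsize_vb)
{
    return total_modified_fees_sat * 1000 / total_vsize_vb; // integer division, rounds down
}
// PackageFeerateSatPerKvB(0 + 1000, 200 + 100) == 3333
```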
| 1651 |  | void MemPoolAccept::CleanupTemporaryCoins() | 
| 1652 | 493k | { | 
| 1653 |  |     // There are 3 kinds of coins in m_view: | 
| 1654 |  |     // (1) Temporary coins from the transactions in subpackage, constructed by m_viewmempool. | 
| 1655 |  |     // (2) Mempool coins from transactions in the mempool, constructed by m_viewmempool. | 
| 1656 |  |     // (3) Confirmed coins fetched from our current UTXO set. | 
| 1657 |  |     // | 
| 1658 |  |     // (1) Temporary coins need to be removed, regardless of whether the transaction was submitted. | 
| 1659 |  |     // If the transaction was submitted to the mempool, m_viewmempool will be able to fetch them from | 
| 1660 |  |     // there. If it wasn't submitted to mempool, it is incorrect to keep them - future calls may try | 
| 1661 |  |     // to spend those coins that don't actually exist. | 
| 1662 |  |     // (2) Mempool coins also need to be removed. If the mempool contents have changed as a result | 
| 1663 |  |     // of submitting or replacing transactions, coins previously fetched from mempool may now be | 
| 1664 |  |     // spent or nonexistent. Those coins need to be deleted from m_view. | 
| 1665 |  |     // (3) Confirmed coins don't need to be removed. The chainstate has not changed (we are | 
| 1666 |  |     // holding cs_main and no blocks have been processed) so the confirmed tx cannot disappear like | 
| 1667 |  |     // a mempool tx can. The coin may now be spent after we submitted a tx to mempool, but | 
| 1668 |  |     // we have already checked that the package does not have 2 transactions spending the same coin | 
| 1669 |  |     // and we check whether a mempool transaction spends conflicting coins (CTxMemPool::GetConflictTx). | 
| 1670 |  |     // Keeping them in m_view is an optimization to not re-fetch confirmed coins if we later look up | 
| 1671 |  |     // inputs for this transaction again. | 
| 1672 | 493k |     for (const auto& outpoint : m_viewmempool.GetNonBaseCoins()) { | 
| 1673 |  |         // In addition to resetting m_viewmempool, we also need to manually delete these coins from | 
| 1674 |  |         // m_view because it caches copies of the coins it fetched from m_viewmempool previously. | 
| 1675 | 319k |         m_view.Uncache(outpoint); | 
| 1676 | 319k |     } | 
| 1677 |  |     // This deletes the temporary and mempool coins. | 
| 1678 | 493k |     m_viewmempool.Reset(); | 
| 1679 | 493k | } | 
| 1680 |  |  | 
| 1681 |  | PackageMempoolAcceptResult MemPoolAccept::AcceptSubPackage(const std::vector<CTransactionRef>& subpackage, ATMPArgs& args) | 
| 1682 | 0 | { | 
| 1683 | 0 |     AssertLockHeld(::cs_main); | 
| 1684 | 0 |     AssertLockHeld(m_pool.cs); | 
| 1685 | 0 |     auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_pool.cs) { | 
| 1686 | 0 |         if (subpackage.size() > 1) { | 
| 1687 | 0 |             return AcceptMultipleTransactions(subpackage, args); | 
| 1688 | 0 |         } | 
| 1689 | 0 |         const auto& tx = subpackage.front(); | 
| 1690 | 0 |         ATMPArgs single_args = ATMPArgs::SingleInPackageAccept(args); | 
| 1691 | 0 |         const auto single_res = AcceptSingleTransaction(tx, single_args); | 
| 1692 | 0 |         PackageValidationState package_state_wrapped; | 
| 1693 | 0 |         if (single_res.m_result_type != MempoolAcceptResult::ResultType::VALID) { | 
| 1694 | 0 |             package_state_wrapped.Invalid(PackageValidationResult::PCKG_TX, "transaction failed"); | 
| 1695 | 0 |         } | 
| 1696 | 0 |         return PackageMempoolAcceptResult(package_state_wrapped, {{tx->GetWitnessHash(), single_res}}); | 
| 1697 | 0 |     }(); | 
| 1698 |  |  | 
| 1699 |  |     // Clean up m_view and m_viewmempool so that other subpackage evaluations don't have access to | 
| 1700 |  |     // coins they shouldn't. Keep some coins in order to minimize re-fetching coins from the UTXO set. | 
| 1701 |  |     // Clean up package feerate and rbf calculations | 
| 1702 | 0 |     ClearSubPackageState(); | 
| 1703 |  |  | 
| 1704 | 0 |     return result; | 
| 1705 | 0 | } | 
| 1706 |  |  | 
| 1707 |  | PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package, ATMPArgs& args) | 
| 1708 | 0 | { | 
| 1709 | 0 |     Assert(!package.empty()); | 
| 1710 | 0 |     AssertLockHeld(cs_main); | 
| 1711 |  |     // Used if returning a PackageMempoolAcceptResult directly from this function. | 
| 1712 | 0 |     PackageValidationState package_state_quit_early; | 
| 1713 |  |  | 
| 1714 |  |     // There are two topologies we are able to handle through this function: | 
| 1715 |  |     // (1) A single transaction | 
| 1716 |  |     // (2) A child-with-parents package. | 
| 1717 |  |     // Check that the package is well-formed. If it isn't, we won't try to validate any of the | 
| 1718 |  |     // transactions and thus won't return any MempoolAcceptResults, just a package-wide error. | 
| 1719 |  |  | 
| 1720 |  |     // Context-free package checks. | 
| 1721 | 0 |     if (!IsWellFormedPackage(package, package_state_quit_early, /*require_sorted=*/true)) { | 
| 1722 | 0 |         return PackageMempoolAcceptResult(package_state_quit_early, {}); | 
| 1723 | 0 |     } | 
| 1724 |  |  | 
| 1725 | 0 |     if (package.size() > 1 && !IsChildWithParents(package)) { | 
| 1726 |  |         // All transactions in the package must be a parent of the last transaction. This is just an | 
| 1727 |  |         // opportunity for us to fail fast on a context-free check without taking the mempool lock. | 
| 1728 | 0 |         package_state_quit_early.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-parents"); | 
| 1729 | 0 |         return PackageMempoolAcceptResult(package_state_quit_early, {}); | 
| 1730 | 0 |     } | 
| 1731 |  |  | 
| 1732 | 0 |     LOCK(m_pool.cs); | 
| 1733 |  |     // Stores results from which we will create the returned PackageMempoolAcceptResult. | 
| 1734 |  |     // A result may be changed if a mempool transaction is evicted later due to LimitMempoolSize(). | 
| 1735 | 0 |     std::map<Wtxid, MempoolAcceptResult> results_final; | 
| 1736 |  |     // Results from individual validation which will be returned if no other result is available for | 
| 1737 |  |     // this transaction. "Nonfinal" because if a transaction fails by itself but succeeds later | 
| 1738 |  |     // (i.e. when evaluated with a fee-bumping child), the result in this map may be discarded. | 
| 1739 | 0 |     std::map<Wtxid, MempoolAcceptResult> individual_results_nonfinal; | 
| 1740 |  |     // Tracks whether we think package submission could result in successful entry to the mempool | 
| 1741 | 0 |     bool quit_early{false}; | 
| 1742 | 0 |     std::vector<CTransactionRef> txns_package_eval; | 
| 1743 | 0 |     for (const auto& tx : package) { | 
| 1744 | 0 |         const auto& wtxid = tx->GetWitnessHash(); | 
| 1745 | 0 |         const auto& txid = tx->GetHash(); | 
| 1746 |  |         // There are 3 possibilities: already in mempool, same-txid-diff-wtxid already in mempool, | 
| 1747 |  |         // or not in mempool. An already confirmed tx is treated as one not in mempool, because all | 
| 1748 |  |         // we know is that the inputs aren't available. | 
| 1749 | 0 |         if (m_pool.exists(wtxid)) { | 
| 1750 |  |             // Exact transaction already exists in the mempool. | 
| 1751 |  |             // Node operators are free to set their mempool policies however they please, nodes may receive | 
| 1752 |  |             // transactions in different orders, and malicious counterparties may try to take advantage of | 
| 1753 |  |             // policy differences to pin or delay propagation of transactions. As such, it's possible for | 
| 1754 |  |             // some package transaction(s) to already be in the mempool, and we don't want to reject the | 
| 1755 |  |             // entire package in that case (as that could be a censorship vector). De-duplicate the | 
| 1756 |  |             // transactions that are already in the mempool, and only call AcceptMultipleTransactions() with | 
| 1757 |  |             // the new transactions. This ensures we don't double-count transaction counts and sizes when | 
| 1758 |  |             // checking ancestor/descendant limits, or double-count transaction fees for fee-related policy. | 
| 1759 | 0 |             const auto& entry{*Assert(m_pool.GetEntry(txid))}; | 
| 1760 | 0 |             results_final.emplace(wtxid, MempoolAcceptResult::MempoolTx(entry.GetTxSize(), entry.GetFee())); | 
| 1761 | 0 |         } else if (m_pool.exists(txid)) { | 
| 1762 |  |             // Transaction with the same non-witness data but different witness (same txid, | 
| 1763 |  |             // different wtxid) already exists in the mempool. | 
| 1764 |  |             // | 
| 1765 |  |             // We don't allow replacement transactions right now, so just swap the package | 
| 1766 |  |             // transaction for the mempool one. Note that we are ignoring the validity of the | 
| 1767 |  |             // package transaction passed in. | 
| 1768 |  |             // TODO: allow witness replacement in packages. | 
| 1769 | 0 |             const auto& entry{*Assert(m_pool.GetEntry(txid))}; | 
| 1770 |  |             // Provide the wtxid of the mempool tx so that the caller can look it up in the mempool. | 
| 1771 | 0 |             results_final.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(entry.GetTx().GetWitnessHash())); | 
| 1772 | 0 |         } else { | 
| 1773 |  |             // Transaction does not already exist in the mempool. | 
| 1774 |  |             // Try submitting the transaction on its own. | 
| 1775 | 0 |             const auto single_package_res = AcceptSubPackage({tx}, args); | 
| 1776 | 0 |             const auto& single_res = single_package_res.m_tx_results.at(wtxid); | 
| 1777 | 0 |             if (single_res.m_result_type == MempoolAcceptResult::ResultType::VALID) { | 
| 1778 |  |                 // The transaction succeeded on its own and is now in the mempool. Don't include it | 
| 1779 |  |                 // in package validation, because its fees should only be "used" once. | 
| 1780 | 0 |                 assert(m_pool.exists(wtxid)); | 
| 1781 | 0 |                 results_final.emplace(wtxid, single_res); | 
| 1782 | 0 |             } else if (package.size() == 1 || // If there is only one transaction, no need to retry it "as a package" | 
| 1783 | 0 |                        (single_res.m_state.GetResult() != TxValidationResult::TX_RECONSIDERABLE && | 
| 1784 | 0 |                        single_res.m_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS)) { | 
| 1785 |  |                 // Package validation policy only differs from individual policy in its evaluation | 
| 1786 |  |                 // of feerate. For example, if a transaction fails here due to violation of a | 
| 1787 |  |                 // consensus rule, the result will not change when it is submitted as part of a | 
| 1788 |  |                 // package. To minimize the amount of repeated work, unless the transaction fails | 
| 1789 |  |                 // due to feerate or missing inputs (its parent is a previous transaction in the | 
| 1790 |  |                 // package that failed due to feerate), don't run package validation. Note that this | 
| 1791 |  |                 // decision might not make sense if different types of packages are allowed in the | 
| 1792 |  |                 // future.  Continue individually validating the rest of the transactions, because | 
| 1793 |  |                 // some of them may still be valid. | 
| 1794 | 0 |                 quit_early = true; | 
| 1795 | 0 |                 package_state_quit_early.Invalid(PackageValidationResult::PCKG_TX, "transaction failed"); | 
| 1796 | 0 |                 individual_results_nonfinal.emplace(wtxid, single_res); | 
| 1797 | 0 |             } else { | 
| 1798 | 0 |                 individual_results_nonfinal.emplace(wtxid, single_res); | 
| 1799 | 0 |                 txns_package_eval.push_back(tx); | 
| 1800 | 0 |             } | 
| 1801 | 0 |         } | 
| 1802 | 0 |     } | 
| 1803 |  |  | 
| 1804 | 0 |     auto multi_submission_result = quit_early || txns_package_eval.empty() ? PackageMempoolAcceptResult(package_state_quit_early, {}) : | 
| 1805 | 0 |         AcceptSubPackage(txns_package_eval, args); | 
| 1806 | 0 |     PackageValidationState& package_state_final = multi_submission_result.m_state; | 
| 1807 |  |  | 
| 1808 |  |     // This is invoked by AcceptSubPackage() already, so this is just here for | 
| 1809 |  |     // clarity (since it's not permitted to invoke LimitMempoolSize() while a | 
| 1810 |  |     // changeset is outstanding). | 
| 1811 | 0 |     ClearSubPackageState(); | 
| 1812 |  |  | 
| 1813 |  |     // Make sure we haven't exceeded max mempool size. | 
| 1814 |  |     // Package transactions that were submitted to mempool or already in mempool may be evicted. | 
| 1815 |  |     // If mempool contents change, then the m_view cache is dirty. It has already been cleared above. | 
| 1816 | 0 |     LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip()); | 
| 1817 |  |  | 
| 1818 | 0 |     for (const auto& tx : package) { | 
| 1819 | 0 |         const auto& wtxid = tx->GetWitnessHash(); | 
| 1820 | 0 |         if (multi_submission_result.m_tx_results.count(wtxid) > 0) { | 
| 1821 |  |             // We shouldn't have re-submitted if the tx result was already in results_final. | 
| 1822 | 0 |             Assume(results_final.count(wtxid) == 0); | 
| 1823 |  |             // If it was submitted, check to see if the tx is still in the mempool. It could have | 
| 1824 |  |             // been evicted due to LimitMempoolSize() above. | 
| 1825 | 0 |             const auto& txresult = multi_submission_result.m_tx_results.at(wtxid); | 
| 1826 | 0 |             if (txresult.m_result_type == MempoolAcceptResult::ResultType::VALID && !m_pool.exists(wtxid)) { | 
| 1827 | 0 |                 package_state_final.Invalid(PackageValidationResult::PCKG_TX, "transaction failed"); | 
| 1828 | 0 |                 TxValidationState mempool_full_state; | 
| 1829 | 0 |                 mempool_full_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full"); | 
| 1830 | 0 |                 results_final.emplace(wtxid, MempoolAcceptResult::Failure(mempool_full_state)); | 
| 1831 | 0 |             } else { | 
| 1832 | 0 |                 results_final.emplace(wtxid, txresult); | 
| 1833 | 0 |             } | 
| 1834 | 0 |         } else if (const auto it{results_final.find(wtxid)}; it != results_final.end()) { | 
| 1835 |  |             // Already-in-mempool transaction. Check to see if it's still there, as it could have | 
| 1836 |  |             // been evicted when LimitMempoolSize() was called. | 
| 1837 | 0 |             Assume(it->second.m_result_type != MempoolAcceptResult::ResultType::INVALID); | 
| 1838 | 0 |             Assume(individual_results_nonfinal.count(wtxid) == 0); | 
| 1839 |  |             // Query by txid to include the same-txid-different-witness ones. | 
| 1840 | 0 |             if (!m_pool.exists(tx->GetHash())) { | 
| 1841 | 0 |                 package_state_final.Invalid(PackageValidationResult::PCKG_TX, "transaction failed"); | 
| 1842 | 0 |                 TxValidationState mempool_full_state; | 
| 1843 | 0 |                 mempool_full_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full"); | 
| 1844 |  |                 // Replace the previous result. | 
| 1845 | 0 |                 results_final.erase(wtxid); | 
| 1846 | 0 |                 results_final.emplace(wtxid, MempoolAcceptResult::Failure(mempool_full_state)); | 
| 1847 | 0 |             } | 
| 1848 | 0 |         } else if (const auto it{individual_results_nonfinal.find(wtxid)}; it != individual_results_nonfinal.end()) { | 
| 1849 | 0 |             Assume(it->second.m_result_type == MempoolAcceptResult::ResultType::INVALID); | 
| 1850 |  |             // Interesting result from previous processing. | 
| 1851 | 0 |             results_final.emplace(wtxid, it->second); | 
| 1852 | 0 |         } | 
| 1853 | 0 |     } | 
| 1854 | 0 |     Assume(results_final.size() == package.size()); | 
| 1855 | 0 |     return PackageMempoolAcceptResult(package_state_final, std::move(results_final)); | 
| 1856 | 0 | } | 
| 1857 |  |  | 
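The per-transaction loop in AcceptPackage above boils down to a three-way triage. A minimal sketch of that decision, with an enum and helper invented purely for illustration:

```cpp
// Exact wtxid match is deduplicated, a same-txid-different-witness match is
// swapped for the mempool copy, and everything else goes through validation
// (individually first, then possibly as part of the package).
enum class PackageTriage { AlreadyInMempool, DifferentWitness, NeedsValidation };

PackageTriage Triage(bool mempool_has_wtxid, bool mempool_has_txid)
{
    if (mempool_has_wtxid) return PackageTriage::AlreadyInMempool; // de-duplicate
    if (mempool_has_txid) return PackageTriage::DifferentWitness;  // keep the mempool witness
    return PackageTriage::NeedsValidation;
}
```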
| 1858 |  | } // anon namespace | 
| 1859 |  |  | 
| 1860 |  | MempoolAcceptResult AcceptToMemoryPool(Chainstate& active_chainstate, const CTransactionRef& tx, | 
| 1861 |  |                                        int64_t accept_time, bool bypass_limits, bool test_accept) | 
| 1862 | 719k | { | 
| 1863 | 719k |     AssertLockHeld(::cs_main); | 
| 1864 | 719k |     const CChainParams& chainparams{active_chainstate.m_chainman.GetParams()}; | 
| 1865 | 719k |     assert(active_chainstate.GetMempool() != nullptr); | 
| 1866 | 719k |     CTxMemPool& pool{*active_chainstate.GetMempool()}; | 
| 1867 |  |  | 
| 1868 | 719k |     std::vector<COutPoint> coins_to_uncache; | 
| 1869 | 719k |     auto args = MemPoolAccept::ATMPArgs::SingleAccept(chainparams, accept_time, bypass_limits, coins_to_uncache, test_accept); | 
| 1870 | 719k |     MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args); | 
| 1871 | 719k |     if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { | 
| 1872 |  |         // Remove coins that were not present in the coins cache before calling | 
| 1873 |  |         // AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large | 
| 1874 |  |         // number of invalid transactions that attempt to overrun the in-memory coins cache | 
| 1875 |  |         // (`CCoinsViewCache::cacheCoins`). | 
| 1876 |  |  | 
| 1877 | 228k |         for (const COutPoint& hashTx : coins_to_uncache) | 
| 1878 | 161k |             active_chainstate.CoinsTip().Uncache(hashTx); | 
| 1879 | 228k |         TRACEPOINT(mempool, rejected, | 
| 1880 | 228k |                 tx->GetHash().data(), | 
| 1881 | 228k |                 result.m_state.GetRejectReason().c_str() | 
| 1882 | 228k |         ); | 
| 1883 | 228k |     } | 
| 1884 |  |     // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits | 
| 1885 | 719k |     BlockValidationState state_dummy; | 
| 1886 | 719k |     active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC); | 
| 1887 | 719k |     return result; | 
| 1888 | 719k | } | 
| 1889 |  |  | 
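A hedged caller-side sketch of the entry point above; the wrapper name is invented, cs_main is assumed to be held by the caller (as the function asserts), and only members visible in this listing (`m_result_type`, `m_state`) are touched:

```cpp
// Sketch only: submit one transaction in test_accept mode and report the result.
static bool TestAcceptOne(Chainstate& chainstate, const CTransactionRef& tx)
    EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
    const MempoolAcceptResult res = AcceptToMemoryPool(chainstate, tx,
                                                       /*accept_time=*/GetTime(),
                                                       /*bypass_limits=*/false,
                                                       /*test_accept=*/true);
    if (res.m_result_type != MempoolAcceptResult::ResultType::VALID) {
        LogPrintf("rejected: %s\n", res.m_state.GetRejectReason());
        return false;
    }
    return true;
}
```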
| 1890 |  | PackageMempoolAcceptResult ProcessNewPackage(Chainstate& active_chainstate, CTxMemPool& pool, | 
| 1891 |  |                                                    const Package& package, bool test_accept, const std::optional<CFeeRate>& client_maxfeerate) | 
| 1892 | 0 | { | 
| 1893 | 0 |     AssertLockHeld(cs_main); | 
| 1894 | 0 |     assert(!package.empty()); | 
| 1895 | 0 |     assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;})); | 
| 1896 |  |  | 
| 1897 | 0 |     std::vector<COutPoint> coins_to_uncache; | 
| 1898 | 0 |     const CChainParams& chainparams = active_chainstate.m_chainman.GetParams(); | 
| 1899 | 0 |     auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) { | 
| 1900 | 0 |         AssertLockHeld(cs_main); | 
| 1901 | 0 |         if (test_accept) { | 
| 1902 | 0 |             auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(chainparams, GetTime(), coins_to_uncache); | 
| 1903 | 0 |             return MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args); | 
| 1904 | 0 |         } else { | 
| 1905 | 0 |             auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(chainparams, GetTime(), coins_to_uncache, client_maxfeerate); | 
| 1906 | 0 |             return MemPoolAccept(pool, active_chainstate).AcceptPackage(package, args); | 
| 1907 | 0 |         } | 
| 1908 | 0 |     }(); | 
| 1909 |  |  | 
| 1910 |  |     // Uncache coins pertaining to transactions that were not submitted to the mempool. | 
| 1911 | 0 |     if (test_accept || result.m_state.IsInvalid()) { | 
| 1912 | 0 |         for (const COutPoint& hashTx : coins_to_uncache) { | 
| 1913 | 0 |             active_chainstate.CoinsTip().Uncache(hashTx); | 
| 1914 | 0 |         } | 
| 1915 | 0 |     } | 
| 1916 |  |     // Ensure the coins cache is still within limits. | 
| 1917 | 0 |     BlockValidationState state_dummy; | 
| 1918 | 0 |     active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC); | 
| 1919 | 0 |     return result; | 
| 1920 | 0 | } | 
| 1921 |  |  | 
| 1922 |  | CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams) | 
| 1923 | 25.0k | { | 
| 1924 | 25.0k |     int halvings = nHeight / consensusParams.nSubsidyHalvingInterval; | 
| 1925 |  |     // Force block reward to zero when right shift is undefined. | 
| 1926 | 25.0k |     if (halvings >= 64) | 
| 1927 | 0 |         return 0; | 
| 1928 |  |  | 
| 1929 | 25.0k |     CAmount nSubsidy = 50 * COIN; | 
| 1930 |  |     // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years. | 
| 1931 | 25.0k |     nSubsidy >>= halvings; | 
| 1932 | 25.0k |     return nSubsidy; | 
| 1933 | 25.0k | } | 
| 1934 |  |  | 
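A self-contained sketch of the halving arithmetic above, assuming the mainnet interval of 210,000 blocks and hard-coding COIN as 100,000,000 satoshis for illustration:

```cpp
#include <cassert>
#include <cstdint>

// Standalone re-statement of the subsidy schedule implemented by GetBlockSubsidy.
int64_t SubsidyAt(int height, int halving_interval = 210000)
{
    const int halvings = height / halving_interval;
    if (halvings >= 64) return 0;          // right shift by >= 64 would be undefined
    int64_t subsidy = 50LL * 100'000'000;  // 50 BTC in satoshis
    return subsidy >> halvings;            // halve once per completed interval
}

int main()
{
    assert(SubsidyAt(0)       == 5'000'000'000); // 50 BTC
    assert(SubsidyAt(210'000) == 2'500'000'000); // 25 BTC
    assert(SubsidyAt(840'000) ==   312'500'000); // 3.125 BTC
}
```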
| 1935 |  | CoinsViews::CoinsViews(DBParams db_params, CoinsViewOptions options) | 
| 1936 | 51.2k |     : m_dbview{std::move(db_params), std::move(options)}, | 
| 1937 | 51.2k |       m_catcherview(&m_dbview) {} | 
| 1938 |  |  | 
| 1939 |  | void CoinsViews::InitCache() | 
| 1940 | 51.2k | { | 
| 1941 | 51.2k |     AssertLockHeld(::cs_main); | 
| 1942 | 51.2k |     m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview); | 
| 1943 | 51.2k | } | 
| 1944 |  |  | 
| 1945 |  | Chainstate::Chainstate( | 
| 1946 |  |     CTxMemPool* mempool, | 
| 1947 |  |     BlockManager& blockman, | 
| 1948 |  |     ChainstateManager& chainman, | 
| 1949 |  |     std::optional<uint256> from_snapshot_blockhash) | 
| 1950 | 51.2k |     : m_mempool(mempool), | 
| 1951 | 51.2k |       m_blockman(blockman), | 
| 1952 | 51.2k |       m_chainman(chainman), | 
| 1953 | 51.2k |       m_from_snapshot_blockhash(from_snapshot_blockhash) {} | 
| 1954 |  |  | 
| 1955 |  | const CBlockIndex* Chainstate::SnapshotBase() const | 
| 1956 | 16.7M | { | 
| 1957 | 16.7M |     if (!m_from_snapshot_blockhash) return nullptr; | 
| 1958 | 0 |     if (!m_cached_snapshot_base) m_cached_snapshot_base = Assert(m_chainman.m_blockman.LookupBlockIndex(*m_from_snapshot_blockhash)); | 
| 1959 | 0 |     return m_cached_snapshot_base; | 
| 1960 | 16.7M | } | 
| 1961 |  |  | 
| 1962 |  | void Chainstate::InitCoinsDB( | 
| 1963 |  |     size_t cache_size_bytes, | 
| 1964 |  |     bool in_memory, | 
| 1965 |  |     bool should_wipe, | 
| 1966 |  |     fs::path leveldb_name) | 
| 1967 | 51.2k | { | 
| 1968 | 51.2k |     if (m_from_snapshot_blockhash) { | 
| 1969 | 0 |         leveldb_name += node::SNAPSHOT_CHAINSTATE_SUFFIX; | 
| 1970 | 0 |     } | 
| 1971 |  |  | 
| 1972 | 51.2k |     m_coins_views = std::make_unique<CoinsViews>( | 
| 1973 | 51.2k |         DBParams{ | 
| 1974 | 51.2k |             .path = m_chainman.m_options.datadir / leveldb_name, | 
| 1975 | 51.2k |             .cache_bytes = cache_size_bytes, | 
| 1976 | 51.2k |             .memory_only = in_memory, | 
| 1977 | 51.2k |             .wipe_data = should_wipe, | 
| 1978 | 51.2k |             .obfuscate = true, | 
| 1979 | 51.2k |             .options = m_chainman.m_options.coins_db}, | 
| 1980 | 51.2k |         m_chainman.m_options.coins_view); | 
| 1981 |  |  | 
| 1982 | 51.2k |     m_coinsdb_cache_size_bytes = cache_size_bytes; | 
| 1983 | 51.2k | } | 
| 1984 |  |  | 
| 1985 |  | void Chainstate::InitCoinsCache(size_t cache_size_bytes) | 
| 1986 | 51.2k | { | 
| 1987 | 51.2k |     AssertLockHeld(::cs_main); | 
| 1988 | 51.2k |     assert(m_coins_views != nullptr); | 
| 1989 | 51.2k |     m_coinstip_cache_size_bytes = cache_size_bytes; | 
| 1990 | 51.2k |     m_coins_views->InitCache(); | 
| 1991 | 51.2k | } | 
| 1992 |  |  | 
| 1993 |  | // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which | 
| 1994 |  | // is a performance-related implementation detail. This function must be marked | 
| 1995 |  | // `const` so that `CValidationInterface` clients (which are given a `const Chainstate*`) | 
| 1996 |  | // can call it. | 
| 1997 |  | // | 
| 1998 |  | bool ChainstateManager::IsInitialBlockDownload() const | 
| 1999 | 14.5M | { | 
| 2000 |  |     // Optimization: pre-test latch before taking the lock. | 
| 2001 | 14.5M |     if (m_cached_finished_ibd.load(std::memory_order_relaxed)) | 
| 2002 | 13.5M |         return false; | 
| 2003 |  |  | 
| 2004 | 1.01M |     LOCK(cs_main); | 
| 2005 | 1.01M |     if (m_cached_finished_ibd.load(std::memory_order_relaxed)) | 
| 2006 | 0 |         return false; | 
| 2007 | 1.01M |     if (m_blockman.LoadingBlocks()) { | 
| 2008 | 0 |         return true; | 
| 2009 | 0 |     } | 
| 2010 | 1.01M |     CChain& chain{ActiveChain()}; | 
| 2011 | 1.01M |     if (chain.Tip() == nullptr) { | 
| 2012 | 0 |         return true; | 
| 2013 | 0 |     } | 
| 2014 | 1.01M |     if (chain.Tip()->nChainWork < MinimumChainWork()) { | 
| 2015 | 0 |         return true; | 
| 2016 | 0 |     } | 
| 2017 | 1.01M |     if (chain.Tip()->Time() < Now<NodeSeconds>() - m_options.max_tip_age) { | 
| 2018 | 986k |         return true; | 
| 2019 | 986k |     } | 
| 2020 | 27.5k |     LogInfo("Leaving InitialBlockDownload (latching to false)"); | 
| 2021 | 27.5k |     m_cached_finished_ibd.store(true, std::memory_order_relaxed); | 
| 2022 | 27.5k |     return false; | 
| 2023 | 1.01M | } | 
| 2024 |  |  | 
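The function above uses a double-checked latch: test the relaxed atomic before taking cs_main, re-test under the lock, and store `true` once the condition permanently flips. A minimal standalone sketch of the same pattern, with invented names:

```cpp
#include <atomic>
#include <mutex>

std::atomic<bool> g_latched{false};
std::mutex g_mutex;

// Returns true once `condition_now` has been observed true under the lock;
// from then on callers take the fast path and never touch the mutex again.
bool IsDoneLatched(bool condition_now)
{
    if (g_latched.load(std::memory_order_relaxed)) return true;  // fast path
    std::lock_guard<std::mutex> lock(g_mutex);
    if (g_latched.load(std::memory_order_relaxed)) return true;  // re-check under lock
    if (!condition_now) return false;
    g_latched.store(true, std::memory_order_relaxed);            // latch, never unset
    return true;
}
```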
| 2025 |  | void Chainstate::CheckForkWarningConditions() | 
| 2026 | 32.3k | { | 
| 2027 | 32.3k |     AssertLockHeld(cs_main); | 
| 2028 |  |  | 
| 2029 |  |     // Before we get past initial download, we cannot reliably alert about forks | 
| 2030 |  |     // (we assume we don't get stuck on a fork before finishing our initial sync) | 
| 2031 |  |     // Also not applicable to the background chainstate | 
| 2032 | 32.3k |     if (m_chainman.IsInitialBlockDownload() || this->GetRole() == ChainstateRole::BACKGROUND) { | 
| 2033 | 2.47k |         return; | 
| 2034 | 2.47k |     } | 
| 2035 |  |  | 
| 2036 | 29.8k |     if (m_chainman.m_best_invalid && m_chainman.m_best_invalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) { | 
| 2037 | 0 |         LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__); | 
| 2038 | 0 |         m_chainman.GetNotifications().warningSet( | 
| 2039 | 0 |             kernel::Warning::LARGE_WORK_INVALID_CHAIN, | 
| 2040 | 0 |             _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.")); | 
| 2041 | 29.8k |     } else { | 
| 2042 | 29.8k |         m_chainman.GetNotifications().warningUnset(kernel::Warning::LARGE_WORK_INVALID_CHAIN); | 
| 2043 | 29.8k |     } | 
| 2044 | 29.8k | } | 
| 2045 |  |  | 
| 2046 |  | // Called both upon regular invalid block discovery *and* InvalidateBlock | 
| 2047 |  | void Chainstate::InvalidChainFound(CBlockIndex* pindexNew) | 
| 2048 | 7.40k | { | 
| 2049 | 7.40k |     AssertLockHeld(cs_main); | 
| 2050 | 7.40k |     if (!m_chainman.m_best_invalid || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) { | 
| 2051 | 3.57k |         m_chainman.m_best_invalid = pindexNew; | 
| 2052 | 3.57k |     } | 
| 2053 | 7.40k |     SetBlockFailureFlags(pindexNew); | 
| 2054 | 7.40k |     if (m_chainman.m_best_header != nullptr && m_chainman.m_best_header->GetAncestor(pindexNew->nHeight) == pindexNew) { | 
| 2055 | 3.67k |         m_chainman.RecalculateBestHeader(); | 
| 2056 | 3.67k |     } | 
| 2057 |  |  | 
| 2058 | 7.40k |     LogPrintf("%s: invalid block=%s  height=%d  log2_work=%f  date=%s\n", __func__, | 
| 2059 | 7.40k |       pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, | 
| 2060 | 7.40k |       log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime())); | 
| 2061 | 7.40k |     CBlockIndex *tip = m_chain.Tip(); | 
| 2062 | 7.40k |     assert (tip); | 
| 2063 | 7.40k |     LogPrintf("%s:  current best=%s  height=%d  log2_work=%f  date=%s\n", __func__, | 
| 2064 | 7.40k |       tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0), | 
| 2065 | 7.40k |       FormatISO8601DateTime(tip->GetBlockTime())); | 
| 2066 | 7.40k |     CheckForkWarningConditions(); | 
| 2067 | 7.40k | } | 
| 2068 |  |  | 
| 2069 |  | // Same as InvalidChainFound, above, except not called directly from InvalidateBlock, | 
| 2070 |  | // which does its own setBlockIndexCandidates management. | 
| 2071 |  | void Chainstate::InvalidBlockFound(CBlockIndex* pindex, const BlockValidationState& state) | 
| 2072 | 4.15k | { | 
| 2073 | 4.15k |     AssertLockHeld(cs_main); | 
| 2074 | 4.15k |     if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) { | 
| 2075 | 4.15k |         pindex->nStatus |= BLOCK_FAILED_VALID; | 
| 2076 | 4.15k |         m_blockman.m_dirty_blockindex.insert(pindex); | 
| 2077 | 4.15k |         setBlockIndexCandidates.erase(pindex); | 
| 2078 | 4.15k |         InvalidChainFound(pindex); | 
| 2079 | 4.15k |     } | 
| 2080 | 4.15k | } | 
| 2081 |  |  | 
| 2082 |  | void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight) | 
| 2083 | 131k | { | 
| 2084 |  |     // mark inputs spent | 
| 2085 | 131k |     if (!tx.IsCoinBase()) { | 
| 2086 | 106k |         txundo.vprevout.reserve(tx.vin.size()); | 
| 2087 | 106k |         for (const CTxIn &txin : tx.vin) { | 
| 2088 | 106k |             txundo.vprevout.emplace_back(); | 
| 2089 | 106k |             bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back()); | 
| 2090 | 106k |             assert(is_spent); | 
| 2091 | 106k |         } | 
| 2092 | 106k |     } | 
| 2093 |  |     // add outputs | 
| 2094 | 131k |     AddCoins(inputs, tx, nHeight); | 
| 2095 | 131k | } | 
| 2096 |  |  | 
| 2097 | 998k | std::optional<std::pair<ScriptError, std::string>> CScriptCheck::operator()() { | 
| 2098 | 998k |     const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; | 
| 2099 | 998k |     const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness; | 
| 2100 | 998k |     ScriptError error{SCRIPT_ERR_UNKNOWN_ERROR}; | 
| 2101 | 998k |     if (VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *m_signature_cache, *txdata), &error)) { | 
| 2102 | 998k |         return std::nullopt; | 
| 2103 | 998k |     } else { | 
| 2104 | 0 |         auto debug_str = strprintf("input %i of %s (wtxid %s), spending %s:%i", nIn, ptxTo->GetHash().ToString(), ptxTo->GetWitnessHash().ToString(), ptxTo->vin[nIn].prevout.hash.ToString(), ptxTo->vin[nIn].prevout.n); | 
| 2105 | 0 |         return std::make_pair(error, std::move(debug_str)); | 
| 2106 | 0 |     } | 
| 2107 | 998k | } | 
| 2108 |  |  | 
| 2109 |  | ValidationCache::ValidationCache(const size_t script_execution_cache_bytes, const size_t signature_cache_bytes) | 
| 2110 | 51.2k |     : m_signature_cache{signature_cache_bytes} | 
| 2111 | 51.2k | { | 
| 2112 |  |     // Setup the salted hasher | 
| 2113 | 51.2k |     uint256 nonce = GetRandHash(); | 
| 2114 |  |     // We want the nonce to be 64 bytes long to force the hasher to process | 
| 2115 |  |     // this chunk, which makes later hash computations more efficient. We | 
| 2116 |  |     // just write our 32-byte entropy twice to fill the 64 bytes. | 
| 2117 | 51.2k |     m_script_execution_cache_hasher.Write(nonce.begin(), 32); | 
| 2118 | 51.2k |     m_script_execution_cache_hasher.Write(nonce.begin(), 32); | 
| 2119 |  |  | 
| 2120 | 51.2k |     const auto [num_elems, approx_size_bytes] = m_script_execution_cache.setup_bytes(script_execution_cache_bytes); | 
| 2121 | 51.2k |     LogInfo("Using %zu MiB out of %zu MiB requested for script execution cache, able to store %zu elements", | 
| 2122 | 51.2k |               approx_size_bytes >> 20, script_execution_cache_bytes >> 20, num_elems); | 
| 2123 | 51.2k | } | 
| 2124 |  |  | 
| 2125 |  | /** | 
| 2126 |  |  * Check whether all of this transaction's input scripts succeed. | 
| 2127 |  |  * | 
| 2128 |  |  * This involves ECDSA signature checks so can be computationally intensive. This function should | 
| 2129 |  |  * only be called after the cheap sanity checks in CheckTxInputs passed. | 
| 2130 |  |  * | 
| 2131 |  |  * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any | 
| 2132 |  |  * script checks which are not necessary (eg due to script execution cache hits) are, obviously, | 
| 2133 |  |  * not pushed onto pvChecks/run. | 
| 2134 |  |  * | 
| 2135 |  |  * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache | 
| 2136 |  |  * which are matched. This is useful for checking blocks where we will likely never need the cache | 
| 2137 |  |  * entry again. | 
| 2138 |  |  * | 
| 2139 |  |  * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags, block-checking | 
| 2140 |  |  * callers should probably reset it to CONSENSUS in such cases. | 
| 2141 |  |  * | 
| 2142 |  |  * Non-static (and redeclared) in src/test/txvalidationcache_tests.cpp | 
| 2143 |  |  */ | 
| 2144 |  | bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, | 
| 2145 |  |                        const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, | 
| 2146 |  |                        bool cacheFullScriptStore, PrecomputedTransactionData& txdata, | 
| 2147 |  |                        ValidationCache& validation_cache, | 
| 2148 |  |                        std::vector<CScriptCheck>* pvChecks) | 
| 2149 | 1.09M | { | 
| 2150 | 1.09M |     if (tx.IsCoinBase()) return true; | 
| 2151 |  |  | 
| 2152 | 1.09M |     if (pvChecks) { | 
| 2153 | 0 |         pvChecks->reserve(tx.vin.size()); | 
| 2154 | 0 |     } | 
| 2155 |  |  | 
| 2156 |  |     // First check if script executions have been cached with the same | 
| 2157 |  |     // flags. Note that this assumes that the inputs provided are | 
| 2158 |  |     // correct (ie that the transaction hash which is in tx's prevouts | 
| 2159 |  |     // properly commits to the scriptPubKey in the inputs view of that | 
| 2160 |  |     // transaction). | 
| 2161 | 1.09M |     uint256 hashCacheEntry; | 
| 2162 | 1.09M |     CSHA256 hasher = validation_cache.ScriptExecutionCacheHasher(); | 
| 2163 | 1.09M |     hasher.Write(UCharCast(tx.GetWitnessHash().begin()), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin()); | 
| 2164 | 1.09M |     AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks | 
| 2165 | 1.09M |     if (validation_cache.m_script_execution_cache.contains(hashCacheEntry, !cacheFullScriptStore)) { | 
| 2166 | 96.0k |         return true; | 
| 2167 | 96.0k |     } | 
| 2168 |  |  | 
| 2169 | 998k |     if (!txdata.m_spent_outputs_ready) { | 
| 2170 | 504k |         std::vector<CTxOut> spent_outputs; | 
| 2171 | 504k |         spent_outputs.reserve(tx.vin.size()); | 
| 2172 |  |  | 
| 2173 | 504k |         for (const auto& txin : tx.vin) { | 
| 2174 | 504k |             const COutPoint& prevout = txin.prevout; | 
| 2175 | 504k |             const Coin& coin = inputs.AccessCoin(prevout); | 
| 2176 | 504k |             assert(!coin.IsSpent()); | 
| 2177 | 504k |             spent_outputs.emplace_back(coin.out); | 
| 2178 | 504k |         } | 
| 2179 | 504k |         txdata.Init(tx, std::move(spent_outputs)); | 
| 2180 | 504k |     } | 
| 2181 | 998k |     assert(txdata.m_spent_outputs.size() == tx.vin.size()); | 
| 2182 |  |  | 
| 2183 | 1.99M |     for (unsigned int i = 0; i < tx.vin.size(); i++) { | 
| 2184 |  |  | 
| 2185 |  |         // We very carefully only pass in things to CScriptCheck which | 
| 2186 |  |         // are clearly committed to by tx' witness hash. This provides | 
| 2187 |  |         // a sanity check that our caching is not introducing consensus | 
| 2188 |  |         // failures through additional data in, eg, the coins being | 
| 2189 |  |         // spent being checked as a part of CScriptCheck. | 
| 2190 |  |  | 
| 2191 |  |         // Verify signature | 
| 2192 | 998k |         CScriptCheck check(txdata.m_spent_outputs[i], tx, validation_cache.m_signature_cache, i, flags, cacheSigStore, &txdata); | 
| 2193 | 998k |         if (pvChecks) { | 
| 2194 | 0 |             pvChecks->emplace_back(std::move(check)); | 
| 2195 | 998k |         } else if (auto result = check(); result.has_value()) { | 
| 2196 |  |             // Tx failures never trigger disconnections/bans. | 
| 2197 |  |             // This is so that network splits aren't triggered | 
| 2198 |  |             // either due to non-consensus relay policies (such as | 
| 2199 |  |             // non-standard DER encodings or non-null dummy | 
| 2200 |  |             // arguments) or due to new consensus rules introduced in | 
| 2201 |  |             // soft forks. | 
| 2202 | 0 |             if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { | 
| 2203 | 0 |                 return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("mempool-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); | 
| 2204 | 0 |             } else { | 
| 2205 | 0 |                 return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("block-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); | 
| 2206 | 0 |             } | 
| 2207 | 0 |         } | 
| 2208 | 998k |     } | 
| 2209 |  |  | 
| 2210 | 998k |     if (cacheFullScriptStore && !pvChecks) { | 
| 2211 |  |         // We executed all of the provided scripts, and were told to | 
| 2212 |  |         // cache the result. Do so now. | 
| 2213 | 493k |         validation_cache.m_script_execution_cache.insert(hashCacheEntry); | 
| 2214 | 493k |     } | 
| 2215 |  |  | 
| 2216 | 998k |     return true; | 
| 2217 | 998k | } | 
| 2218 |  |  | 
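When `pvChecks` is non-null, CheckInputScripts above defers the per-input work instead of running it inline; callers later execute the queued checks (typically on worker threads) and treat `std::nullopt` as success. A standalone sketch of that deferred-check shape, with invented types:

```cpp
#include <optional>
#include <string>
#include <utility>
#include <vector>

// Stand-in for a queued per-input script check; the real object captures the
// spent output, the transaction, the input index and the verification flags.
struct QueuedCheck {
    int input_index;
    std::optional<std::pair<int, std::string>> operator()() const
    {
        return std::nullopt; // this stub always "verifies"; real code runs script verification here
    }
};

bool RunQueuedChecks(const std::vector<QueuedCheck>& checks)
{
    for (const auto& check : checks) {
        if (auto err = check(); err.has_value()) return false; // first failure aborts
    }
    return true;
}
```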
| 2219 |  | bool FatalError(Notifications& notifications, BlockValidationState& state, const bilingual_str& message) | 
| 2220 | 0 | { | 
| 2221 | 0 |     notifications.fatalError(message); | 
| 2222 | 0 |     return state.Error(message.original); | 
| 2223 | 0 | } | 
| 2224 |  |  | 
| 2225 |  | /** | 
| 2226 |  |  * Restore the UTXO in a Coin at a given COutPoint | 
| 2227 |  |  * @param undo The Coin to be restored. | 
| 2228 |  |  * @param view The coins view to which to apply the changes. | 
| 2229 |  |  * @param out The out point that corresponds to the tx input. | 
| 2230 |  |  * @return A DisconnectResult as an int | 
| 2231 |  |  */ | 
| 2232 |  | int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out) | 
| 2233 | 0 | { | 
| 2234 | 0 |     bool fClean = true; | 
| 2235 |  |  | 
| 2236 | 0 |     if (view.HaveCoin(out)) fClean = false; // overwriting transaction output | 
| 2237 |  |  | 
| 2238 | 0 |     if (undo.nHeight == 0) { | 
| 2239 |  |         // Missing undo metadata (height and coinbase). Older versions included this | 
| 2240 |  |         // information only in undo records for the last spend of a transactions' | 
| 2241 |  |         // outputs. This implies that it must be present for some other output of the same tx. | 
| 2242 | 0 |         const Coin& alternate = AccessByTxid(view, out.hash); | 
| 2243 | 0 |         if (!alternate.IsSpent()) { | 
| 2244 | 0 |             undo.nHeight = alternate.nHeight; | 
| 2245 | 0 |             undo.fCoinBase = alternate.fCoinBase; | 
| 2246 | 0 |         } else { | 
| 2247 | 0 |             return DISCONNECT_FAILED; // adding output for transaction without known metadata | 
| 2248 | 0 |         } | 
| 2249 | 0 |     } | 
| 2250 |  |     // If the coin already exists as an unspent coin in the cache, then the | 
| 2251 |  |     // possible_overwrite parameter to AddCoin must be set to true. We have | 
| 2252 |  |     // already checked whether an unspent coin exists above using HaveCoin, so | 
| 2253 |  |     // we don't need to guess. When fClean is false, an unspent coin already | 
| 2254 |  |     // existed and it is an overwrite. | 
| 2255 | 0 |     view.AddCoin(out, std::move(undo), !fClean); | 
| 2256 |  |  | 
| 2257 | 0 |     return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN; | 
| 2258 | 0 | } | 
| 2259 |  |  | 
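DisconnectBlock (below) folds the per-coin results of ApplyTxInUndo into a single outcome: any FAILED aborts immediately, any UNCLEAN is remembered, and only an all-clean run reports OK. A self-contained sketch of that folding, using its own enum purely for illustration:

```cpp
#include <vector>

enum UndoResult { UNDO_OK, UNDO_UNCLEAN, UNDO_FAILED };

UndoResult FoldUndoResults(const std::vector<UndoResult>& per_input)
{
    bool clean = true;
    for (const UndoResult res : per_input) {
        if (res == UNDO_FAILED) return UNDO_FAILED; // state indeterminate, abort
        if (res == UNDO_UNCLEAN) clean = false;     // keep going, remember the blemish
    }
    return clean ? UNDO_OK : UNDO_UNCLEAN;
}
```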
| 2260 |  | /** Undo the effects of this block (with given index) on the UTXO set represented by coins. | 
| 2261 |  |  *  When FAILED is returned, view is left in an indeterminate state. */ | 
| 2262 |  | DisconnectResult Chainstate::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view) | 
| 2263 | 307k | { | 
| 2264 | 307k |     AssertLockHeld(::cs_main); | 
| 2265 | 307k |     bool fClean = true; | 
| 2266 |  |  | 
| 2267 | 307k |     CBlockUndo blockUndo; | 
| 2268 | 307k |     if (!m_blockman.ReadBlockUndo(blockUndo, *pindex)) { | 
| 2269 | 0 |         LogError("DisconnectBlock(): failure reading undo data\n"); | 
| 2270 | 0 |         return DISCONNECT_FAILED; | 
| 2271 | 0 |     } | 
| 2272 |  |  | 
| 2273 | 307k |     if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) { | 
| 2274 | 0 |         LogError("DisconnectBlock(): block and undo data inconsistent\n"); | 
| 2275 | 0 |         return DISCONNECT_FAILED; | 
| 2276 | 0 |     } | 
| 2277 |  |  | 
| 2278 |  |     // Ignore blocks that contain transactions which are 'overwritten' by later transactions, | 
| 2279 |  |     // unless those are already completely spent. | 
| 2280 |  |     // See https://github.com/bitcoin/bitcoin/issues/22596 for additional information. | 
| 2281 |  |     // Note: the blocks specified here are different than the ones used in ConnectBlock because DisconnectBlock | 
| 2282 |  |     // unwinds the blocks in reverse. As a result, the inconsistency is not discovered until the earlier | 
| 2283 |  |     // blocks with the duplicate coinbase transactions are disconnected. | 
| 2284 | 307k |     bool fEnforceBIP30 = !((pindex->nHeight==91722 && pindex->GetBlockHash() == uint256{"00000000000271a2dc26e7667f8419f2e15416dc6955e5a6c6cdf3f2574dd08e"}) || | 
| 2285 | 307k |                            (pindex->nHeight==91812 && pindex->GetBlockHash() == uint256{"00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f"})); | 
| 2286 |  |  | 
| 2287 |  |     // undo transactions in reverse order | 
| 2288 | 615k |     for (int i = block.vtx.size() - 1; i >= 0; i--) { | 
| 2289 | 307k |         const CTransaction &tx = *(block.vtx[i]); | 
| 2290 | 307k |         Txid hash = tx.GetHash(); | 
| 2291 | 307k |         bool is_coinbase = tx.IsCoinBase(); | 
| 2292 | 307k |         bool is_bip30_exception = (is_coinbase && !fEnforceBIP30); | 
| 2293 |  |  | 
| 2294 |  |         // Check that all outputs are available and match the outputs in the block itself | 
| 2295 |  |         // exactly. | 
| 2296 | 923k |         for (size_t o = 0; o < tx.vout.size(); o++) { | 
| 2297 | 615k |             if (!tx.vout[o].scriptPubKey.IsUnspendable()) { | 
| 2298 | 307k |                 COutPoint out(hash, o); | 
| 2299 | 307k |                 Coin coin; | 
| 2300 | 307k |                 bool is_spent = view.SpendCoin(out, &coin); | 
| 2301 | 307k |                 if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) { | 
| 2302 | 0 |                     if (!is_bip30_exception) { | 
| 2303 | 0 |                         fClean = false; // transaction output mismatch | 
| 2304 | 0 |                     } | 
| 2305 | 0 |                 } | 
| 2306 | 307k |             } | 
| 2307 | 615k |         } | 
| 2308 |  |  | 
| 2309 |  |         // restore inputs | 
| 2310 | 307k |         if (i > 0) { // not coinbases | 
| 2311 | 0 |             CTxUndo &txundo = blockUndo.vtxundo[i-1]; | 
| 2312 | 0 |             if (txundo.vprevout.size() != tx.vin.size()) { | 
| 2313 | 0 |                 LogError("DisconnectBlock(): transaction and undo data inconsistent\n"); | 
| 2314 | 0 |                 return DISCONNECT_FAILED; | 
| 2315 | 0 |             } | 
| 2316 | 0 |             for (unsigned int j = tx.vin.size(); j > 0;) { | 
| 2317 | 0 |                 --j; | 
| 2318 | 0 |                 const COutPoint& out = tx.vin[j].prevout; | 
| 2319 | 0 |                 int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out); | 
| 2320 | 0 |                 if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED; | 
| 2321 | 0 |                 fClean = fClean && res != DISCONNECT_UNCLEAN; | 
| 2322 | 0 |             } | 
| 2323 |  |             // At this point, all of txundo.vprevout should have been moved out. | 
| 2324 | 0 |         } | 
| 2325 | 307k |     } | 
| 2326 |  |  | 
| 2327 |  |     // move best block pointer to prevout block | 
| 2328 | 307k |     view.SetBestBlock(pindex->pprev->GetBlockHash()); | 
| 2329 |  |  | 
| 2330 | 307k |     return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN; | 
| 2331 | 307k | } | 
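DisconnectBlock walks the block's transactions in reverse because a later transaction may spend outputs created earlier in the same block; unwinding dependents first lets the earlier outputs be removed cleanly. A toy model of that ordering (integers standing in for coins, nothing here is Core's API):

    // Illustrative only: connect a two-transaction "block" and then disconnect it in
    // reverse order, restoring the original UTXO set.
    #include <cstdio>
    #include <set>
    #include <vector>

    struct ToyTx { std::vector<int> spends; std::vector<int> creates; };

    int main()
    {
        std::set<int> utxo{1};
        std::vector<ToyTx> block{{{1}, {2}}, {{2}, {3}}}; // tx1: coin1->coin2, tx2: coin2->coin3

        for (const ToyTx& tx : block) {                   // connect in forward order
            for (int c : tx.spends) utxo.erase(c);
            for (int c : tx.creates) utxo.insert(c);
        }
        for (auto it = block.rbegin(); it != block.rend(); ++it) { // disconnect in reverse
            for (int c : it->creates) utxo.erase(c);
            for (int c : it->spends) utxo.insert(c);
        }
        std::printf("restored utxo set size: %zu (expect 1)\n", utxo.size());
    }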
| 2332 |  |  | 
| 2333 |  | static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const ChainstateManager& chainman) | 
| 2334 | 518k | { | 
| 2335 | 518k |     const Consensus::Params& consensusparams = chainman.GetConsensus(); | 
| 2336 |  |  | 
| 2337 |  |     // BIP16 didn't become active until Apr 1 2012 (on mainnet, and | 
| 2338 |  |     // retroactively applied to testnet) | 
| 2339 |  |     // However, only one historical block violated the P2SH rules (on both | 
| 2340 |  |     // mainnet and testnet). | 
| 2341 |  |     // Similarly, only one historical block violated the TAPROOT rules on | 
| 2342 |  |     // mainnet. | 
| 2343 |  |     // For simplicity, always leave P2SH+WITNESS+TAPROOT on except for the two | 
| 2344 |  |     // violating blocks. | 
| 2345 | 518k |     uint32_t flags{SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_TAPROOT}; | 
| 2346 | 518k |     const auto it{consensusparams.script_flag_exceptions.find(*Assert(block_index.phashBlock))}; | 
| 2347 | 518k |     if (it != consensusparams.script_flag_exceptions.end()) { | 
| 2348 | 0 |         flags = it->second; | 
| 2349 | 0 |     } | 
| 2350 |  |  | 
| 2351 |  |     // Enforce the DERSIG (BIP66) rule | 
| 2352 | 518k |     if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_DERSIG)) { | 
| 2353 | 518k |         flags |= SCRIPT_VERIFY_DERSIG; | 
| 2354 | 518k |     } | 
| 2355 |  |  | 
| 2356 |  |     // Enforce CHECKLOCKTIMEVERIFY (BIP65) | 
| 2357 | 518k |     if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_CLTV)) { | 
| 2358 | 518k |         flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; | 
| 2359 | 518k |     } | 
| 2360 |  |  | 
| 2361 |  |     // Enforce CHECKSEQUENCEVERIFY (BIP112) | 
| 2362 | 518k |     if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_CSV)) { | 
| 2363 | 518k |         flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; | 
| 2364 | 518k |     } | 
| 2365 |  |  | 
| 2366 |  |     // Enforce BIP147 NULLDUMMY (activated simultaneously with segwit) | 
| 2367 | 518k |     if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_SEGWIT)) { | 
| 2368 | 518k |         flags |= SCRIPT_VERIFY_NULLDUMMY; | 
| 2369 | 518k |     } | 
| 2370 |  |  | 
| 2371 | 518k |     return flags; | 
| 2372 | 518k | } | 
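The script flags returned above are a plain bitmask: a base set that is always on, with additional SCRIPT_VERIFY_* bits OR'd in as each deployment activates. A small sketch of that composition, using placeholder constants and values rather than Core's real SCRIPT_VERIFY_* definitions:

    // Illustrative only: compose a verification-flag bitmask and test a single bit.
    #include <cstdint>
    #include <cstdio>

    enum : uint32_t {
        VERIFY_P2SH    = 1U << 0, // placeholder values, not Core's constants
        VERIFY_DERSIG  = 1U << 1,
        VERIFY_WITNESS = 1U << 2,
        VERIFY_TAPROOT = 1U << 3,
    };

    int main()
    {
        uint32_t flags = VERIFY_P2SH | VERIFY_WITNESS | VERIFY_TAPROOT; // always-on base set
        bool dersig_active = true;                                      // e.g. DeploymentActiveAt(...)
        if (dersig_active) flags |= VERIFY_DERSIG;                      // layer on a deployment
        std::printf("DERSIG enforced: %d\n", (flags & VERIFY_DERSIG) != 0);
    }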
| 2373 |  |  | 
| 2374 |  |  | 
| 2375 |  | /** Apply the effects of this block (with given index) on the UTXO set represented by coins. | 
| 2376 |  |  *  Validity checks that depend on the UTXO set are also done; ConnectBlock() | 
| 2377 |  |  *  can fail if those validity checks fail (among other reasons). */ | 
| 2378 |  | bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex, | 
| 2379 |  |                                CCoinsViewCache& view, bool fJustCheck) | 
| 2380 | 25.0k | { | 
| 2381 | 25.0k |     AssertLockHeld(cs_main); | 
| 2382 | 25.0k |     assert(pindex); | 
| 2383 |  |  | 
| 2384 | 25.0k |     uint256 block_hash{block.GetHash()}; | 
| 2385 | 25.0k |     assert(*pindex->phashBlock == block_hash); | 
| 2386 |  |  | 
| 2387 | 25.0k |     const auto time_start{SteadyClock::now()}; | 
| 2388 | 25.0k |     const CChainParams& params{m_chainman.GetParams()}; | 
| 2389 |  |  | 
| 2390 |  |     // Check it again in case a previous version let a bad block in | 
| 2391 |  |     // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or | 
| 2392 |  |     // ContextualCheckBlockHeader() here. This means that if we add a new | 
| 2393 |  |     // consensus rule that is enforced in one of those two functions, then we | 
| 2394 |  |     // may have let in a block that violates the rule prior to updating the | 
| 2395 |  |     // software, and we would NOT be enforcing the rule here. Fully solving | 
| 2396 |  |     // upgrade from one software version to the next after a consensus rule | 
| 2397 |  |     // change is potentially tricky and issue-specific (see NeedsRedownload() | 
| 2398 |  |     // for one approach that was used for BIP 141 deployment). | 
| 2399 |  |     // Also, currently the rule against blocks more than 2 hours in the future | 
| 2400 |  |     // is enforced in ContextualCheckBlockHeader(); we wouldn't want to | 
| 2401 |  |     // re-enforce that rule here (at least until we make it impossible for | 
| 2402 |  |     // the clock to go backward). | 
| 2403 | 25.0k |     if (!CheckBlock(block, state, params.GetConsensus(), !fJustCheck, !fJustCheck)) { | 
| 2404 | 0 |         if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) { | 
| 2405 |  |             // We don't write down blocks to disk if they may have been | 
| 2406 |  |             // corrupted, so this should be impossible unless we're having hardware | 
| 2407 |  |             // problems. | 
| 2408 | 0 |             return FatalError(m_chainman.GetNotifications(), state, _("Corrupt block found indicating potential hardware failure.")); | 
| 2409 | 0 |         } | 
| 2410 | 0 |         LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString()); | 
| 2411 | 0 |         return false; | 
| 2412 | 0 |     } | 
| 2413 |  |  | 
| 2414 |  |     // verify that the view's current state corresponds to the previous block | 
| 2415 | 25.0k |     uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash(); | 
| 2416 | 25.0k |     assert(hashPrevBlock == view.GetBestBlock()); | 
| 2417 |  |  | 
| 2418 | 25.0k |     m_chainman.num_blocks_total++; | 
| 2419 |  |  | 
| 2420 |  |     // Special case for the genesis block, skipping connection of its transactions | 
| 2421 |  |     // (its coinbase is unspendable) | 
| 2422 | 25.0k |     if (block_hash == params.GetConsensus().hashGenesisBlock) { | 
| 2423 | 0 |         if (!fJustCheck) | 
| 2424 | 0 |             view.SetBestBlock(pindex->GetBlockHash()); | 
| 2425 | 0 |         return true; | 
| 2426 | 0 |     } | 
| 2427 |  |  | 
| 2428 | 25.0k |     bool fScriptChecks = true; | 
| 2429 | 25.0k |     if (!m_chainman.AssumedValidBlock().IsNull()) { | 
| 2430 |  |         // We've been configured with the hash of a block which has been externally verified to have a valid history. | 
| 2431 |  |         // A suitable default value is included with the software and updated from time to time.  Because validity | 
| 2432 |  |         //  relative to a piece of software is an objective fact these defaults can be easily reviewed. | 
| 2433 |  |         // This setting doesn't force the selection of any particular chain but makes validating some faster by | 
| 2434 |  |         //  effectively caching the result of part of the verification. | 
| 2435 | 0 |         BlockMap::const_iterator it{m_blockman.m_block_index.find(m_chainman.AssumedValidBlock())}; | 
| 2436 | 0 |         if (it != m_blockman.m_block_index.end()) { | 
| 2437 | 0 |             if (it->second.GetAncestor(pindex->nHeight) == pindex && | 
| 2438 | 0 |                 m_chainman.m_best_header->GetAncestor(pindex->nHeight) == pindex && | 
| 2439 | 0 |                 m_chainman.m_best_header->nChainWork >= m_chainman.MinimumChainWork()) { | 
| 2440 |  |                 // This block is a member of the assumed verified chain and an ancestor of the best header. | 
| 2441 |  |                 // Script verification is skipped when connecting blocks under the | 
| 2442 |  |                 // assumevalid block. Assuming the assumevalid block is valid this | 
| 2443 |  |                 // is safe because block merkle hashes are still computed and checked. | 
| 2444 |  |                 // Of course, if an assumed valid block is invalid due to false scriptSigs | 
| 2445 |  |                 // this optimization would allow an invalid chain to be accepted. | 
| 2446 |  |                 // The equivalent time check discourages hash power from extorting the network via DOS attack | 
| 2447 |  |                 //  into accepting an invalid block through telling users they must manually set assumevalid. | 
| 2448 |  |                 //  Requiring a software change or burying the invalid block, regardless of the setting, makes | 
| 2449 |  |                 //  it hard to hide the implication of the demand.  This also avoids having release candidates | 
| 2450 |  |                 //  that are hardly doing any signature verification at all in testing without having to | 
| 2451 |  |                 //  artificially set the default assumed verified block further back. | 
| 2452 |  |                 // The test against the minimum chain work prevents the skipping when denied access to any chain at | 
| 2453 |  |                 //  least as good as the expected chain. | 
| 2454 | 0 |                 fScriptChecks = (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, params.GetConsensus()) <= 60 * 60 * 24 * 7 * 2); | 
| 2455 | 0 |             } | 
| 2456 | 0 |         } | 
| 2457 | 0 |     } | 
| 2458 |  |  | 
| 2459 | 25.0k |     const auto time_1{SteadyClock::now()}; | 
| 2460 | 25.0k |     m_chainman.time_check += time_1 - time_start; | 
| 2461 | 25.0k |     LogDebug(BCLog::BENCH, "    - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 2462 | 25.0k |              Ticks<MillisecondsDouble>(time_1 - time_start), | 
| 2463 | 25.0k |              Ticks<SecondsDouble>(m_chainman.time_check), | 
| 2464 | 25.0k |              Ticks<MillisecondsDouble>(m_chainman.time_check) / m_chainman.num_blocks_total); | 
| 2465 |  |  | 
| 2466 |  |     // Do not allow blocks that contain transactions which 'overwrite' older transactions, | 
| 2467 |  |     // unless those are already completely spent. | 
| 2468 |  |     // If such overwrites are allowed, coinbases and transactions depending upon those | 
| 2469 |  |     // can be duplicated to remove the ability to spend the first instance -- even after | 
| 2470 |  |     // being sent to another address. | 
| 2471 |  |     // See BIP30, CVE-2012-1909, and https://r6.ca/blog/20120206T005236Z.html for more information. | 
| 2472 |  |     // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC. | 
| 2473 |  |     // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the | 
| 2474 |  |     // two in the chain that violate it. This prevents exploiting the issue against nodes during their | 
| 2475 |  |     // initial block download. | 
| 2476 | 25.0k |     bool fEnforceBIP30 = !IsBIP30Repeat(*pindex); | 
| 2477 |  |  | 
| 2478 |  |     // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting | 
| 2479 |  |     // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs.  But by the | 
| 2480 |  |     // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first | 
| 2481 |  |     // before the first had been spent.  Since those coinbases are sufficiently buried it's no longer possible to create further | 
| 2482 |  |     // duplicate transactions descending from the known pairs either. | 
| 2483 |  |     // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check. | 
| 2484 |  |  | 
| 2485 |  |     // BIP34 requires that a block at height X (block X) has its coinbase | 
| 2486 |  |     // scriptSig start with a CScriptNum of X (indicated height X).  The above | 
| 2487 |  |     // logic of no longer requiring BIP30 once BIP34 activates is flawed in the | 
| 2488 |  |     // case that there is a block X before the BIP34 height of 227,931 which has | 
| 2489 |  |     // an indicated height Y where Y is greater than X.  The coinbase for block | 
| 2490 |  |     // X would also be a valid coinbase for block Y, which could be a BIP30 | 
| 2491 |  |     // violation.  An exhaustive search of all mainnet coinbases before the | 
| 2492 |  |     // BIP34 height which have an indicated height greater than the block height | 
| 2493 |  |     // reveals many occurrences. The 3 lowest indicated heights found are | 
| 2494 |  |     // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3 | 
| 2495 |  |     // heights would be the first opportunity for BIP30 to be violated. | 
| 2496 |  |  | 
| 2497 |  |     // The search reveals a great many blocks which have an indicated height | 
| 2498 |  |     // greater than 1,983,702, so we simply remove the optimization to skip | 
| 2499 |  |     // BIP30 checking for blocks at height 1,983,702 or higher.  Before we reach | 
| 2500 |  |     // that block in another 25 years or so, we should take advantage of a | 
| 2501 |  |     // future consensus change to do a new and improved version of BIP34 that | 
| 2502 |  |     // will actually prevent ever creating any duplicate coinbases in the | 
| 2503 |  |     // future. | 
| 2504 | 25.0k |     static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702; | 
| 2505 |  |  | 
| 2506 |  |     // There is no potential to create a duplicate coinbase at block 209,921 | 
| 2507 |  |     // because this is still before the BIP34 height and so explicit BIP30 | 
| 2508 |  |     // checking is still active. | 
| 2509 |  |  | 
| 2510 |  |     // The final case is block 176,684 which has an indicated height of | 
| 2511 |  |     // 490,897. Unfortunately, this issue was not discovered until about 2 weeks | 
| 2512 |  |     // before block 490,897 so there was not much opportunity to address this | 
| 2513 |  |     // case other than to carefully analyze it and determine it would not be a | 
| 2514 |  |     // problem. Block 490,897 was, in fact, mined with a different coinbase than | 
| 2515 |  |     // block 176,684, but it is important to note that even if it hadn't been or | 
| 2516 |  |     // is remined on an alternate fork with a duplicate coinbase, we would still | 
| 2517 |  |     // not run into a BIP30 violation.  This is because the coinbase for 176,684 | 
| 2518 |  |     // is spent in block 185,956 in transaction | 
| 2519 |  |     // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781.  This | 
| 2520 |  |     // spending transaction can't be duplicated because it also spends coinbase | 
| 2521 |  |     // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29.  This | 
| 2522 |  |     // coinbase has an indicated height of over 4.2 billion, and wouldn't be | 
| 2523 |  |     // duplicatable until that height, and it's currently impossible to create a | 
| 2524 |  |     // chain that long. Nevertheless we may wish to consider a future soft fork | 
| 2525 |  |     // which retroactively prevents block 490,897 from creating a duplicate | 
| 2526 |  |     // coinbase. The two historical BIP30 violations often provide a confusing | 
| 2527 |  |     // edge case when manipulating the UTXO and it would be simpler not to have | 
| 2528 |  |     // another edge case to deal with. | 
| 2529 |  |  | 
| 2530 |  |     // testnet3 has no blocks before the BIP34 height with indicated heights | 
| 2531 |  |     // post BIP34 before approximately height 486,000,000. After block | 
| 2532 |  |     // 1,983,702 testnet3 starts doing unnecessary BIP30 checking again. | 
| 2533 | 25.0k |     assert(pindex->pprev); | 
| 2534 | 25.0k |     CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(params.GetConsensus().BIP34Height); | 
| 2535 |  |     //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond. | 
| 2536 | 25.0k |     fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == params.GetConsensus().BIP34Hash)); | 
| 2537 |  |  | 
| 2538 |  |     // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a | 
| 2539 |  |     // consensus change that ensures coinbases at those heights cannot | 
| 2540 |  |     // duplicate earlier coinbases. | 
| 2541 | 25.0k |     if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) { | 
| 2542 | 156k |         for (const auto& tx : block.vtx) { | 
| 2543 | 338k |             for (size_t o = 0; o < tx->vout.size(); o++181k) { | 
| 2544 | 181k |                 if (view.HaveCoin(COutPoint(tx->GetHash(), o))) { | 
| 2545 | 0 |                     state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30", | 
| 2546 | 0 |                                   "tried to overwrite transaction"); | 
| 2547 | 0 |                 } | 
| 2548 | 181k |             } | 
| 2549 | 156k |         } | 
| 2550 | 25.0k |     } | 
| 2551 |  |  | 
| 2552 |  |     // Enforce BIP68 (sequence locks) | 
| 2553 | 25.0k |     int nLockTimeFlags = 0; | 
| 2554 | 25.0k |     if (DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_CSV)) { | 
| 2555 | 25.0k |         nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE; | 
| 2556 | 25.0k |     } | 
| 2557 |  |  | 
| 2558 |  |     // Get the script flags for this block | 
| 2559 | 25.0k |     unsigned int flags{GetBlockScriptFlags(*pindex, m_chainman)}; | 
| 2560 |  |  | 
| 2561 | 25.0k |     const auto time_2{SteadyClock::now()}; | 
| 2562 | 25.0k |     m_chainman.time_forks += time_2 - time_1; | 
| 2563 | 25.0k |     LogDebug(BCLog::BENCH, "    - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 2564 | 25.0k |              Ticks<MillisecondsDouble>(time_2 - time_1), | 
| 2565 | 25.0k |              Ticks<SecondsDouble>(m_chainman.time_forks), | 
| 2566 | 25.0k |              Ticks<MillisecondsDouble>(m_chainman.time_forks) / m_chainman.num_blocks_total); | 
| 2567 |  |  | 
| 2568 | 25.0k |     if (fScriptChecks != m_prev_script_checks_logged && GetRole() == ChainstateRole::NORMAL) { | 
| 2569 | 0 |         LogInfo("%s signature validations at block #%d (%s).", fScriptChecks ? "Enabling" : "Disabling", pindex->nHeight, block_hash.ToString()); | 
| 2570 | 0 |         m_prev_script_checks_logged = fScriptChecks; | 
| 2571 | 0 |     } | 
| 2572 |  |  | 
| 2573 | 25.0k |     CBlockUndo blockundo; | 
| 2574 |  |  | 
| 2575 |  |     // Precomputed transaction data pointers must not be invalidated | 
| 2576 |  |     // until after `control` has run the script checks (potentially | 
| 2577 |  |     // in multiple threads). Preallocate the vector size so a new allocation | 
| 2578 |  |     // doesn't invalidate pointers into the vector, and keep txsdata in scope | 
| 2579 |  |     // for as long as `control`. | 
| 2580 | 25.0k |     std::optional<CCheckQueueControl<CScriptCheck>> control; | 
| 2581 | 25.0k |     if (auto& queue = m_chainman.GetCheckQueue(); queue.HasThreads() && fScriptChecks) control.emplace(queue); | 
| 2582 |  |  | 
| 2583 | 25.0k |     std::vector<PrecomputedTransactionData> txsdata(block.vtx.size()); | 
| 2584 |  |  | 
| 2585 | 25.0k |     std::vector<int> prevheights; | 
| 2586 | 25.0k |     CAmount nFees = 0; | 
| 2587 | 25.0k |     int nInputs = 0; | 
| 2588 | 25.0k |     int64_t nSigOpsCost = 0; | 
| 2589 | 25.0k |     blockundo.vtxundo.reserve(block.vtx.size() - 1); | 
| 2590 | 156k |     for (unsigned int i = 0; i < block.vtx.size(); i++) | 
| 2591 | 135k |     { | 
| 2592 | 135k |         if (!state.IsValid()) break; | 
| 2593 | 135k |         const CTransaction &tx = *(block.vtx[i]); | 
| 2594 |  |  | 
| 2595 | 135k |         nInputs += tx.vin.size(); | 
| 2596 |  |  | 
| 2597 | 135k |         if (!tx.IsCoinBase()) | 
| 2598 | 109k |         { | 
| 2599 | 109k |             CAmount txfee = 0; | 
| 2600 | 109k |             TxValidationState tx_state; | 
| 2601 | 109k |             if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) { | 
| 2602 |  |                 // Any transaction validation failure in ConnectBlock is a block consensus failure | 
| 2603 | 2.67k |                 state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, | 
| 2604 | 2.67k |                               tx_state.GetRejectReason(), | 
| 2605 | 2.67k |                               tx_state.GetDebugMessage() + " in transaction " + tx.GetHash().ToString()); | 
| 2606 | 2.67k |                 break; | 
| 2607 | 2.67k |             } | 
| 2608 | 107k |             nFees += txfee; | 
| 2609 | 107k |             if (!MoneyRange(nFees)) { | 
| 2610 | 0 |                 state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange", | 
| 2611 | 0 |                               "accumulated fee in the block out of range"); | 
| 2612 | 0 |                 break; | 
| 2613 | 0 |             } | 
| 2614 |  |  | 
| 2615 |  |             // Check that transaction is BIP68 final | 
| 2616 |  |             // BIP68 lock checks (as opposed to nLockTime checks) must | 
| 2617 |  |             // be in ConnectBlock because they require the UTXO set | 
| 2618 | 107k |             prevheights.resize(tx.vin.size()); | 
| 2619 | 214k |             for (size_t j = 0; j < tx.vin.size(); j++) { | 
| 2620 | 107k |                 prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight; | 
| 2621 | 107k |             } | 
| 2622 |  |  | 
| 2623 | 107k |             if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) { | 
| 2624 | 581 |                 state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", | 
| 2625 | 581 |                               "contains a non-BIP68-final transaction " + tx.GetHash().ToString()); | 
| 2626 | 581 |                 break; | 
| 2627 | 581 |             } | 
| 2628 | 107k |         } | 
| 2629 |  |  | 
| 2630 |  |         // GetTransactionSigOpCost counts 3 types of sigops: | 
| 2631 |  |         // * legacy (always) | 
| 2632 |  |         // * p2sh (when P2SH enabled in flags and excludes coinbase) | 
| 2633 |  |         // * witness (when witness enabled in flags and excludes coinbase) | 
| 2634 | 131k |         nSigOpsCost += GetTransactionSigOpCost(tx, view, flags); | 
| 2635 | 131k |         if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) { | 
| 2636 | 0 |             state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "too many sigops"); | 
| 2637 | 0 |             break; | 
| 2638 | 0 |         } | 
| 2639 |  |  | 
| 2640 | 131k |         if (!tx.IsCoinBase() && fScriptChecks) | 
| 2641 | 106k |         { | 
| 2642 | 106k |             bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */ | 
| 2643 | 106k |             bool tx_ok; | 
| 2644 | 106k |             TxValidationState tx_state; | 
| 2645 |  |             // If CheckInputScripts is called with a pointer to a checks vector, the resulting checks are appended to it. In that case | 
| 2646 |  |             // they need to be added to control which runs them asynchronously. Otherwise, CheckInputScripts runs the checks before returning. | 
| 2647 | 106k |             if (control) { | 
| 2648 | 0 |                 std::vector<CScriptCheck> vChecks; | 
| 2649 | 0 |                 tx_ok = CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], m_chainman.m_validation_cache, &vChecks); | 
| 2650 | 0 |                 if (tx_ok) control->Add(std::move(vChecks)); | 
| 2651 | 106k |             } else { | 
| 2652 | 106k |                 tx_ok = CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], m_chainman.m_validation_cache); | 
| 2653 | 106k |             } | 
| 2654 | 106k |             if (!tx_ok) { | 
| 2655 |  |                 // Any transaction validation failure in ConnectBlock is a block consensus failure | 
| 2656 | 0 |                 state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, | 
| 2657 | 0 |                               tx_state.GetRejectReason(), tx_state.GetDebugMessage()); | 
| 2658 | 0 |                 break; | 
| 2659 | 0 |             } | 
| 2660 | 106k |         } | 
| 2661 |  |  | 
| 2662 | 131k |         CTxUndo undoDummy; | 
| 2663 | 131k |         if (i > 0) { | 
| 2664 | 106k |             blockundo.vtxundo.emplace_back(); | 
| 2665 | 106k |         } | 
| 2666 | 131k |         UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight); | 
| 2667 | 131k |     } | 
| 2668 | 25.0k |     const auto time_3{SteadyClock::now()}; | 
| 2669 | 25.0k |     m_chainman.time_connect += time_3 - time_2; | 
| 2670 | 25.0k |     LogDebug(BCLog::BENCH, "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), | 
| 2671 | 25.0k |              Ticks<MillisecondsDouble>(time_3 - time_2), Ticks<MillisecondsDouble>(time_3 - time_2) / block.vtx.size(), | 
| 2672 | 25.0k |              nInputs <= 1 ? 0 : Ticks<MillisecondsDouble>(time_3 - time_2) / (nInputs - 1), | 
| 2673 | 25.0k |              Ticks<SecondsDouble>(m_chainman.time_connect), | 
| 2674 | 25.0k |              Ticks<MillisecondsDouble>(m_chainman.time_connect) / m_chainman.num_blocks_total); | 
| 2675 |  |  | 
| 2676 | 25.0k |     CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, params.GetConsensus()); | 
| 2677 | 25.0k |     if (block.vtx[0]->GetValueOut() > blockReward && state.IsValid()) { | 
| 2678 | 0 |         state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount", | 
| 2679 | 0 |                       strprintf("coinbase pays too much (actual=%d vs limit=%d)", block.vtx[0]->GetValueOut(), blockReward)); | 
| 2680 | 0 |     } | 
| 2681 | 25.0k |     if (control) { | 
| 2682 | 0 |         auto parallel_result = control->Complete(); | 
| 2683 | 0 |         if (parallel_result.has_value() && state.IsValid()) { | 
| 2684 | 0 |             state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, strprintf("block-script-verify-flag-failed (%s)", ScriptErrorString(parallel_result->first)), parallel_result->second); | 
| 2685 | 0 |         } | 
| 2686 | 0 |     } | 
| 2687 | 25.0k |     if (!state.IsValid()) { | 
| 2688 | 3.25k |         LogInfo("Block validation error: %s", state.ToString()); | 
| 2689 | 3.25k |         return false; | 
| 2690 | 3.25k |     } | 
| 2691 | 21.7k |     const auto time_4{SteadyClock::now()}; | 
| 2692 | 21.7k |     m_chainman.time_verify += time_4 - time_2; | 
| 2693 | 21.7k |     LogDebug(BCLog::BENCH, "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, | 
| 2694 | 21.7k |              Ticks<MillisecondsDouble>(time_4 - time_2), | 
| 2695 | 21.7k |              nInputs <= 1 ? 0 : Ticks<MillisecondsDouble>(time_4 - time_2) / (nInputs - 1), | 
| 2696 | 21.7k |              Ticks<SecondsDouble>(m_chainman.time_verify), | 
| 2697 | 21.7k |              Ticks<MillisecondsDouble>(m_chainman.time_verify) / m_chainman.num_blocks_total); | 
| 2698 |  |  | 
| 2699 | 21.7k |     if (fJustCheck) { | 
| 2700 | 0 |         return true; | 
| 2701 | 0 |     } | 
| 2702 |  |  | 
| 2703 | 21.7k |     if (!m_blockman.WriteBlockUndo(blockundo, state, *pindex)) { | 
| 2704 | 0 |         return false; | 
| 2705 | 0 |     } | 
| 2706 |  |  | 
| 2707 | 21.7k |     const auto time_5{SteadyClock::now()}; | 
| 2708 | 21.7k |     m_chainman.time_undo += time_5 - time_4; | 
| 2709 | 21.7k |     LogDebug(BCLog::BENCH, "    - Write undo data: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 2710 | 21.7k |              Ticks<MillisecondsDouble>(time_5 - time_4), | 
| 2711 | 21.7k |              Ticks<SecondsDouble>(m_chainman.time_undo), | 
| 2712 | 21.7k |              Ticks<MillisecondsDouble>(m_chainman.time_undo) / m_chainman.num_blocks_total); | 
| 2713 |  |  | 
| 2714 | 21.7k |     if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) { | 
| 2715 | 21.7k |         pindex->RaiseValidity(BLOCK_VALID_SCRIPTS); | 
| 2716 | 21.7k |         m_blockman.m_dirty_blockindex.insert(pindex); | 
| 2717 | 21.7k |     } | 
| 2718 |  |  | 
| 2719 |  |     // add this block to the view's block chain | 
| 2720 | 21.7k |     view.SetBestBlock(pindex->GetBlockHash()); | 
| 2721 |  |  | 
| 2722 | 21.7k |     const auto time_6{SteadyClock::now()}; | 
| 2723 | 21.7k |     m_chainman.time_index += time_6 - time_5; | 
| 2724 | 21.7k |     LogDebug(BCLog::BENCH, "    - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 2725 | 21.7k |              Ticks<MillisecondsDouble>(time_6 - time_5), | 
| 2726 | 21.7k |              Ticks<SecondsDouble>(m_chainman.time_index), | 
| 2727 | 21.7k |              Ticks<MillisecondsDouble>(m_chainman.time_index) / m_chainman.num_blocks_total); | 
| 2728 |  |  | 
| 2729 | 21.7k |     TRACEPOINT(validation, block_connected, | 
| 2730 | 21.7k |         block_hash.data(), | 
| 2731 | 21.7k |         pindex->nHeight, | 
| 2732 | 21.7k |         block.vtx.size(), | 
| 2733 | 21.7k |         nInputs, | 
| 2734 | 21.7k |         nSigOpsCost, | 
| 2735 | 21.7k |         Ticks<std::chrono::nanoseconds>(time_5 - time_start) | 
| 2736 | 21.7k |     ); | 
| 2737 |  |  | 
| 2738 | 21.7k |     return true; | 
| 2739 | 21.7k | } | 
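The coinbase-value check near the end of ConnectBlock compares the coinbase outputs against nFees plus GetBlockSubsidy(). As a rough, self-contained illustration of that budget (the halving schedule mirrors mainnet's 50 BTC / 210,000-block parameters; the helper name and the fee figure are local to this sketch):

    // Illustrative only: compute the maximum value a coinbase may claim at a given height.
    #include <cstdint>
    #include <cstdio>

    using CAmount = int64_t;
    static constexpr CAmount COIN = 100'000'000;     // satoshis per BTC
    static constexpr int HALVING_INTERVAL = 210'000; // mainnet value

    CAmount SubsidyAt(int height)
    {
        int halvings = height / HALVING_INTERVAL;
        if (halvings >= 64) return 0;                // avoid an undefined right shift
        return (50 * COIN) >> halvings;              // halve the subsidy each epoch
    }

    int main()
    {
        int height = 840'000;                            // fourth halving epoch
        CAmount fees = 35'000'000;                       // hypothetical total fees in the block
        CAmount max_coinbase = fees + SubsidyAt(height); // "blockReward" in ConnectBlock
        std::printf("coinbase may claim at most %lld sat\n", (long long)max_coinbase);
    }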
| 2740 |  |  | 
| 2741 |  | CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState() | 
| 2742 | 790k | { | 
| 2743 | 790k |     AssertLockHeld(::cs_main); | 
| 2744 | 790k |     return this->GetCoinsCacheSizeState( | 
| 2745 | 790k |         m_coinstip_cache_size_bytes, | 
| 2746 | 790k |         m_mempool ? m_mempool->m_opts.max_size_bytes : 0); | 
| 2747 | 790k | } | 
| 2748 |  |  | 
| 2749 |  | CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState( | 
| 2750 |  |     size_t max_coins_cache_size_bytes, | 
| 2751 |  |     size_t max_mempool_size_bytes) | 
| 2752 | 790k | { | 
| 2753 | 790k |     AssertLockHeld(::cs_main); | 
| 2754 | 790k |     const int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0; | 
| 2755 | 790k |     int64_t cacheSize = CoinsTip().DynamicMemoryUsage(); | 
| 2756 | 790k |     int64_t nTotalSpace = | 
| 2757 | 790k |         max_coins_cache_size_bytes + std::max<int64_t>(int64_t(max_mempool_size_bytes) - nMempoolUsage, 0); | 
| 2758 |  |  | 
| 2759 | 790k |     if (cacheSize > nTotalSpace) { | 
| 2760 | 0 |         LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace); | 
| 2761 | 0 |         return CoinsCacheSizeState::CRITICAL; | 
| 2762 | 790k |     } else if (cacheSize > LargeCoinsCacheThreshold(nTotalSpace)) { | 
| 2763 | 0 |         return CoinsCacheSizeState::LARGE; | 
| 2764 | 0 |     } | 
| 2765 | 790k |     return CoinsCacheSizeState::OK; | 
| 2766 | 790k | } | 
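Callers use the returned state to decide whether to flush: CRITICAL forces an immediate write, LARGE requests one soon, OK defers. A sketch of that classification with a placeholder 90% "large" threshold (Core's LargeCoinsCacheThreshold() may use a different rule):

    // Illustrative only: classify cache usage against a total-space budget.
    #include <cstdint>
    #include <cstdio>

    enum class CacheState { OK, LARGE, CRITICAL };

    CacheState Classify(int64_t cache_bytes, int64_t total_space_bytes)
    {
        const int64_t large_threshold = total_space_bytes * 9 / 10;       // placeholder heuristic
        if (cache_bytes > total_space_bytes) return CacheState::CRITICAL; // must flush now
        if (cache_bytes > large_threshold) return CacheState::LARGE;      // flush soon
        return CacheState::OK;
    }

    int main()
    {
        std::printf("%d\n", (int)Classify(/*cache=*/460 << 20, /*total=*/500 << 20)); // 1 (LARGE)
    }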
| 2767 |  |  | 
| 2768 |  | bool Chainstate::FlushStateToDisk( | 
| 2769 |  |     BlockValidationState &state, | 
| 2770 |  |     FlushStateMode mode, | 
| 2771 |  |     int nManualPruneHeight) | 
| 2772 | 790k | { | 
| 2773 | 790k |     LOCK(cs_main); | 
| 2774 | 790k |     assert(this->CanFlushToDisk()); | 
| 2775 | 790k |     std::set<int> setFilesToPrune; | 
| 2776 | 790k |     bool full_flush_completed = false; | 
| 2777 |  |  | 
| 2778 | 790k |     const size_t coins_count = CoinsTip().GetCacheSize(); | 
| 2779 | 790k |     const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage(); | 
| 2780 |  |  | 
| 2781 | 790k |     try { | 
| 2782 | 790k |     { | 
| 2783 | 790k |         bool fFlushForPrune = false; | 
| 2784 |  |  | 
| 2785 | 790k |         CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(); | 
| 2786 | 790k |         LOCK(m_blockman.cs_LastBlockFile); | 
| 2787 | 790k |         if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && m_chainman.m_blockman.m_blockfiles_indexed) { | 
| 2788 |  |             // make sure we don't prune above any of the prune locks bestblocks | 
| 2789 |  |             // pruning is height-based | 
| 2790 | 0 |             int last_prune{m_chain.Height()}; // last height we can prune | 
| 2791 | 0 |             std::optional<std::string> limiting_lock; // prune lock that actually was the limiting factor, only used for logging | 
| 2792 |  | 
| 2793 | 0 |             for (const auto& prune_lock : m_blockman.m_prune_locks) { | 
| 2794 | 0 |                 if (prune_lock.second.height_first == std::numeric_limits<int>::max()) continue; | 
| 2795 |  |                 // Remove the buffer and one additional block here to get actual height that is outside of the buffer | 
| 2796 | 0 |                 const int lock_height{prune_lock.second.height_first - PRUNE_LOCK_BUFFER - 1}; | 
| 2797 | 0 |                 last_prune = std::max(1, std::min(last_prune, lock_height)); | 
| 2798 | 0 |                 if (last_prune == lock_height) { | 
| 2799 | 0 |                     limiting_lock = prune_lock.first; | 
| 2800 | 0 |                 } | 
| 2801 | 0 |             } | 
| 2802 |  | 
| 2803 | 0 |             if (limiting_lock) { | 
| 2804 | 0 |                 LogDebug(BCLog::PRUNE, "%s limited pruning to height %d\n", limiting_lock.value(), last_prune); | 
| 2805 | 0 |             } | 
| 2806 |  | 
| 2807 | 0 |             if (nManualPruneHeight > 0) { | 
| 2808 | 0 |                 LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH); | 
| 2809 |  | 
| 2810 | 0 |                 m_blockman.FindFilesToPruneManual( | 
| 2811 | 0 |                     setFilesToPrune, | 
| 2812 | 0 |                     std::min(last_prune, nManualPruneHeight), | 
| 2813 | 0 |                     *this, m_chainman); | 
| 2814 | 0 |             } else { | 
| 2815 | 0 |                 LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH); | 
| 2816 |  | 
| 2817 | 0 |                 m_blockman.FindFilesToPrune(setFilesToPrune, last_prune, *this, m_chainman); | 
| 2818 | 0 |                 m_blockman.m_check_for_pruning = false; | 
| 2819 | 0 |             } | 
| 2820 | 0 |             if (!setFilesToPrune.empty()) { | 
| 2821 | 0 |                 fFlushForPrune = true; | 
| 2822 | 0 |                 if (!m_blockman.m_have_pruned) { | 
| 2823 | 0 |                     m_blockman.m_block_tree_db->WriteFlag("prunedblockfiles", true); | 
| 2824 | 0 |                     m_blockman.m_have_pruned = true; | 
| 2825 | 0 |                 } | 
| 2826 | 0 |             } | 
| 2827 | 0 |         } | 
| 2828 | 790k |         const auto nNow{NodeClock::now()}; | 
| 2829 |  |         // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing). | 
| 2830 | 790k |         bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE; | 
| 2831 |  |         // The cache is over the limit, we have to write now. | 
| 2832 | 790k |         bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL; | 
| 2833 |  |         // It's been a while since we wrote the block index and chain state to disk. Do this frequently, so we don't need to redownload or reindex after a crash. | 
| 2834 | 790k |         bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow >= m_next_write; | 
| 2835 |  |         // Combine all conditions that result in a write to disk. | 
| 2836 | 790k |         bool should_write = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicWrite || fFlushForPrune; | 
| 2837 |  |         // Write blocks, block index and best chain related state to disk. | 
| 2838 | 790k |         if (should_write) { | 
| 2839 | 4.58k |             LogDebug(BCLog::COINDB, "Writing chainstate to disk: flush mode=%s, prune=%d, large=%d, critical=%d, periodic=%d", | 
| 2840 | 4.58k |                      FlushStateModeNames[size_t(mode)], fFlushForPrune, fCacheLarge, fCacheCritical, fPeriodicWrite); | 
| 2841 |  |  | 
| 2842 |  |             // Ensure we can write block index | 
| 2843 | 4.58k |             if (!CheckDiskSpace(m_blockman.m_opts.blocks_dir)) { | 
| 2844 | 0 |                 return FatalError(m_chainman.GetNotifications(), state, _("Disk space is too low!")); | 
| 2845 | 0 |             } | 
| 2846 | 4.58k |             { | 
| 2847 | 4.58k |                 LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH); | 
| 2848 |  |  | 
| 2849 |  |                 // First make sure all block and undo data is flushed to disk. | 
| 2850 |  |                 // TODO: Handle return error, or add detailed comment why it is | 
| 2851 |  |                 // safe to not return an error upon failure. | 
| 2852 | 4.58k |                 if (!m_blockman.FlushChainstateBlockFile(m_chain.Height())) { | 
| 2853 | 0 |                     LogPrintLevel(BCLog::VALIDATION, BCLog::Level::Warning, "%s: Failed to flush block file.\n", __func__); | 
| 2854 | 0 |                 } | 
| 2855 | 4.58k |             } | 
| 2856 |  |  | 
| 2857 |  |             // Then update all block file information (which may refer to block and undo files). | 
| 2858 | 4.58k |             { | 
| 2859 | 4.58k |                 LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH); | 
| 2860 |  |  | 
| 2861 | 4.58k |                 if (!m_blockman.WriteBlockIndexDB()) { | 
| 2862 | 0 |                     return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to block index database.")); | 
| 2863 | 0 |                 } | 
| 2864 | 4.58k |             } | 
| 2865 |  |             // Finally remove any pruned files | 
| 2866 | 4.58k |             if (fFlushForPrune) { | 
| 2867 | 0 |                 LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH); | 
| 2868 |  | 
| 2869 | 0 |                 m_blockman.UnlinkPrunedFiles(setFilesToPrune); | 
| 2870 | 0 |             } | 
| 2871 |  |  | 
| 2872 | 4.58k |             if (!CoinsTip().GetBestBlock().IsNull()) { | 
| 2873 | 4.58k |                 if (coins_mem_usage >= WARN_FLUSH_COINS_SIZE) LogWarning("Flushing large (%d GiB) UTXO set to disk, it may take several minutes", coins_mem_usage >> 30); | 
| 2874 | 4.58k |                 LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fKiB)", | 
| 2875 | 4.58k |                     coins_count, coins_mem_usage >> 10), BCLog::BENCH); | 
| 2876 |  |  | 
| 2877 |  |                 // Typical Coin structures on disk are around 48 bytes in size. | 
| 2878 |  |                 // Pushing a new one to the database can cause it to be written | 
| 2879 |  |                 // twice (once in the log, and once in the tables). This is already | 
| 2880 |  |                 // an overestimation, as most will delete an existing entry or | 
| 2881 |  |                 // overwrite one. Still, use a conservative safety factor of 2. | 
| 2882 | 4.58k |                 if (!CheckDiskSpace(m_chainman.m_options.datadir, 48 * 2 * 2 * CoinsTip().GetCacheSize())) { | 
| 2883 | 0 |                     return FatalError(m_chainman.GetNotifications(), state, _("Disk space is too low!")); | 
| 2884 | 0 |                 } | 
| 2885 |  |                 // Flush the chainstate (which may refer to block index entries). | 
| 2886 | 4.58k |                 const auto empty_cache{(mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical}; | 
| 2887 | 4.58k |                 if (empty_cache ? !CoinsTip().Flush() : !CoinsTip().Sync()) { | 
| 2888 | 0 |                     return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to coin database.")); | 
| 2889 | 0 |                 } | 
| 2890 | 4.58k |                 full_flush_completed = true; | 
| 2891 | 4.58k |                 TRACEPOINT(utxocache, flush, | 
| 2892 | 4.58k |                     int64_t{Ticks<std::chrono::microseconds>(NodeClock::now() - nNow)}, | 
| 2893 | 4.58k |                     (uint32_t)mode, | 
| 2894 | 4.58k |                     (uint64_t)coins_count, | 
| 2895 | 4.58k |                     (uint64_t)coins_mem_usage, | 
| 2896 | 4.58k |                     (bool)fFlushForPrune); | 
| 2897 | 4.58k |             } | 
| 2898 | 4.58k |         } | 
| 2899 |  |  | 
| 2900 | 790k |         if (should_write || m_next_write == NodeClock::time_point::max()) { | 
| 2901 | 33.3k |             constexpr auto range{DATABASE_WRITE_INTERVAL_MAX - DATABASE_WRITE_INTERVAL_MIN}; | 
| 2902 | 33.3k |             m_next_write = FastRandomContext().rand_uniform_delay(NodeClock::now() + DATABASE_WRITE_INTERVAL_MIN, range); | 
| 2903 | 33.3k |         } | 
| 2904 | 790k |     } | 
| 2905 | 790k |     if (full_flush_completed && m_chainman.m_options.signals) { | 
| 2906 |  |         // Update best block in wallet (so we can detect restored wallets). | 
| 2907 | 4.58k |         m_chainman.m_options.signals->ChainStateFlushed(this->GetRole(), GetLocator(m_chain.Tip())); | 
| 2908 | 4.58k |     } | 
| 2909 | 790k |     } catch (const std::runtime_error& e) { | 
| 2910 | 0 |         return FatalError(m_chainman.GetNotifications(), state, strprintf(_("System error while flushing: %s"), e.what())); | 
| 2911 | 0 |     } | 
| 2912 | 790k |     return true; | 
| 2913 | 790k | } | 
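The disk-space check above multiplies roughly 48 bytes per serialized coin by two (write-ahead log plus tables) and by a safety factor of two, and the next periodic write is rescheduled to a random point inside a fixed window. A minimal standalone sketch of both computations follows; the function names, the std::mt19937_64 generator and the interval parameters are illustrative stand-ins, not part of validation.cpp.

    // Standalone sketch of the two estimates used above; names, the RNG and the
    // interval parameters are illustrative, not part of validation.cpp.
    #include <chrono>
    #include <cstdint>
    #include <random>

    // Conservative disk requirement for flushing `cache_entries` coins:
    // ~48 bytes per serialized coin, written twice (log + tables), times a safety factor of 2.
    uint64_t EstimatedFlushBytes(uint64_t cache_entries)
    {
        return 48 * 2 * 2 * cache_entries;
    }

    // Pick the next periodic write time uniformly inside [now + min, now + max],
    // mirroring the rand_uniform_delay() call above.
    std::chrono::steady_clock::time_point NextWriteTime(std::chrono::minutes min_interval,
                                                        std::chrono::minutes max_interval)
    {
        std::mt19937_64 rng{std::random_device{}()};
        const auto range_s = std::chrono::duration_cast<std::chrono::seconds>(max_interval - min_interval);
        std::uniform_int_distribution<int64_t> dist(0, range_s.count());
        const auto delay = min_interval + std::chrono::seconds{dist(rng)};
        return std::chrono::steady_clock::now() +
               std::chrono::duration_cast<std::chrono::steady_clock::duration>(delay);
    }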
| 2914 |  |  | 
| 2915 |  | void Chainstate::ForceFlushStateToDisk() | 
| 2916 | 0 | { | 
| 2917 | 0 |     BlockValidationState state; | 
| 2918 | 0 |     if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) { | 
| 2919 | 0 |         LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); | 
| 2920 | 0 |     } | 
| 2921 | 0 | } | 
| 2922 |  |  | 
| 2923 |  | void Chainstate::PruneAndFlush() | 
| 2924 | 0 | { | 
| 2925 | 0 |     BlockValidationState state; | 
| 2926 | 0 |     m_blockman.m_check_for_pruning = true; | 
| 2927 | 0 |     if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) { | 
| 2928 | 0 |         LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); | 
| 2929 | 0 |     } | 
| 2930 | 0 | } | 
| 2931 |  |  | 
| 2932 |  | static void UpdateTipLog( | 
| 2933 |  |     const ChainstateManager& chainman, | 
| 2934 |  |     const CCoinsViewCache& coins_tip, | 
| 2935 |  |     const CBlockIndex* tip, | 
| 2936 |  |     const std::string& func_name, | 
| 2937 |  |     const std::string& prefix, | 
| 2938 |  |     const std::string& warning_messages) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) | 
| 2939 | 21.8k | { | 
| 2940 |  |  | 
| 2941 | 21.8k |     AssertLockHeld(::cs_main); | 
| 2942 |  |  | 
| 2943 |  |     // Disable rate limiting in LogPrintLevel_ so this source location may log during IBD. | 
| 2944 | 21.8k |     LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/false, "%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", | 
| 2945 | 21.8k |                    prefix, func_name, | 
| 2946 | 21.8k |                    tip->GetBlockHash().ToString(), tip->nHeight, tip->nVersion, | 
| 2947 | 21.8k |                    log(tip->nChainWork.getdouble()) / log(2.0), tip->m_chain_tx_count, | 
| 2948 | 21.8k |                    FormatISO8601DateTime(tip->GetBlockTime()), | 
| 2949 | 21.8k |                    chainman.GuessVerificationProgress(tip), | 
| 2950 | 21.8k |                    coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)), | 
| 2951 | 21.8k |                    coins_tip.GetCacheSize(), | 
| 2952 | 21.8k |                    !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : ""); | 
| 2953 | 21.8k | } | 
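The log2_work and cache figures in this log line are simple unit conversions. A hedged sketch of the same arithmetic on standard types (chain work narrowed to a double, as nChainWork.getdouble() does above; the function name is illustrative):

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    // Sketch of the conversions in the tip log line above; `chain_work` stands in
    // for tip->nChainWork.getdouble() and `cache_bytes` for coins_tip.DynamicMemoryUsage().
    void LogTipFigures(double chain_work, std::size_t cache_bytes)
    {
        const double log2_work = std::log(chain_work) / std::log(2.0); // log base 2 of total chain work
        const double cache_mib = cache_bytes * (1.0 / (1 << 20));      // bytes -> MiB
        std::printf("log2_work=%f cache=%.1fMiB\n", log2_work, cache_mib);
    }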
| 2954 |  |  | 
| 2955 |  | void Chainstate::UpdateTip(const CBlockIndex* pindexNew) | 
| 2956 | 21.8k | { | 
| 2957 | 21.8k |     AssertLockHeld(::cs_main); | 
| 2958 | 21.8k |     const auto& coins_tip = this->CoinsTip(); | 
| 2959 |  |  | 
| 2960 |  |     // The remainder of the function isn't relevant if we are not acting on | 
| 2961 |  |     // the active chainstate, so return if need be. | 
| 2962 | 21.8k |     if (this != &m_chainman.ActiveChainstate()) { | 
| 2963 |  |         // Only log every so often so that we don't bury log messages at the tip. | 
| 2964 | 0 |         constexpr int BACKGROUND_LOG_INTERVAL = 2000; | 
| 2965 | 0 |         if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) { | 
| 2966 | 0 |             UpdateTipLog(m_chainman, coins_tip, pindexNew, __func__, "[background validation] ", ""); | 
| 2967 | 0 |         } | 
| 2968 | 0 |         return; | 
| 2969 | 0 |     } | 
| 2970 |  |  | 
| 2971 |  |     // New best block | 
| 2972 | 21.8k |     if (m_mempool) { | 
| 2973 | 21.8k |         m_mempool->AddTransactionsUpdated(1); | 
| 2974 | 21.8k |     } | 
| 2975 |  |  | 
| 2976 | 21.8k |     std::vector<bilingual_str> warning_messages; | 
| 2977 | 21.8k |     if (!m_chainman.IsInitialBlockDownload()) { | 
| 2978 | 20.4k |         auto bits = m_chainman.m_versionbitscache.CheckUnknownActivations(pindexNew, m_chainman.GetParams()); | 
| 2979 | 20.4k |         for (auto [bit, active] : bits) { | 
| 2980 | 0 |             const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit); | 
| 2981 | 0 |             if (active) { | 
| 2982 | 0 |                 m_chainman.GetNotifications().warningSet(kernel::Warning::UNKNOWN_NEW_RULES_ACTIVATED, warning); | 
| 2983 | 0 |             } else { | 
| 2984 | 0 |                 warning_messages.push_back(warning); | 
| 2985 | 0 |             } | 
| 2986 | 0 |         } | 
| 2987 | 20.4k |     } | 
| 2988 | 21.8k |     UpdateTipLog(m_chainman, coins_tip, pindexNew, __func__, "", | 
| 2989 | 21.8k |                  util::Join(warning_messages, Untranslated(", ")).original); | 
| 2990 | 21.8k | } | 
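For the background chainstate, UpdateTip only logs every BACKGROUND_LOG_INTERVAL blocks so tip messages do not drown out the rest of the log. A tiny sketch of that throttle with a stand-in logger:

    #include <cstdio>

    // Sketch of the throttle used for the background chainstate above: only emit
    // a tip line every BACKGROUND_LOG_INTERVAL blocks so logs stay readable.
    void MaybeLogBackgroundTip(int height)
    {
        constexpr int BACKGROUND_LOG_INTERVAL = 2000;
        if (height % BACKGROUND_LOG_INTERVAL == 0) {
            std::printf("[background validation] new tip at height %d\n", height);
        }
    }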
| 2991 |  |  | 
| 2992 |  | /** Disconnect m_chain's tip. | 
| 2993 |  |   * After calling, the mempool will be in an inconsistent state, with | 
| 2994 |  |   * transactions from disconnected blocks being added to disconnectpool.  You | 
| 2995 |  |   * should make the mempool consistent again by calling MaybeUpdateMempoolForReorg, | 
| 2996 |  |   * with cs_main held. | 
| 2997 |  |   * | 
| 2998 |  |   * If disconnectpool is nullptr, then no disconnected transactions are added to | 
| 2999 |  |   * disconnectpool (note that the caller is responsible for mempool consistency | 
| 3000 |  |   * in any case). | 
| 3001 |  |   */ | 
| 3002 |  | bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTransactions* disconnectpool) | 
| 3003 | 78 | { | 
| 3004 | 78 |     AssertLockHeld(cs_main); | 
| 3005 | 78 |     if (m_mempool) AssertLockHeld(m_mempool->cs); | 
| 3006 |  |  | 
| 3007 | 78 |     CBlockIndex *pindexDelete = m_chain.Tip(); | 
| 3008 | 78 |     assert(pindexDelete); | 
| 3009 | 78 |     assert(pindexDelete->pprev); | 
| 3010 |  |     // Read block from disk. | 
| 3011 | 78 |     std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); | 
| 3012 | 78 |     CBlock& block = *pblock; | 
| 3013 | 78 |     if (!m_blockman.ReadBlock(block, *pindexDelete)) { | 
| 3014 | 0 |         LogError("DisconnectTip(): Failed to read block\n"); | 
| 3015 | 0 |         return false; | 
| 3016 | 0 |     } | 
| 3017 |  |     // Apply the block atomically to the chain state. | 
| 3018 | 78 |     const auto time_start{SteadyClock::now()}; | 
| 3019 | 78 |     { | 
| 3020 | 78 |         CCoinsViewCache view(&CoinsTip()); | 
| 3021 | 78 |         assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); | 
| 3022 | 78 |         if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) { | 
| 3023 | 0 |             LogError("DisconnectTip(): DisconnectBlock %s failed\n", pindexDelete->GetBlockHash().ToString()); | 
| 3024 | 0 |             return false; | 
| 3025 | 0 |         } | 
| 3026 | 78 |         bool flushed = view.Flush(); | 
| 3027 | 78 |         assert(flushed); | 
| 3028 | 78 |     } | 
| 3029 | 78 |     LogDebug(BCLog::BENCH, "- Disconnect block: %.2fms\n", | 
| 3030 | 78 |              Ticks<MillisecondsDouble>(SteadyClock::now() - time_start)); | 
| 3031 |  |  | 
| 3032 | 78 |     { | 
| 3033 |  |         // Prune locks that began at or after the tip should be moved backward so they get a chance to reorg | 
| 3034 | 78 |         const int max_height_first{pindexDelete->nHeight - 1}; | 
| 3035 | 78 |         for (auto& prune_lock : m_blockman.m_prune_locks) { | 
| 3036 | 0 |             if (prune_lock.second.height_first <= max_height_first) continue; | 
| 3037 |  |  | 
| 3038 | 0 |             prune_lock.second.height_first = max_height_first; | 
| 3039 | 0 |             LogDebug(BCLog::PRUNE, "%s prune lock moved back to %d\n", prune_lock.first, max_height_first); | 
| 3040 | 0 |         } | 
| 3041 | 78 |     } | 
| 3042 |  |  | 
| 3043 |  |     // Write the chain state to disk, if necessary. | 
| 3044 | 78 |     if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) { | 
| 3045 | 0 |         return false; | 
| 3046 | 0 |     } | 
| 3047 |  |  | 
| 3048 | 78 |     if (disconnectpool && m_mempool) { | 
| 3049 |  |         // Save transactions to re-add to mempool at end of reorg. If any entries are evicted for | 
| 3050 |  |         // exceeding memory limits, remove them and their descendants from the mempool. | 
| 3051 | 78 |         for (auto&& evicted_tx : disconnectpool->AddTransactionsFromBlock(block.vtx)) { | 
| 3052 | 0 |             m_mempool->removeRecursive(*evicted_tx, MemPoolRemovalReason::REORG); | 
| 3053 | 0 |         } | 
| 3054 | 78 |     } | 
| 3055 |  |  | 
| 3056 | 78 |     m_chain.SetTip(*pindexDelete->pprev); | 
| 3057 |  |  | 
| 3058 | 78 |     UpdateTip(pindexDelete->pprev); | 
| 3059 |  |     // Let wallets know transactions went from 1-confirmed to | 
| 3060 |  |     // 0-confirmed or conflicted: | 
| 3061 | 78 |     if (m_chainman.m_options.signals) { | 
| 3062 | 78 |         m_chainman.m_options.signals->BlockDisconnected(pblock, pindexDelete); | 
| 3063 | 78 |     } | 
| 3064 | 78 |     return true; | 
| 3065 | 78 | } | 
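The disconnectpool contract described above is: stash each disconnected block's transactions, and whatever gets evicted for exceeding the pool's limit must also be removed (with descendants) from the mempool by the caller. A simplified sketch of that contract, using placeholder types and a count-based limit instead of the real memory accounting:

    #include <cstddef>
    #include <deque>
    #include <string>
    #include <vector>

    // Simplified sketch of the disconnectpool contract: the newest block's
    // transactions go to the front, and whatever no longer fits must also be
    // removed from the mempool by the caller. Types and limit are placeholders.
    struct DisconnectPoolSketch {
        std::size_t max_entries;
        std::deque<std::string> txs;

        std::vector<std::string> AddTransactionsFromBlock(const std::vector<std::string>& block_txs)
        {
            txs.insert(txs.begin(), block_txs.begin(), block_txs.end());
            std::vector<std::string> evicted;
            while (txs.size() > max_entries) {
                evicted.push_back(txs.back()); // deepest (least recent) entries are evicted first
                txs.pop_back();
            }
            return evicted; // caller removes these (and their descendants) from the mempool
        }
    };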
| 3066 |  |  | 
| 3067 |  | struct PerBlockConnectTrace { | 
| 3068 |  |     CBlockIndex* pindex = nullptr; | 
| 3069 |  |     std::shared_ptr<const CBlock> pblock; | 
| 3070 | 104k |     PerBlockConnectTrace() = default; | 
| 3071 |  | }; | 
| 3072 |  | /** | 
| 3073 |  |  * Used to track blocks whose transactions were applied to the UTXO state as a | 
| 3074 |  |  * part of a single ActivateBestChainStep call. | 
| 3075 |  |  * | 
| 3076 |  |  * This class is single-use, once you call GetBlocksConnected() you have to throw | 
| 3077 |  |  * it away and make a new one. | 
| 3078 |  |  */ | 
| 3079 |  | class ConnectTrace { | 
| 3080 |  | private: | 
| 3081 |  |     std::vector<PerBlockConnectTrace> blocksConnected; | 
| 3082 |  |  | 
| 3083 |  | public: | 
| 3084 | 82.9k |     explicit ConnectTrace() : blocksConnected(1) {} | 
| 3085 |  |  | 
| 3086 | 21.7k |     void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) { | 
| 3087 | 21.7k |         assert(!blocksConnected.back().pindex); | 
| 3088 | 21.7k |         assert(pindex); | 
| 3089 | 21.7k |         assert(pblock); | 
| 3090 | 21.7k |         blocksConnected.back().pindex = pindex; | 
| 3091 | 21.7k |         blocksConnected.back().pblock = std::move(pblock); | 
| 3092 | 21.7k |         blocksConnected.emplace_back(); | 
| 3093 | 21.7k |     } | 
| 3094 |  |  | 
| 3095 | 24.9k |     std::vector<PerBlockConnectTrace>& GetBlocksConnected() { | 
| 3096 |  |         // We always keep one extra block at the end of our list because | 
| 3097 |  |         // blocks are added after all the conflicted transactions have | 
| 3098 |  |         // been filled in. Thus, the last entry should always be an empty | 
| 3099 |  |         // one waiting for the transactions from the next block. We pop | 
| 3100 |  |         // the last entry here to make sure the list we return is sane. | 
| 3101 | 24.9k |         assert(!blocksConnected.back().pindex); | 
| 3102 | 24.9k |         blocksConnected.pop_back(); | 
| 3103 | 24.9k |         return blocksConnected; | 
| 3104 | 24.9k |     } | 
| 3105 |  | }; | 
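The class keeps one empty trailing entry as a sentinel, fills it on each connection, and pops it in GetBlocksConnected(). A minimal sketch of the same pattern with a placeholder Entry type:

    #include <cassert>
    #include <vector>

    // Minimal sketch of ConnectTrace's sentinel pattern: keep one empty trailing
    // slot, fill it when a block connects, pop it before handing out the list.
    // `Entry` is a placeholder for PerBlockConnectTrace.
    struct TraceSketch {
        struct Entry { int height{-1}; };
        std::vector<Entry> entries{Entry{}}; // start with one empty sentinel

        void BlockConnected(int height)
        {
            assert(entries.back().height == -1); // sentinel must still be empty
            entries.back().height = height;
            entries.emplace_back();              // push a fresh sentinel
        }

        std::vector<Entry>& GetBlocksConnected()
        {
            assert(entries.back().height == -1); // drop the unused sentinel
            entries.pop_back();
            return entries;
        }
    };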
| 3106 |  |  | 
| 3107 |  | /** | 
| 3108 |  |  * Connect a new block to m_chain. block_to_connect is either nullptr or a pointer to a CBlock | 
| 3109 |  |  * corresponding to pindexNew, to bypass loading it again from disk. | 
| 3110 |  |  * | 
| 3111 |  |  * The block is added to connectTrace if connection succeeds. | 
| 3112 |  |  */ | 
| 3113 |  | bool Chainstate::ConnectTip( | 
| 3114 |  |     BlockValidationState& state, | 
| 3115 |  |     CBlockIndex* pindexNew, | 
| 3116 |  |     std::shared_ptr<const CBlock> block_to_connect, | 
| 3117 |  |     ConnectTrace& connectTrace, | 
| 3118 |  |     DisconnectedBlockTransactions& disconnectpool) | 
| 3119 | 25.0k | { | 
| 3120 | 25.0k |     AssertLockHeld(cs_main); | 
| 3121 | 25.0k |     if (m_mempool) AssertLockHeld(m_mempool->cs); | 
| 3122 |  |  | 
| 3123 | 25.0k |     assert(pindexNew->pprev == m_chain.Tip()); | 
| 3124 |  |     // Read block from disk. | 
| 3125 | 25.0k |     const auto time_1{SteadyClock::now()}; | 
| 3126 | 25.0k |     if (!block_to_connect) { | 
| 3127 | 539 |         std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>(); | 
| 3128 | 539 |         if (!m_blockman.ReadBlock(*pblockNew, *pindexNew)) { | 
| 3129 | 0 |             return FatalError(m_chainman.GetNotifications(), state, _("Failed to read block.")); | 
| 3130 | 0 |         } | 
| 3131 | 539 |         block_to_connect = std::move(pblockNew); | 
| 3132 | 24.4k |     } else { | 
| 3133 | 24.4k |         LogDebug(BCLog::BENCH, "  - Using cached block\n"); | 
| 3134 | 24.4k |     } | 
| 3135 |  |     // Apply the block atomically to the chain state. | 
| 3136 | 25.0k |     const auto time_2{SteadyClock::now()}; | 
| 3137 | 25.0k |     SteadyClock::time_point time_3; | 
| 3138 |  |     // When adding aggregate statistics in the future, keep in mind that | 
| 3139 |  |     // num_blocks_total may be zero until the ConnectBlock() call below. | 
| 3140 | 25.0k |     LogDebug(BCLog::BENCH, "  - Load block from disk: %.2fms\n", | 
| 3141 | 25.0k |              Ticks<MillisecondsDouble>(time_2 - time_1)); | 
| 3142 | 25.0k |     { | 
| 3143 | 25.0k |         CCoinsViewCache view(&CoinsTip()); | 
| 3144 | 25.0k |         bool rv = ConnectBlock(*block_to_connect, state, pindexNew, view); | 
| 3145 | 25.0k |         if (m_chainman.m_options.signals) { | 
| 3146 | 25.0k |             m_chainman.m_options.signals->BlockChecked(block_to_connect, state); | 
| 3147 | 25.0k |         } | 
| 3148 | 25.0k |         if (!rv) { | 
| 3149 | 3.25k |             if (state.IsInvalid()) | 
| 3150 | 3.25k |                 InvalidBlockFound(pindexNew, state); | 
| 3151 | 3.25k |             LogError("%s: ConnectBlock %s failed, %s\n", __func__, pindexNew->GetBlockHash().ToString(), state.ToString()); | 
| 3152 | 3.25k |             return false; | 
| 3153 | 3.25k |         } | 
| 3154 | 21.7k |         time_3 = SteadyClock::now(); | 
| 3155 | 21.7k |         m_chainman.time_connect_total += time_3 - time_2; | 
| 3156 | 21.7k |         assert(m_chainman.num_blocks_total > 0); | 
| 3157 | 21.7k |         LogDebug(BCLog::BENCH, "  - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 3158 | 21.7k |                  Ticks<MillisecondsDouble>(time_3 - time_2), | 
| 3159 | 21.7k |                  Ticks<SecondsDouble>(m_chainman.time_connect_total), | 
| 3160 | 21.7k |                  Ticks<MillisecondsDouble>(m_chainman.time_connect_total) / m_chainman.num_blocks_total); | 
| 3161 | 21.7k |         bool flushed = view.Flush(); | 
| 3162 | 21.7k |         assert(flushed); | 
| 3163 | 21.7k |     } | 
| 3164 | 21.7k |     const auto time_4{SteadyClock::now()}; | 
| 3165 | 21.7k |     m_chainman.time_flush += time_4 - time_3; | 
| 3166 | 21.7k |     LogDebug(BCLog::BENCH, "  - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 3167 | 21.7k |              Ticks<MillisecondsDouble>(time_4 - time_3), | 
| 3168 | 21.7k |              Ticks<SecondsDouble>(m_chainman.time_flush), | 
| 3169 | 21.7k |              Ticks<MillisecondsDouble>(m_chainman.time_flush) / m_chainman.num_blocks_total); | 
| 3170 |  |     // Write the chain state to disk, if necessary. | 
| 3171 | 21.7k |     if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) { | 
| 3172 | 0 |         return false; | 
| 3173 | 0 |     } | 
| 3174 | 21.7k |     const auto time_5{SteadyClock::now()}; | 
| 3175 | 21.7k |     m_chainman.time_chainstate += time_5 - time_4; | 
| 3176 | 21.7k |     LogDebug(BCLog::BENCH, "  - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 3177 | 21.7k |              Ticks<MillisecondsDouble>(time_5 - time_4), | 
| 3178 | 21.7k |              Ticks<SecondsDouble>(m_chainman.time_chainstate), | 
| 3179 | 21.7k |              Ticks<MillisecondsDouble>(m_chainman.time_chainstate) / m_chainman.num_blocks_total); | 
| 3180 |  |     // Remove conflicting transactions from the mempool. | 
| 3181 | 21.7k |     if (m_mempool) { | 
| 3182 | 21.7k |         m_mempool->removeForBlock(block_to_connect->vtx, pindexNew->nHeight); | 
| 3183 | 21.7k |         disconnectpool.removeForBlock(block_to_connect->vtx); | 
| 3184 | 21.7k |     } | 
| 3185 |  |     // Update m_chain & related variables. | 
| 3186 | 21.7k |     m_chain.SetTip(*pindexNew); | 
| 3187 | 21.7k |     UpdateTip(pindexNew); | 
| 3188 |  |  | 
| 3189 | 21.7k |     const auto time_6{SteadyClock::now()}; | 
| 3190 | 21.7k |     m_chainman.time_post_connect += time_6 - time_5; | 
| 3191 | 21.7k |     m_chainman.time_total += time_6 - time_1; | 
| 3192 | 21.7k |     LogDebug(BCLog::BENCH, "  - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 3193 | 21.7k |              Ticks<MillisecondsDouble>(time_6 - time_5), | 
| 3194 | 21.7k |              Ticks<SecondsDouble>(m_chainman.time_post_connect), | 
| 3195 | 21.7k |              Ticks<MillisecondsDouble>(m_chainman.time_post_connect) / m_chainman.num_blocks_total); | 
| 3196 | 21.7k |     LogDebug(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", | 
| 3197 | 21.7k |              Ticks<MillisecondsDouble>(time_6 - time_1), | 
| 3198 | 21.7k |              Ticks<SecondsDouble>(m_chainman.time_total), | 
| 3199 | 21.7k |              Ticks<MillisecondsDouble>(m_chainman.time_total) / m_chainman.num_blocks_total); | 
| 3200 |  |  | 
| 3201 |  |     // If we are the background validation chainstate, check to see if we are done | 
| 3202 |  |     // validating the snapshot (i.e. our tip has reached the snapshot's base block). | 
| 3203 | 21.7k |     if (this != &m_chainman.ActiveChainstate()) { | 
| 3204 |  |         // This call may set `m_disabled`, which is referenced immediately afterwards in | 
| 3205 |  |         // ActivateBestChain, so that we stop connecting blocks past the snapshot base. | 
| 3206 | 0 |         m_chainman.MaybeCompleteSnapshotValidation(); | 
| 3207 | 0 |     } | 
| 3208 |  |  | 
| 3209 | 21.7k |     connectTrace.BlockConnected(pindexNew, std::move(block_to_connect)); | 
| 3210 | 21.7k |     return true; | 
| 3211 | 21.7k | } | 
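The BENCH logging above samples a steady clock between stages and reports both the latest block and a running per-block average. A small self-contained sketch of that accumulation pattern (names are illustrative):

    #include <chrono>
    #include <cstdio>

    // Sketch of the per-stage timing pattern: sample a steady clock around each
    // stage, accumulate a total, and report the last block plus a running
    // per-block average.
    struct ConnectBench {
        std::chrono::steady_clock::duration total{};
        long blocks{0};

        void Record(std::chrono::steady_clock::time_point start,
                    std::chrono::steady_clock::time_point end)
        {
            total += end - start;
            ++blocks;
            const double last_ms = std::chrono::duration<double, std::milli>(end - start).count();
            const double avg_ms = std::chrono::duration<double, std::milli>(total).count() / blocks;
            std::printf("  - Connect total: %.2fms [avg %.2fms/blk]\n", last_ms, avg_ms);
        }
    };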
| 3212 |  |  | 
| 3213 |  | /** | 
| 3214 |  |  * Return the tip of the chain with the most work in it, that isn't | 
| 3215 |  |  * known to be invalid (it's however far from certain to be valid). | 
| 3216 |  |  */ | 
| 3217 |  | CBlockIndex* Chainstate::FindMostWorkChain() | 
| 3218 | 82.7k | { | 
| 3219 | 82.7k |     AssertLockHeld(::cs_main); | 
| 3220 | 82.8k |     do { | 
| 3221 | 82.8k |         CBlockIndex *pindexNew = nullptr; | 
| 3222 |  |  | 
| 3223 |  |         // Find the best candidate header. | 
| 3224 | 82.8k |         { | 
| 3225 | 82.8k |             std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin(); | 
| 3226 | 82.8k |             if (it == setBlockIndexCandidates.rend()) | 
| 3227 | 0 |                 return nullptr; | 
| 3228 | 82.8k |             pindexNew = *it; | 
| 3229 | 82.8k |         } | 
| 3230 |  |  | 
| 3231 |  |         // Check whether all blocks on the path between the currently active chain and the candidate are valid. | 
| 3232 |  |         // Just going until the active chain is an optimization, as we know all blocks in it are valid already. | 
| 3233 | 0 |         CBlockIndex *pindexTest = pindexNew; | 
| 3234 | 82.8k |         bool fInvalidAncestor = false; | 
| 3235 | 107k |         while (pindexTest && !m_chain.Contains(pindexTest)) { | 
| 3236 | 25.1k |             assert(pindexTest->HaveNumChainTxs() || pindexTest->nHeight == 0); | 
| 3237 |  |  | 
| 3238 |  |             // Pruned nodes may have entries in setBlockIndexCandidates for | 
| 3239 |  |             // which block files have been deleted.  Remove those as candidates | 
| 3240 |  |             // for the most work chain if we come across them; we can't switch | 
| 3241 |  |             // to a chain unless we have all the non-active-chain parent blocks. | 
| 3242 | 25.1k |             bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK; | 
| 3243 | 25.1k |             bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); | 
| 3243 | 25.1k |             if (fFailedChain || fMissingData) { | 
| 3245 |  |                 // Candidate chain is not usable (either invalid or missing data) | 
| 3246 | 41 |                 if (fFailedChain && (m_chainman.m_best_invalid == nullptr || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork)) { | 
| 3247 | 0 |                     m_chainman.m_best_invalid = pindexNew; | 
| 3248 | 0 |                 } | 
| 3249 | 41 |                 CBlockIndex *pindexFailed = pindexNew; | 
| 3250 |  |                 // Remove the entire chain from the set. | 
| 3251 | 41 |                 while (pindexTest != pindexFailed) { | 
| 3252 | 0 |                     if (fFailedChain) { | 
| 3253 | 0 |                         pindexFailed->nStatus |= BLOCK_FAILED_CHILD; | 
| 3254 | 0 |                         m_blockman.m_dirty_blockindex.insert(pindexFailed); | 
| 3255 | 0 |                     } else if (fMissingData) { | 
| 3256 |  |                         // If we're missing data, then add back to m_blocks_unlinked, | 
| 3257 |  |                         // so that if the block arrives in the future we can try adding | 
| 3258 |  |                         // to setBlockIndexCandidates again. | 
| 3259 | 0 |                         m_blockman.m_blocks_unlinked.insert( | 
| 3260 | 0 |                             std::make_pair(pindexFailed->pprev, pindexFailed)); | 
| 3261 | 0 |                     } | 
| 3262 | 0 |                     setBlockIndexCandidates.erase(pindexFailed); | 
| 3263 | 0 |                     pindexFailed = pindexFailed->pprev; | 
| 3264 | 0 |                 } | 
| 3265 | 41 |                 setBlockIndexCandidates.erase(pindexTest); | 
| 3266 | 41 |                 fInvalidAncestor = true; | 
| 3267 | 41 |                 break; | 
| 3268 | 41 |             } | 
| 3269 | 25.0k |             pindexTest = pindexTest->pprev; | 
| 3270 | 25.0k |         } | 
| 3271 | 82.8k |         if (!fInvalidAncestor) | 
| 3272 | 82.7k |             return pindexNew; | 
| 3273 | 82.8k |     } while(true); | 
| 3274 | 82.7k | } | 
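Stripped of the block-index details, the scan above walks candidates from most to least work and returns the first one whose path back to the active chain is usable. A toy version over plain integers, where a caller-supplied predicate stands in for the failed/missing-data checks:

    #include <optional>
    #include <set>

    // Toy version of the candidate scan: walk candidates from most to least work
    // and return the first one whose path back to the active chain is usable.
    // An int stands in for a CBlockIndex; `usable` stands in for the validity checks.
    template <typename Usable>
    std::optional<int> FindMostWorkCandidate(const std::set<int>& candidates, Usable usable)
    {
        for (auto it = candidates.rbegin(); it != candidates.rend(); ++it) {
            if (usable(*it)) return *it; // best usable candidate wins
        }
        return std::nullopt; // no usable candidate left
    }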
| 3275 |  |  | 
| 3276 |  | /** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */ | 
| 3277 | 73.0k | void Chainstate::PruneBlockIndexCandidates() { | 
| 3278 |  |     // Note that we can't delete the current block itself, as we may need to return to it later in case a | 
| 3279 |  |     // reorganization to a better block fails. | 
| 3280 | 73.0k |     std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin(); | 
| 3281 | 10.3M |     while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) { | 
| 3282 | 10.2M |         setBlockIndexCandidates.erase(it++); | 
| 3283 | 10.2M |     } | 
| 3284 |  |     // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates. | 
| 3285 | 73.0k |     assert(!setBlockIndexCandidates.empty()); | 
| 3286 | 73.0k | } | 
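The erase loop relies on the set's own ordering: everything that compares worse than the tip is dropped, and the tip (or a successor being worked towards) always survives. The same loop on a plain std::set<int>, as a sketch:

    #include <set>

    // The same erase loop on a plain std::set<int>: drop every candidate that
    // compares worse than the tip; the tip itself (or something better) survives.
    void PruneCandidates(std::set<int>& candidates, int tip)
    {
        auto it = candidates.begin();
        while (it != candidates.end() && candidates.value_comp()(*it, tip)) {
            it = candidates.erase(it);
        }
    }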
| 3287 |  |  | 
| 3288 |  | /** | 
| 3289 |  |  * Try to make some progress towards making pindexMostWork the active block. | 
| 3290 |  |  * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork. | 
| 3291 |  |  * | 
| 3292 |  |  * @returns true unless a system error occurred | 
| 3293 |  |  */ | 
| 3294 |  | bool Chainstate::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) | 
| 3295 | 24.9k | { | 
| 3296 | 24.9k |     AssertLockHeld(cs_main); | 
| 3297 | 24.9k |     if (m_mempool) AssertLockHeld(m_mempool->cs); | 
| 3298 |  |  | 
| 3299 | 24.9k |     const CBlockIndex* pindexOldTip = m_chain.Tip(); | 
| 3300 | 24.9k |     const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork); | 
| 3301 |  |  | 
| 3302 |  |     // Disconnect active blocks which are no longer in the best chain. | 
| 3303 | 24.9k |     bool fBlocksDisconnected = false; | 
| 3304 | 24.9k |     DisconnectedBlockTransactions disconnectpool{MAX_DISCONNECTED_TX_POOL_BYTES}; | 
| 3305 | 25.0k |     while (m_chain.Tip() && m_chain.Tip() != pindexFork) { | 
| 3306 | 78 |         if (!DisconnectTip(state, &disconnectpool)) { | 
| 3307 |  |             // This is likely a fatal error, but keep the mempool consistent, | 
| 3308 |  |             // just in case. Only remove from the mempool in this case. | 
| 3309 | 0 |             MaybeUpdateMempoolForReorg(disconnectpool, false); | 
| 3310 |  |  | 
| 3311 |  |             // If we're unable to disconnect a block during normal operation, | 
| 3312 |  |             // then that is a failure of our local system -- we should abort | 
| 3313 |  |             // rather than stay on a less work chain. | 
| 3314 | 0 |             FatalError(m_chainman.GetNotifications(), state, _("Failed to disconnect block.")); | 
| 3315 | 0 |             return false; | 
| 3316 | 0 |         } | 
| 3317 | 78 |         fBlocksDisconnected = true; | 
| 3318 | 78 |     } | 
| 3319 |  |  | 
| 3320 |  |     // Build list of new blocks to connect (in descending height order). | 
| 3321 | 24.9k |     std::vector<CBlockIndex*> vpindexToConnect; | 
| 3322 | 24.9k |     bool fContinue = true; | 
| 3323 | 24.9k |     int nHeight = pindexFork ? pindexFork->nHeight : -1; | 
| 3324 | 49.9k |     while (fContinue && nHeight != pindexMostWork->nHeight) { | 
| 3325 |  |         // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need | 
| 3326 |  |         // a few blocks along the way. | 
| 3327 | 24.9k |         int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight); | 
| 3328 | 24.9k |         vpindexToConnect.clear(); | 
| 3329 | 24.9k |         vpindexToConnect.reserve(nTargetHeight - nHeight); | 
| 3330 | 24.9k |         CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight); | 
| 3331 | 50.2k |         while (pindexIter && pindexIter->nHeight != nHeight) { | 
| 3332 | 25.2k |             vpindexToConnect.push_back(pindexIter); | 
| 3333 | 25.2k |             pindexIter = pindexIter->pprev; | 
| 3334 | 25.2k |         } | 
| 3335 | 24.9k |         nHeight = nTargetHeight; | 
| 3336 |  |  | 
| 3337 |  |         // Connect new blocks. | 
| 3338 | 25.0k |         for (CBlockIndex* pindexConnect : vpindexToConnect | std::views::reverse) { | 
| 3339 | 25.0k |             if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) { | 
| 3340 | 3.25k |                 if (state.IsInvalid()) { | 
| 3341 |  |                     // The block violates a consensus rule. | 
| 3342 | 3.25k |                     if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) { | 
| 3343 | 3.25k |                         InvalidChainFound(vpindexToConnect.front()); | 
| 3344 | 3.25k |                     } | 
| 3345 | 3.25k |                     state = BlockValidationState(); | 
| 3346 | 3.25k |                     fInvalidFound = true; | 
| 3347 | 3.25k |                     fContinue = false; | 
| 3348 | 3.25k |                     break; | 
| 3349 | 3.25k |                 } else { | 
| 3350 |  |                     // A system error occurred (disk space, database error, ...). | 
| 3351 |  |                     // Make the mempool consistent with the current tip, just in case | 
| 3352 |  |                     // any observers try to use it before shutdown. | 
| 3353 | 0 |                     MaybeUpdateMempoolForReorg(disconnectpool, false); | 
| 3354 | 0 |                     return false; | 
| 3355 | 0 |                 } | 
| 3356 | 21.7k |             } else { | 
| 3357 | 21.7k |                 PruneBlockIndexCandidates(); | 
| 3358 | 21.7k |                 if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) { | 
| 3359 |  |                     // We're in a better position than we were. Return temporarily to release the lock. | 
| 3360 | 21.7k |                     fContinue = false; | 
| 3361 | 21.7k |                     break; | 
| 3362 | 21.7k |                 } | 
| 3363 | 21.7k |             } | 
| 3364 | 25.0k |         } | 
| 3365 | 24.9k |     } | 
| 3366 |  |  | 
| 3367 | 24.9k |     if (fBlocksDisconnected) { | 
| 3368 |  |         // If any blocks were disconnected, disconnectpool may be non empty.  Add | 
| 3369 |  |         // any disconnected transactions back to the mempool. | 
| 3370 | 78 |         MaybeUpdateMempoolForReorg(disconnectpool, true); | 
| 3371 | 78 |     } | 
| 3372 | 24.9k |     if (m_mempool) m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1); | 
| 3373 |  |  | 
| 3374 | 24.9k |     CheckForkWarningConditions(); | 
| 3375 |  |  | 
| 3376 | 24.9k |     return true; | 
| 3377 | 24.9k | } | 
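The connect phase above advances at most 32 blocks per pass so cs_main can be released between batches. A sketch of just the batching arithmetic, assuming the target height is at or above the fork height (plain heights replace CBlockIndex pointers here):

    #include <algorithm>
    #include <vector>

    // Sketch of the 32-block batching: advance from the fork height toward the
    // target in chunks, emitting each batch highest-first like vpindexToConnect.
    // Assumes target_height >= fork_height.
    std::vector<std::vector<int>> PlanConnectBatches(int fork_height, int target_height)
    {
        std::vector<std::vector<int>> batches;
        int height = fork_height;
        while (height != target_height) {
            const int batch_top = std::min(height + 32, target_height);
            std::vector<int> batch;
            for (int h = batch_top; h > height; --h) batch.push_back(h);
            batches.push_back(std::move(batch));
            height = batch_top;
        }
        return batches;
    }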
| 3378 |  |  | 
| 3379 |  | static SynchronizationState GetSynchronizationState(bool init, bool blockfiles_indexed) | 
| 3380 | 120k | { | 
| 3381 | 120k |     if (!init) return SynchronizationState::POST_INIT; | 
| 3382 | 67.7k |     if (!blockfiles_indexed) return SynchronizationState::INIT_REINDEX; | 
| 3383 | 67.7k |     return SynchronizationState::INIT_DOWNLOAD; | 
| 3384 | 67.7k | } | 
| 3385 |  |  | 
| 3386 |  | bool ChainstateManager::NotifyHeaderTip() | 
| 3387 | 1.69M | { | 
| 3388 | 1.69M |     bool fNotify = false; | 
| 3389 | 1.69M |     bool fInitialBlockDownload = false; | 
| 3390 | 1.69M |     CBlockIndex* pindexHeader = nullptr; | 
| 3391 | 1.69M |     { | 
| 3392 | 1.69M |         LOCK(GetMutex()); | 
| 3393 | 1.69M |         pindexHeader = m_best_header; | 
| 3394 |  |  | 
| 3395 | 1.69M |         if (pindexHeader != m_last_notified_header) { | 
| 3396 | 47.2k |             fNotify = true; | 
| 3397 | 47.2k |             fInitialBlockDownload = IsInitialBlockDownload(); | 
| 3398 | 47.2k |             m_last_notified_header = pindexHeader; | 
| 3399 | 47.2k |         } | 
| 3400 | 1.69M |     } | 
| 3401 |  |     // Send block tip changed notifications without the lock held | 
| 3402 | 1.69M |     if (fNotify) { | 
| 3403 | 47.2k |         GetNotifications().headerTip(GetSynchronizationState(fInitialBlockDownload, m_blockman.m_blockfiles_indexed), pindexHeader->nHeight, pindexHeader->nTime, false); | 
| 3404 | 47.2k |     } | 
| 3405 | 1.69M |     return fNotify; | 
| 3406 | 1.69M | } | 
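NotifyHeaderTip follows a common pattern: take the lock only to snapshot and update the "last notified" state, then fire the callback with the lock released so the callback can itself take locks without deadlocking. A generic sketch of that pattern with std::mutex and placeholder ints:

    #include <mutex>

    // Generic sketch of the snapshot-then-notify pattern used above: update the
    // "last notified" state under the lock, deliver the callback without it.
    template <typename Notify>
    bool NotifyIfChanged(std::mutex& m, int& last_notified, const int& current, Notify notify)
    {
        int snapshot;
        {
            std::lock_guard<std::mutex> lock(m);
            if (current == last_notified) return false;
            last_notified = current;
            snapshot = current;
        }
        notify(snapshot); // called with the lock released
        return true;
    }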
| 3407 |  |  | 
| 3408 | 82.9k | static void LimitValidationInterfaceQueue(ValidationSignals& signals) LOCKS_EXCLUDED(cs_main) { | 
| 3409 | 82.9k |     AssertLockNotHeld(cs_main); | 
| 3410 |  |  | 
| 3411 | 82.9k |     if (signals.CallbacksPending() > 10) { | 
| 3412 | 0 |         signals.SyncWithValidationInterfaceQueue(); | 
| 3413 | 0 |     } | 
| 3414 | 82.9k | } | 
| 3415 |  |  | 
| 3416 |  | bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock) | 
| 3417 | 79.5k | { | 
| 3418 | 79.5k |     AssertLockNotHeld(m_chainstate_mutex); | 
| 3419 |  |  | 
| 3420 |  |     // Note that while we're often called here from ProcessNewBlock, this is | 
| 3421 |  |     // far from a guarantee. Things in the P2P/RPC will often end up calling | 
| 3422 |  |     // us in the middle of ProcessNewBlock - do not assume pblock is set | 
| 3423 |  |     // sanely for performance or correctness! | 
| 3424 | 79.5k |     AssertLockNotHeld(::cs_main); | 
| 3425 |  |  | 
| 3426 |  |     // ABC maintains a fair degree of expensive-to-calculate internal state | 
| 3427 |  |     // because this function periodically releases cs_main so that it does not lock up other threads for too long | 
| 3428 |  |     // during large connects - and to allow for e.g. the callback queue to drain | 
| 3429 |  |     // we use m_chainstate_mutex to enforce mutual exclusion so that only one caller may execute this function at a time | 
| 3430 | 79.5k |     LOCK(m_chainstate_mutex); | 
| 3431 |  |  | 
| 3432 |  |     // Belt-and-suspenders check that we aren't attempting to advance the background | 
| 3433 |  |     // chainstate past the snapshot base block. | 
| 3434 | 79.5k |     if (WITH_LOCK(::cs_main, return m_disabled)) { | 
| 3435 | 0 |         LogPrintf("m_disabled is set - this chainstate should not be in operation. " | 
| 3436 | 0 |             "Please report this as a bug. %s\n", CLIENT_BUGREPORT); | 
| 3437 | 0 |         return false; | 
| 3438 | 0 |     } | 
| 3439 |  |  | 
| 3440 | 79.5k |     CBlockIndex *pindexMostWork = nullptr; | 
| 3441 | 79.5k |     CBlockIndex *pindexNewTip = nullptr; | 
| 3442 | 79.5k |     bool exited_ibd{false}; | 
| 3443 | 82.9k |     do { | 
| 3444 |  |         // Block until the validation queue drains. This should largely | 
| 3445 |  |         // never happen in normal operation, however may happen during | 
| 3446 |  |         // reindex, causing memory blowup if we run too far ahead. | 
| 3447 |  |         // Note that if a validationinterface callback ends up calling | 
| 3448 |  |         // ActivateBestChain this may lead to a deadlock! We should | 
| 3449 |  |         // probably have a DEBUG_LOCKORDER test for this in the future. | 
| 3450 | 82.9k |         if (m_chainman.m_options.signals) LimitValidationInterfaceQueue(*m_chainman.m_options.signals); | 
| 3451 |  |  | 
| 3452 | 82.9k |         { | 
| 3453 | 82.9k |             LOCK(cs_main); | 
| 3454 | 82.9k |             { | 
| 3455 |  |             // Lock transaction pool for at least as long as it takes for connectTrace to be consumed | 
| 3456 | 82.9k |             LOCK(MempoolMutex()); | 
| 3457 | 82.9k |             const bool was_in_ibd = m_chainman.IsInitialBlockDownload(); | 
| 3458 | 82.9k |             CBlockIndex* starting_tip = m_chain.Tip(); | 
| 3459 | 82.9k |             bool blocks_connected = false; | 
| 3460 | 82.9k |             do { | 
| 3461 |  |                 // We absolutely may not unlock cs_main until we've made forward progress | 
| 3462 |  |                 // (with the exception of shutdown due to hardware issues, low disk space, etc). | 
| 3463 | 82.9k |                 ConnectTrace connectTrace; // Destructed before cs_main is unlocked | 
| 3464 |  |  | 
| 3465 | 82.9k |                 if (pindexMostWork == nullptr) { | 
| 3466 | 82.7k |                     pindexMostWork = FindMostWorkChain(); | 
| 3467 | 82.7k |                 } | 
| 3468 |  |  | 
| 3469 |  |                 // Whether we have anything to do at all. | 
| 3470 | 82.9k |                 if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) { | 
| 3471 | 57.9k |                     break; | 
| 3472 | 57.9k |                 } | 
| 3473 |  |  | 
| 3474 | 24.9k |                 bool fInvalidFound = false; | 
| 3475 | 24.9k |                 std::shared_ptr<const CBlock> nullBlockPtr; | 
| 3476 |  |                 // BlockConnected signals must be sent for the original role; | 
| 3477 |  |                 // in case snapshot validation is completed during ActivateBestChainStep, the | 
| 3478 |  |                 // result of GetRole() changes from BACKGROUND to NORMAL. | 
| 3479 | 24.9k |                const ChainstateRole chainstate_role{this->GetRole()}; | 
| 3480 | 24.9k |                 if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) { | 
| 3481 |  |                     // A system error occurred | 
| 3482 | 0 |                     return false; | 
| 3483 | 0 |                 } | 
| 3484 | 24.9k |                 blocks_connected = true; | 
| 3485 |  |  | 
| 3486 | 24.9k |                 if (fInvalidFound) { | 
| 3487 |  |                     // Wipe cache, we may need another branch now. | 
| 3488 | 3.25k |                     pindexMostWork = nullptr; | 
| 3489 | 3.25k |                 } | 
| 3490 | 24.9k |                 pindexNewTip = m_chain.Tip(); | 
| 3491 |  |  | 
| 3492 | 24.9k |                 for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) { | 
| 3493 | 21.7k |                     assert(trace.pblock && trace.pindex); | 
| 3494 | 21.7k |                     if (m_chainman.m_options.signals) { | 
| 3495 | 21.7k |                         m_chainman.m_options.signals->BlockConnected(chainstate_role, trace.pblock, trace.pindex); | 
| 3496 | 21.7k |                     } | 
| 3497 | 21.7k |                 } | 
| 3498 |  |  | 
| 3499 |  |                 // This will have been toggled in | 
| 3500 |  |                 // ActivateBestChainStep -> ConnectTip -> MaybeCompleteSnapshotValidation, | 
| 3501 |  |                 // if at all, so we should catch it here. | 
| 3502 |  |                 // | 
| 3503 |  |                 // Break this do-while to ensure we don't advance past the base snapshot. | 
| 3504 | 24.9k |                 if (m_disabled) { | 
| 3505 | 0 |                     break; | 
| 3506 | 0 |                 } | 
| 3507 | 24.9k |             } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip))); | 
| 3508 | 82.9k |             if (!blocks_connected) return true; | 
| 3509 |  |  | 
| 3510 | 24.9k |             const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip); | 
| 3511 | 24.9k |             bool still_in_ibd = m_chainman.IsInitialBlockDownload(); | 
| 3512 |  |  | 
| 3513 | 24.9k |             if (was_in_ibd && !still_in_ibd) { | 
| 3514 |  |                 // Active chainstate has exited IBD. | 
| 3515 | 0 |                 exited_ibd = true; | 
| 3516 | 0 |             } | 
| 3517 |  |  | 
| 3518 |  |             // Notify external listeners about the new tip. | 
| 3519 |  |             // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected | 
| 3520 | 24.9k |             if (this == &m_chainman.ActiveChainstate() && pindexFork != pindexNewTip) { | 
| 3521 |  |                 // Notify ValidationInterface subscribers | 
| 3522 | 21.7k |                 if (m_chainman.m_options.signals) { | 
| 3523 | 21.7k |                     m_chainman.m_options.signals->UpdatedBlockTip(pindexNewTip, pindexFork, still_in_ibd); | 
| 3524 | 21.7k |                 } | 
| 3525 |  |  | 
| 3526 | 21.7k |                 if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip( | 
| 3527 | 21.7k |                         /*state=*/GetSynchronizationState(still_in_ibd, m_chainman.m_blockman.m_blockfiles_indexed), | 
| 3528 | 21.7k |                         /*index=*/*pindexNewTip, | 
| 3529 | 21.7k |                         /*verification_progress=*/m_chainman.GuessVerificationProgress(pindexNewTip)))) | 
| 3530 | 0 |                 { | 
| 3531 |  |                     // Just breaking and returning success for now. This could | 
| 3532 |  |                     // be changed to bubble up the kernel::Interrupted value to | 
| 3533 |  |                     // the caller so the caller could distinguish between | 
| 3534 |  |                     // completed and interrupted operations. | 
| 3535 | 0 |                     break; | 
| 3536 | 0 |                 } | 
| 3537 | 21.7k |             } | 
| 3538 | 24.9k |             } // release MempoolMutex | 
| 3539 |  |             // Notify external listeners about the new tip, even if pindexFork == pindexNewTip. | 
| 3540 | 24.9k |             if (m_chainman.m_options.signals && this == &m_chainman.ActiveChainstate()) { | 
| 3541 | 24.9k |                 m_chainman.m_options.signals->ActiveTipChange(*Assert(pindexNewTip), m_chainman.IsInitialBlockDownload()); | 
| 3542 | 24.9k |             } | 
| 3543 | 24.9k |         } // release cs_main | 
| 3544 |  |         // When we reach this point, we switched to a new tip (stored in pindexNewTip). | 
| 3545 |  |  | 
| 3546 | 24.9k |         if (exited_ibd) { | 
| 3547 |  |             // If a background chainstate is in use, we may need to rebalance our | 
| 3548 |  |             // allocation of caches once a chainstate exits initial block download. | 
| 3549 | 0 |             LOCK(::cs_main); | 
| 3550 | 0 |             m_chainman.MaybeRebalanceCaches(); | 
| 3551 | 0 |         } | 
| 3552 |  |  | 
| 3553 | 24.9k |         if (WITH_LOCK(::cs_main, return m_disabled)) { | 
| 3554 |  |             // Background chainstate has reached the snapshot base block, so exit. | 
| 3555 |  |  | 
| 3556 |  |             // Restart indexes to resume indexing for all blocks unique to the snapshot | 
| 3557 |  |             // chain. This resumes indexing "in order" from where the indexing on the | 
| 3558 |  |             // background validation chain left off. | 
| 3559 |  |             // | 
| 3560 |  |             // This cannot be done while holding cs_main (within | 
| 3561 |  |             // MaybeCompleteSnapshotValidation) or a cs_main deadlock will occur. | 
| 3562 | 0 |             if (m_chainman.snapshot_download_completed) { | 
| 3563 | 0 |                 m_chainman.snapshot_download_completed(); | 
| 3564 | 0 |             } | 
| 3565 | 0 |             break; | 
| 3566 | 0 |         } | 
| 3567 |  |  | 
| 3568 |  |         // We check interrupt only after giving ActivateBestChainStep a chance to run once so that we | 
| 3569 |  |         // never interrupt before connecting the genesis block during LoadChainTip(). Previously this | 
| 3570 |  |         // caused an assert() failure during interrupt in such cases as the UTXO DB flushing checks | 
| 3571 |  |         // that the best block hash is non-null. | 
| 3572 | 24.9k |         if (m_chainman.m_interrupt) break; | 
| 3573 | 24.9k |     } while (pindexNewTip != pindexMostWork); | 
| 3574 |  |  | 
| 3575 | 21.5k |     m_chainman.CheckBlockIndex(); | 
| 3576 |  |  | 
| 3577 |  |     // Write changes periodically to disk, after relay. | 
| 3578 | 21.5k |     if (!FlushStateToDisk(state, FlushStateMode::PERIODIC)) { | 
| 3579 | 0 |         return false; | 
| 3580 | 0 |     } | 
| 3581 |  |  | 
| 3582 | 21.5k |     return true; | 
| 3583 | 21.5k | } | 
| 3584 |  |  | 
| 3585 |  | bool Chainstate::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex) | 
| 3586 | 0 | { | 
| 3587 | 0 |     AssertLockNotHeld(m_chainstate_mutex); | 
| 3588 | 0 |     AssertLockNotHeld(::cs_main); | 
| 3589 | 0 |     { | 
| 3590 | 0 |         LOCK(cs_main); | 
| 3591 | 0 |         if (pindex->nChainWork < m_chain.Tip()->nChainWork) { | 
| 3592 |  |             // Nothing to do, this block is not at the tip. | 
| 3593 | 0 |             return true; | 
| 3594 | 0 |         } | 
| 3595 | 0 |         if (m_chain.Tip()->nChainWork > m_chainman.nLastPreciousChainwork) { | 
| 3596 |  |             // The chain has been extended since the last call, reset the counter. | 
| 3597 | 0 |             m_chainman.nBlockReverseSequenceId = -1; | 
| 3598 | 0 |         } | 
| 3599 | 0 |         m_chainman.nLastPreciousChainwork = m_chain.Tip()->nChainWork; | 
| 3600 | 0 |         setBlockIndexCandidates.erase(pindex); | 
| 3601 | 0 |         pindex->nSequenceId = m_chainman.nBlockReverseSequenceId; | 
| 3602 | 0 |         if (m_chainman.nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) { | 
| 3603 |  |             // We can't keep reducing the counter if somebody really wants to | 
| 3604 |  |             // call preciousblock 2**31-1 times on the same set of tips... | 
| 3605 | 0 |             m_chainman.nBlockReverseSequenceId--; | 
| 3606 | 0 |         } | 
| 3607 | 0 |         if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveNumChainTxs()) { | 
| 3608 | 0 |             setBlockIndexCandidates.insert(pindex); | 
| 3609 | 0 |             PruneBlockIndexCandidates(); | 
| 3610 | 0 |         } | 
| 3611 | 0 |     } | 
| 3612 |  |  | 
| 3613 | 0 |     return ActivateBestChain(state, std::shared_ptr<const CBlock>()); | 
| 3614 | 0 | } | 
| 3615 |  |  | 
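Editor's note: PreciousBlock works by handing the chosen block a sequence id that sorts ahead of every normally received block of equal chain work. The sketch below is a simplified model of that tie-break, not the real CBlockIndexWorkComparator (which operates on arith_uint256 and has further tie-breakers); the struct and function names here are illustrative assumptions.

```cpp
// Simplified model of the tie-break PreciousBlock relies on (assumption: mirrors,
// but is not, CBlockIndexWorkComparator).
#include <cstdint>
#include <cstdio>

struct Tip {
    uint64_t chain_work;  // stand-in for the 256-bit nChainWork
    int32_t sequence_id;  // nSequenceId: positive and increasing for normal blocks
};

// True if `a` should be preferred over `b` as the chain tip.
bool PreferTip(const Tip& a, const Tip& b)
{
    if (a.chain_work != b.chain_work) return a.chain_work > b.chain_work;
    return a.sequence_id < b.sequence_id; // lower sequence id wins ties
}

int main()
{
    Tip normal{.chain_work = 100, .sequence_id = 7};
    Tip precious{.chain_work = 100, .sequence_id = -1}; // nBlockReverseSequenceId value
    std::printf("precious wins tie: %d\n", PreferTip(precious, normal)); // prints 1
}
```

Because nBlockReverseSequenceId only ever decreases, repeated preciousblock calls keep producing ids that beat both normal blocks and previously "precious" ones.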
| 3616 |  | bool Chainstate::InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex) | 
| 3617 | 0 | { | 
| 3618 | 0 |     AssertLockNotHeld(m_chainstate_mutex); | 
| 3619 | 0 |     AssertLockNotHeld(::cs_main); | 
| 3620 |  |  | 
| 3621 |  |     // Genesis block can't be invalidated | 
| 3622 | 0 |     assert(pindex); | 
| 3623 | 0 |     if (pindex->nHeight == 0) return false; | 
| 3624 |  |  | 
| 3625 | 0 |     CBlockIndex* to_mark_failed = pindex; | 
| 3626 | 0 |     bool pindex_was_in_chain = false; | 
| 3627 | 0 |     int disconnected = 0; | 
| 3628 |  |  | 
| 3629 |  |     // We do not allow ActivateBestChain() to run while InvalidateBlock() is | 
| 3630 |  |     // running, as that could cause the tip to change while we disconnect | 
| 3631 |  |     // blocks. | 
| 3632 | 0 |     LOCK(m_chainstate_mutex); | 
| 3633 |  |  | 
| 3634 |  |     // We'll be acquiring and releasing cs_main below, to allow the validation | 
| 3635 |  |     // callbacks to run. However, we should keep the block index in a | 
| 3636 |  |     // consistent state as we disconnect blocks -- in particular we need to | 
| 3637 |  |     // add equal-work blocks to setBlockIndexCandidates as we disconnect. | 
| 3638 |  |     // To avoid walking the block index repeatedly in search of candidates, | 
| 3639 |  |     // build a map once so that we can look up candidate blocks by chain | 
| 3640 |  |     // work as we go. | 
| 3641 | 0 |     std::multimap<const arith_uint256, CBlockIndex*> highpow_outofchain_headers; | 
| 3642 |  |  | 
| 3643 | 0 |     { | 
| 3644 | 0 |         LOCK(cs_main); | 
| 3645 | 0 |         for (auto& entry : m_blockman.m_block_index) { | 
| 3646 | 0 |             CBlockIndex* candidate = &entry.second; | 
| 3647 |  |             // We don't need to put anything in our active chain into the | 
| 3648 |  |             // multimap, because those candidates will be found and considered | 
| 3649 |  |             // as we disconnect. | 
| 3650 |  |             // Instead, consider only non-active-chain blocks that score | 
| 3651 |  |             // at least as good with CBlockIndexWorkComparator as the new tip. | 
| 3652 | 0 |             if (!m_chain.Contains(candidate) && | 
| 3653 | 0 |                 !CBlockIndexWorkComparator()(candidate, pindex->pprev) && | 
| 3654 | 0 |                 !(candidate->nStatus & BLOCK_FAILED_MASK)) { | 
| 3655 | 0 |                 highpow_outofchain_headers.insert({candidate->nChainWork, candidate}); | 
| 3656 | 0 |             } | 
| 3657 | 0 |         } | 
| 3658 | 0 |     } | 
| 3659 |  |  | 
| 3660 |  |     // Disconnect (descendants of) pindex, and mark them invalid. | 
| 3661 | 0 |     while (true) { | 
| 3662 | 0 |         if (m_chainman.m_interrupt) break; | 
| 3663 |  |  | 
| 3664 |  |         // Make sure the queue of validation callbacks doesn't grow unboundedly. | 
| 3665 | 0 |         if (m_chainman.m_options.signals) LimitValidationInterfaceQueue(*m_chainman.m_options.signals); | 
| 3666 |  | 
 | 
| 3667 | 0 |         LOCK(cs_main); | 
| 3668 |  |         // Lock for as long as disconnectpool is in scope to make sure MaybeUpdateMempoolForReorg is | 
| 3669 |  |         // called after DisconnectTip without unlocking in between | 
| 3670 | 0 |         LOCK(MempoolMutex()); | 
| 3671 | 0 |         if (!m_chain.Contains(pindex)) break; | 
| 3672 | 0 |         pindex_was_in_chain = true; | 
| 3673 | 0 |         CBlockIndex *invalid_walk_tip = m_chain.Tip(); | 
| 3674 |  |  | 
| 3675 |  |         // ActivateBestChain considers blocks already in m_chain | 
| 3676 |  |         // unconditionally valid already, so force disconnect away from it. | 
| 3677 | 0 |         DisconnectedBlockTransactions disconnectpool{MAX_DISCONNECTED_TX_POOL_BYTES}; | 
| 3678 | 0 |         bool ret = DisconnectTip(state, &disconnectpool); | 
| 3679 |  |         // DisconnectTip will add transactions to disconnectpool. | 
| 3680 |  |         // Adjust the mempool to be consistent with the new tip, adding | 
| 3681 |  |         // transactions back to the mempool if disconnecting was successful, | 
| 3682 |  |         // and we're not doing a very deep invalidation (in which case | 
| 3683 |  |         // keeping the mempool up to date is probably futile anyway). | 
| 3684 | 0 |         MaybeUpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret); | 
| 3685 | 0 |         if (!ret) return false; | 
| 3686 | 0 |         assert(invalid_walk_tip->pprev == m_chain.Tip()); | 
| 3687 |  |  | 
| 3688 |  |         // We immediately mark the disconnected blocks as invalid. | 
| 3689 |  |         // This prevents a case where pruned nodes may fail to invalidateblock | 
| 3690 |  |         // and be left unable to start as they have no tip candidates (as there | 
| 3691 |  |         // are no blocks that meet the "have data and are not invalid per | 
| 3692 |  |         // nStatus" criteria for inclusion in setBlockIndexCandidates). | 
| 3693 | 0 |         invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID; | 
| 3694 | 0 |         m_blockman.m_dirty_blockindex.insert(invalid_walk_tip); | 
| 3695 | 0 |         setBlockIndexCandidates.erase(invalid_walk_tip); | 
| 3696 | 0 |         setBlockIndexCandidates.insert(invalid_walk_tip->pprev); | 
| 3697 | 0 |         if (invalid_walk_tip == to_mark_failed->pprev && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) { | 
| 3698 |  |             // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children | 
| 3699 |  |             // need to be BLOCK_FAILED_CHILD instead. | 
| 3700 | 0 |             to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD; | 
| 3701 | 0 |             m_blockman.m_dirty_blockindex.insert(to_mark_failed); | 
| 3702 | 0 |         } | 
| 3703 |  |  | 
| 3704 |  |         // Mark out-of-chain descendants of the invalidated block as invalid | 
| 3705 |  |         // (possibly replacing a pre-existing BLOCK_FAILED_VALID with BLOCK_FAILED_CHILD) | 
| 3706 |  |         // Add any equal or more work headers that are not invalidated to setBlockIndexCandidates | 
| 3707 |  |         // Recalculate m_best_header if it became invalid. | 
| 3708 | 0 |         auto candidate_it = highpow_outofchain_headers.lower_bound(invalid_walk_tip->pprev->nChainWork); | 
| 3709 |  |  | 
| 3710 | 0 |         const bool best_header_needs_update{m_chainman.m_best_header->GetAncestor(invalid_walk_tip->nHeight) == invalid_walk_tip}; | 
| 3711 | 0 |         if (best_header_needs_update) { | 
| 3712 |  |             // pprev is definitely still valid at this point, but there may be better ones | 
| 3713 | 0 |             m_chainman.m_best_header = invalid_walk_tip->pprev; | 
| 3714 | 0 |         } | 
| 3715 | 0 |  | 
| 3716 | 0 |         while (candidate_it != highpow_outofchain_headers.end()) { | 
| 3717 | 0 |             CBlockIndex* candidate{candidate_it->second}; | 
| 3718 | 0 |             if (candidate->GetAncestor(invalid_walk_tip->nHeight) == invalid_walk_tip) { | 
| 3719 |  |                 // Children of failed blocks should be marked as BLOCK_FAILED_CHILD instead. | 
| 3720 | 0 |                 candidate->nStatus &= ~BLOCK_FAILED_VALID; | 
| 3721 | 0 |                 candidate->nStatus |= BLOCK_FAILED_CHILD; | 
| 3722 | 0 |                 m_blockman.m_dirty_blockindex.insert(candidate); | 
| 3723 |  |                 // If invalidated, the block is irrelevant for setBlockIndexCandidates | 
| 3724 |  |                 // and for m_best_header and can be removed from the cache. | 
| 3725 | 0 |                 candidate_it = highpow_outofchain_headers.erase(candidate_it); | 
| 3726 | 0 |                 continue; | 
| 3727 | 0 |             } | 
| 3728 | 0 |             if (!CBlockIndexWorkComparator()(candidate, invalid_walk_tip->pprev) && | 
| 3729 | 0 |                 candidate->IsValid(BLOCK_VALID_TRANSACTIONS) && | 
| 3730 | 0 |                 candidate->HaveNumChainTxs()) { | 
| 3731 | 0 |                 setBlockIndexCandidates.insert(candidate); | 
| 3732 |  |                 // Do not remove candidate from the highpow_outofchain_headers cache, because it might be a descendant of the block being invalidated | 
| 3733 |  |                 // which needs to be marked failed later. | 
| 3734 | 0 |             } | 
| 3735 | 0 |             if (best_header_needs_update && | 
| 3736 | 0 |                 m_chainman.m_best_header->nChainWork < candidate->nChainWork) { | 
| 3737 | 0 |                 m_chainman.m_best_header = candidate; | 
| 3738 | 0 |             } | 
| 3739 | 0 |             ++candidate_it; | 
| 3740 | 0 |         } | 
| 3741 |  |  | 
| 3742 |  |         // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future | 
| 3743 |  |         // iterations, or, if it's the last one, call InvalidChainFound on it. | 
| 3744 | 0 |         to_mark_failed = invalid_walk_tip; | 
| 3745 | 0 |     } | 
| 3746 |  |  | 
| 3747 | 0 |     m_chainman.CheckBlockIndex(); | 
| 3748 |  | 
 | 
| 3749 | 0 |     { | 
| 3750 | 0 |         LOCK(cs_main); | 
| 3751 | 0 |         if (m_chain.Contains(to_mark_failed)) { | 
| 3752 |  |             // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed. | 
| 3753 | 0 |             return false; | 
| 3754 | 0 |         } | 
| 3755 |  |  | 
| 3756 |  |         // Mark pindex as invalid if it never was in the main chain | 
| 3757 | 0 |         if (!pindex_was_in_chain && !(pindex->nStatus & BLOCK_FAILED_MASK)) { | 
| 3758 | 0 |             pindex->nStatus |= BLOCK_FAILED_VALID; | 
| 3759 | 0 |             m_blockman.m_dirty_blockindex.insert(pindex); | 
| 3760 | 0 |             setBlockIndexCandidates.erase(pindex); | 
| 3761 | 0 |         } | 
| 3762 |  |  | 
| 3763 |  |         // If any new blocks somehow arrived while we were disconnecting | 
| 3764 |  |         // (above), then the pre-calculation of what should go into | 
| 3765 |  |         // setBlockIndexCandidates may have missed entries. This would | 
| 3766 |  |         // technically be an inconsistency in the block index, but if we clean | 
| 3767 |  |         // it up here, this should be an essentially unobservable error. | 
| 3768 |  |         // Loop back over all block index entries and add any missing entries | 
| 3769 |  |         // to setBlockIndexCandidates. | 
| 3770 | 0 |         for (auto& [_, block_index] : m_blockman.m_block_index) { | 
| 3771 | 0 |             if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveNumChainTxs() && !setBlockIndexCandidates.value_comp()(&block_index, m_chain.Tip())) { | 
| 3772 | 0 |                 setBlockIndexCandidates.insert(&block_index); | 
| 3773 | 0 |             } | 
| 3774 | 0 |         } | 
| 3775 |  | 
 | 
| 3776 | 0 |         InvalidChainFound(to_mark_failed); | 
| 3777 | 0 |     } | 
| 3778 |  |  | 
| 3779 |  |     // Only notify about a new block tip if the active chain was modified. | 
| 3780 | 0 |     if (pindex_was_in_chain) { | 
| 3781 |  |         // Ignoring return value for now, this could be changed to bubble up | 
| 3782 |  |         // kernel::Interrupted value to the caller so the caller could | 
| 3783 |  |         // distinguish between completed and interrupted operations. It might | 
| 3784 |  |         // also make sense for the blockTip notification to have an enum | 
| 3785 |  |         // parameter indicating the source of the tip change so hooks can | 
| 3786 |  |         // distinguish user-initiated invalidateblock changes from other | 
| 3787 |  |         // changes. | 
| 3788 | 0 |         (void)m_chainman.GetNotifications().blockTip( | 
| 3789 | 0 |             /*state=*/GetSynchronizationState(m_chainman.IsInitialBlockDownload(), m_chainman.m_blockman.m_blockfiles_indexed), | 
| 3790 | 0 |             /*index=*/*to_mark_failed->pprev, | 
| 3791 | 0 |             /*verification_progress=*/WITH_LOCK(m_chainman.GetMutex(), return m_chainman.GuessVerificationProgress(to_mark_failed->pprev))); | 
| 3792 |  |  | 
| 3793 |  |         // Fire ActiveTipChange now for the current chain tip to make sure clients are notified. | 
| 3794 |  |         // ActivateBestChain may call this as well, but not necessarily. | 
| 3795 | 0 |         if (m_chainman.m_options.signals) { | 
| 3796 | 0 |             m_chainman.m_options.signals->ActiveTipChange(*Assert(m_chain.Tip()), m_chainman.IsInitialBlockDownload()); | 
| 3797 | 0 |         } | 
| 3798 | 0 |     } | 
| 3799 | 0 |     return true; | 
| 3800 | 0 | } | 
| 3801 |  |  | 
| 3802 |  | void Chainstate::SetBlockFailureFlags(CBlockIndex* invalid_block) | 
| 3803 | 7.40k | { | 
| 3804 | 7.40k |     AssertLockHeld(cs_main); | 
| 3805 |  |  | 
| 3806 | 1.50M |     for (auto& [_, block_index] : m_blockman.m_block_index) { | 
| 3807 | 1.50M |         if (invalid_block != &block_index && block_index.GetAncestor(invalid_block->nHeight) == invalid_block) { | 
| 3808 | 352 |             block_index.nStatus = (block_index.nStatus & ~BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD; | 
| 3809 | 352 |             m_blockman.m_dirty_blockindex.insert(&block_index); | 
| 3810 | 352 |         } | 
| 3811 | 1.50M |     } | 
| 3812 | 7.40k | } | 
| 3813 |  |  | 
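Editor's note: both SetBlockFailureFlags above and the `(nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD` rewrite in InvalidateBlock demote a "failed itself" marker to a "failed because of an ancestor" marker. A minimal sketch of that bit bookkeeping follows; the flag values mirror chain.h but are restated here as assumptions.

```cpp
// Sketch of the BLOCK_FAILED_* flag handling (values assumed to match chain.h).
#include <cstdint>
#include <cstdio>

enum : uint32_t {
    BLOCK_FAILED_VALID = 32, // this block itself failed a validity check
    BLOCK_FAILED_CHILD = 64, // invalid only because an ancestor is invalid
    BLOCK_FAILED_MASK  = BLOCK_FAILED_VALID | BLOCK_FAILED_CHILD,
};

int main()
{
    uint32_t status = BLOCK_FAILED_VALID;
    // Demote: once an ancestor is known to be the real culprit, this block is
    // only a "failed child".
    status = (status & ~BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
    std::printf("failed=%d child=%d\n",
                (status & BLOCK_FAILED_VALID) != 0, (status & BLOCK_FAILED_CHILD) != 0);
}
```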
| 3814 | 0 | void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) { | 
| 3815 | 0 |     AssertLockHeld(cs_main); | 
| 3816 |  | 
 | 
| 3817 | 0 |     int nHeight = pindex->nHeight; | 
| 3818 |  |  | 
| 3819 |  |     // Remove the invalidity flag from this block and all its descendants and ancestors. | 
| 3820 | 0 |     for (auto& [_, block_index] : m_blockman.m_block_index) { | 
| 3821 | 0 |         if ((block_index.nStatus & BLOCK_FAILED_MASK) && (block_index.GetAncestor(nHeight) == pindex || pindex->GetAncestor(block_index.nHeight) == &block_index)) { | 
| 3822 | 0 |             block_index.nStatus &= ~BLOCK_FAILED_MASK; | 
| 3823 | 0 |             m_blockman.m_dirty_blockindex.insert(&block_index); | 
| 3824 | 0 |             if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveNumChainTxs() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), &block_index)) { | 
| 3825 | 0 |                 setBlockIndexCandidates.insert(&block_index); | 
| 3826 | 0 |             } | 
| 3827 | 0 |             if (&block_index == m_chainman.m_best_invalid) { | 
| 3828 |  |                 // Reset invalid block marker if it was pointing to one of those. | 
| 3829 | 0 |                 m_chainman.m_best_invalid = nullptr; | 
| 3830 | 0 |             } | 
| 3831 | 0 |         } | 
| 3832 | 0 |     } | 
| 3833 | 0 | } | 
| 3834 |  |  | 
| 3835 |  | void Chainstate::TryAddBlockIndexCandidate(CBlockIndex* pindex) | 
| 3836 | 10.3M | { | 
| 3837 | 10.3M |     AssertLockHeld(cs_main); | 
| 3838 |  |     // The block only is a candidate for the most-work-chain if it has the same | 
| 3839 |  |     // or more work than our current tip. | 
| 3840 | 10.3M |     if (m_chain.Tip() != nullptr && setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) { | 
| 3841 | 149 |         return; | 
| 3842 | 149 |     } | 
| 3843 |  |  | 
| 3844 | 10.3M |     bool is_active_chainstate = this == &m_chainman.ActiveChainstate(); | 
| 3845 | 10.3M |     if (is_active_chainstate) { | 
| 3846 |  |         // The active chainstate should always add entries that have more | 
| 3847 |  |         // work than the tip. | 
| 3848 | 10.3M |         setBlockIndexCandidates.insert(pindex); | 
| 3849 | 10.3M |     } else if (!m_disabled) { | 
| 3850 |  |         // For the background chainstate, we only consider connecting blocks | 
| 3851 |  |         // towards the snapshot base (which can't be nullptr or else we'll | 
| 3852 |  |         // never make progress). | 
| 3853 | 0 |         const CBlockIndex* snapshot_base{Assert(m_chainman.GetSnapshotBaseBlock())}; | 
| 3854 | 0 |         if (snapshot_base->GetAncestor(pindex->nHeight) == pindex) { | 
| 3855 | 0 |             setBlockIndexCandidates.insert(pindex); | 
| 3856 | 0 |         } | 
| 3857 | 0 |     } | 
| 3858 | 10.3M | } | 
| 3859 |  |  | 
| 3860 |  | /** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */ | 
| 3861 |  | void ChainstateManager::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos) | 
| 3862 | 28.2k | { | 
| 3863 | 28.2k |     AssertLockHeld(cs_main); | 
| 3864 | 28.2k |     pindexNew->nTx = block.vtx.size(); | 
| 3865 |  |     // Typically m_chain_tx_count will be 0 at this point, but it can be nonzero if this | 
| 3866 |  |     // is a pruned block which is being downloaded again, or if this is an | 
| 3867 |  |     // assumeutxo snapshot block which has a hardcoded m_chain_tx_count value from the | 
| 3868 |  |     // snapshot metadata. If the pindex is not the snapshot block and the | 
| 3869 |  |     // m_chain_tx_count value is not zero, assert that value is actually correct. | 
| 3870 | 28.2k |     auto prev_tx_sum = [](CBlockIndex& block) { return block.nTx + (block.pprev ? block.pprev->m_chain_tx_count : 0); }; | 
| 3871 | 28.2k |     if (!Assume(pindexNew->m_chain_tx_count == 0 || pindexNew->m_chain_tx_count == prev_tx_sum(*pindexNew) || | 
| 3872 | 28.2k |                 pindexNew == GetSnapshotBaseBlock())) { | 
| 3873 | 0 |         LogWarning("Internal bug detected: block %d has unexpected m_chain_tx_count %i that should be %i (%s %s). Please report this issue here: %s\n", | 
| 3874 | 0 |             pindexNew->nHeight, pindexNew->m_chain_tx_count, prev_tx_sum(*pindexNew), CLIENT_NAME, FormatFullVersion(), CLIENT_BUGREPORT); | 
| 3875 | 0 |         pindexNew->m_chain_tx_count = 0; | 
| 3876 | 0 |     } | 
| 3877 | 28.2k |     pindexNew->nFile = pos.nFile; | 
| 3878 | 28.2k |     pindexNew->nDataPos = pos.nPos; | 
| 3879 | 28.2k |     pindexNew->nUndoPos = 0; | 
| 3880 | 28.2k |     pindexNew->nStatus |= BLOCK_HAVE_DATA; | 
| 3881 | 28.2k |     if (DeploymentActiveAt(*pindexNew, *this, Consensus::DEPLOYMENT_SEGWIT)) { | 
| 3882 | 28.2k |         pindexNew->nStatus |= BLOCK_OPT_WITNESS; | 
| 3883 | 28.2k |     } | 
| 3884 | 28.2k |     pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS); | 
| 3885 | 28.2k |     m_blockman.m_dirty_blockindex.insert(pindexNew); | 
| 3886 |  |  | 
| 3887 | 28.2k |     if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveNumChainTxs()) { | 
| 3888 |  |         // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS. | 
| 3889 | 24.8k |         std::deque<CBlockIndex*> queue; | 
| 3890 | 24.8k |         queue.push_back(pindexNew); | 
| 3891 |  |  | 
| 3892 |  |         // Recursively process any descendant blocks that now may be eligible to be connected. | 
| 3893 | 50.0k |         while (!queue.empty()) { | 
| 3894 | 25.1k |             CBlockIndex *pindex = queue.front(); | 
| 3895 | 25.1k |             queue.pop_front(); | 
| 3896 |  |             // Before setting m_chain_tx_count, assert that it is 0 or already set to | 
| 3897 |  |             // the correct value. This assert will fail after receiving the | 
| 3898 |  |             // assumeutxo snapshot block if assumeutxo snapshot metadata has an | 
| 3899 |  |             // incorrect hardcoded AssumeutxoData::m_chain_tx_count value. | 
| 3900 | 25.1k |             if (!Assume(pindex->m_chain_tx_count == 0 || pindex->m_chain_tx_count == prev_tx_sum(*pindex))) { | 
| 3901 | 0 |                 LogWarning("Internal bug detected: block %d has unexpected m_chain_tx_count %i that should be %i (%s %s). Please report this issue here: %s\n", | 
| 3902 | 0 |                    pindex->nHeight, pindex->m_chain_tx_count, prev_tx_sum(*pindex), CLIENT_NAME, FormatFullVersion(), CLIENT_BUGREPORT); | 
| 3903 | 0 |             } | 
| 3904 | 25.1k |             pindex->m_chain_tx_count = prev_tx_sum(*pindex); | 
| 3905 | 25.1k |             pindex->nSequenceId = nBlockSequenceId++; | 
| 3906 | 25.1k |             for (Chainstate *c : GetAll()) { | 
| 3907 | 25.1k |                 c->TryAddBlockIndexCandidate(pindex); | 
| 3908 | 25.1k |             } | 
| 3909 | 25.1k |             std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex); | 
| 3910 | 25.4k |             while (range.first != range.second) { | 
| 3911 | 315 |                 std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first; | 
| 3912 | 315 |                 queue.push_back(it->second); | 
| 3913 | 315 |                 range.first++; | 
| 3914 | 315 |                 m_blockman.m_blocks_unlinked.erase(it); | 
| 3915 | 315 |             } | 
| 3916 | 25.1k |         } | 
| 3917 | 24.8k |     } else { | 
| 3918 | 3.39k |         if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) { | 
| 3919 | 3.39k |             m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew)); | 
| 3920 | 3.39k |         } | 
| 3921 | 3.39k |     } | 
| 3922 | 28.2k | } | 
| 3923 |  |  | 
| 3924 |  | static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true) | 
| 3925 | 716k | { | 
| 3926 |  |     // Check proof of work matches claimed amount | 
| 3927 | 716k |     if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams)) | 
| 3928 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed"); | 
| 3929 |  |  | 
| 3930 | 716k |     return true; | 
| 3931 | 716k | } | 
| 3932 |  |  | 
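Editor's note: CheckProofOfWork decodes the header's compact nBits target and requires the block hash, read as a 256-bit integer, to be at or below it. The following minimal sketch only shows the compact decoding arithmetic; it ignores the negative/overflow rejections that the real arith_uint256::SetCompact performs, and the constant used is just the well-known difficulty-1 value, not taken from this file.

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    // Compact encoding: top byte is a base-256 exponent, low 3 bytes the mantissa,
    // so target = mantissa * 256^(exponent - 3).
    const uint32_t nBits = 0x1d00ffff; // difficulty-1 target (genesis block)
    const uint32_t exponent = nBits >> 24;
    const uint32_t mantissa = nBits & 0x007fffff;
    std::printf("target = 0x%06x * 256^%u\n", mantissa, exponent - 3);
}
```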
| 3933 |  | static bool CheckMerkleRoot(const CBlock& block, BlockValidationState& state) | 
| 3934 | 366k | { | 
| 3935 | 366k |     if (block.m_checked_merkle_root) return true; | 
| 3936 |  |  | 
| 3937 | 337k |     bool mutated; | 
| 3938 | 337k |     uint256 merkle_root = BlockMerkleRoot(block, &mutated); | 
| 3939 | 337k |     if (block.hashMerkleRoot != merkle_root) { | 
| 3940 | 12 |         return state.Invalid( | 
| 3941 | 12 |             /*result=*/BlockValidationResult::BLOCK_MUTATED, | 
| 3942 | 12 |             /*reject_reason=*/"bad-txnmrklroot", | 
| 3943 | 12 |             /*debug_message=*/"hashMerkleRoot mismatch"); | 
| 3944 | 12 |     } | 
| 3945 |  |  | 
| 3946 |  |     // Check for merkle tree malleability (CVE-2012-2459): repeating sequences | 
| 3947 |  |     // of transactions in a block without affecting the merkle root of a block, | 
| 3948 |  |     // while still invalidating it. | 
| 3949 | 337k |     if (mutated) { | 
| 3950 | 442 |         return state.Invalid( | 
| 3951 | 442 |             /*result=*/BlockValidationResult::BLOCK_MUTATED, | 
| 3952 | 442 |             /*reject_reason=*/"bad-txns-duplicate", | 
| 3953 | 442 |             /*debug_message=*/"duplicate transaction"); | 
| 3954 | 442 |     } | 
| 3955 |  |  | 
| 3956 | 337k |     block.m_checked_merkle_root = true; | 
| 3957 | 337k |     return true; | 
| 3958 | 337k | } | 
| 3959 |  |  | 
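Editor's note: the `mutated` flag above flags the CVE-2012-2459 weakness: because Bitcoin's merkle construction duplicates the last node of every odd-length level, two different transaction lists can share a root. The toy sketch below demonstrates the tree-shape collision only; a trivial combine function stands in for double-SHA256, and all names here are illustrative, not from this file.

```cpp
// Structural demo of merkle malleation: {A,B,C} and {A,B,C,C} share a root.
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

static size_t Combine(size_t l, size_t r)
{
    return std::hash<std::string>{}(std::to_string(l) + ":" + std::to_string(r));
}

static size_t ToyMerkleRoot(std::vector<size_t> level)
{
    while (level.size() > 1) {
        if (level.size() % 2 != 0) level.push_back(level.back()); // duplicate last node
        std::vector<size_t> next;
        for (size_t i = 0; i < level.size(); i += 2) next.push_back(Combine(level[i], level[i + 1]));
        level = std::move(next);
    }
    return level.front();
}

int main()
{
    const std::vector<size_t> txs{1, 2, 3};        // {A, B, C}
    const std::vector<size_t> mutated{1, 2, 3, 3}; // {A, B, C, C} -- same root
    std::printf("%zu == %zu\n", ToyMerkleRoot(txs), ToyMerkleRoot(mutated));
}
```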
| 3960 |  | /** CheckWitnessMalleation performs checks for block malleation with regard to | 
| 3961 |  |  * its witnesses. | 
| 3962 |  |  * | 
| 3963 |  |  * Note: If the witness commitment is expected (i.e. `expect_witness_commitment | 
| 3964 |  |  * = true`), then the block is required to have at least one transaction and the | 
| 3965 |  |  * first transaction needs to have at least one input. */ | 
| 3966 |  | static bool CheckWitnessMalleation(const CBlock& block, bool expect_witness_commitment, BlockValidationState& state) | 
| 3967 | 57.3k | { | 
| 3968 | 57.3k |     if (expect_witness_commitment) { | 
| 3969 | 57.3k |         if (block.m_checked_witness_commitment) return true; | 
| 3970 |  |  | 
| 3971 | 29.1k |         int commitpos = GetWitnessCommitmentIndex(block); | 
| 3972 | 29.1k |         if (commitpos != NO_WITNESS_COMMITMENT) { | 
| 3973 | 29.1k |             assert(!block.vtx.empty() && !block.vtx[0]->vin.empty()); | 
| 3974 | 29.1k |             const auto& witness_stack{block.vtx[0]->vin[0].scriptWitness.stack}; | 
| 3975 |  |  | 
| 3976 | 29.1k |             if (witness_stack.size() != 1 || witness_stack[0].size() != 32) { | 
| 3977 | 0 |                 return state.Invalid( | 
| 3978 | 0 |                     /*result=*/BlockValidationResult::BLOCK_MUTATED, | 
| 3979 | 0 |                     /*reject_reason=*/"bad-witness-nonce-size", | 
| 3980 | 0 |                     /*debug_message=*/strprintf("%s : invalid witness reserved value size", __func__)); | 
| 3981 | 0 |             } | 
| 3982 |  |  | 
| 3983 |  |             // The malleation check is ignored; as the transaction tree itself | 
| 3984 |  |             // already does not permit it, it is impossible to trigger in the | 
| 3985 |  |             // witness tree. | 
| 3986 | 29.1k |             uint256 hash_witness = BlockWitnessMerkleRoot(block, /*mutated=*/nullptr); | 
| 3987 |  |  | 
| 3988 | 29.1k |             CHash256().Write(hash_witness).Write(witness_stack[0]).Finalize(hash_witness); | 
| 3989 | 29.1k |             if (memcmp(hash_witness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) { | 
| 3990 | 0 |                 return state.Invalid( | 
| 3991 | 0 |                     /*result=*/BlockValidationResult::BLOCK_MUTATED, | 
| 3992 | 0 |                     /*reject_reason=*/"bad-witness-merkle-match", | 
| 3993 | 0 |                     /*debug_message=*/strprintf("%s : witness merkle commitment mismatch", __func__)); | 
| 3994 | 0 |             } | 
| 3995 |  |  | 
| 3996 | 29.1k |             block.m_checked_witness_commitment = true; | 
| 3997 | 29.1k |             return true; | 
| 3998 | 29.1k |         } | 
| 3999 | 29.1k |     } | 
| 4000 |  |  | 
| 4001 |  |     // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam | 
| 4002 | 0 |     for (const auto& tx : block.vtx) { | 
| 4003 | 0 |         if (tx->HasWitness()) { | 
| 4004 | 0 |             return state.Invalid( | 
| 4005 | 0 |                 /*result=*/BlockValidationResult::BLOCK_MUTATED, | 
| 4006 | 0 |                 /*reject_reason=*/"unexpected-witness", | 
| 4007 | 0 |                 /*debug_message=*/strprintf("%s : unexpected witness data found", __func__)); | 
| 4008 | 0 |         } | 
| 4009 | 0 |     } | 
| 4010 |  |  | 
| 4011 | 0 |     return true; | 
| 4012 | 0 | } | 
| 4013 |  |  | 
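Editor's note: the memcmp against `scriptPubKey[6]` above assumes the BIP141 commitment output layout, which GenerateCoinbaseCommitment further down constructs byte by byte. A short reference sketch of that layout; the array name is an illustrative assumption, not an identifier from this file.

```cpp
// BIP141 witness commitment output (>= 38 bytes of scriptPubKey):
//   [0]     0x6a               OP_RETURN
//   [1]     0x24               push of 36 bytes
//   [2..5]  0xaa 0x21 0xa9 0xed  commitment tag
//   [6..37] SHA256d(witness merkle root || 32-byte witness reserved value)
#include <array>
#include <cstdint>

constexpr std::array<uint8_t, 6> WITNESS_COMMITMENT_HEADER{0x6a, 0x24, 0xaa, 0x21, 0xa9, 0xed};
```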
| 4014 |  | bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot) | 
| 4015 | 390k | { | 
| 4016 |  |     // These are checks that are independent of context. | 
| 4017 |  |  | 
| 4018 | 390k |     if (block.fChecked) | 
| 4019 | 53.6k |         return true; | 
| 4020 |  |  | 
| 4021 |  |     // Check that the header is valid (particularly PoW).  This is mostly | 
| 4022 |  |     // redundant with the call in AcceptBlockHeader. | 
| 4023 | 337k |     if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW)) | 
| 4024 | 0 |         return false; | 
| 4025 |  |  | 
| 4026 |  |     // Signet only: check block solution | 
| 4027 | 337k |     if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) { | 
| 4028 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure"); | 
| 4029 | 0 |     } | 
| 4030 |  |  | 
| 4031 |  |     // Check the merkle root. | 
| 4032 | 337k |     if (fCheckMerkleRoot && !CheckMerkleRoot(block, state)) { | 
| 4033 | 0 |         return false; | 
| 4034 | 0 |     } | 
| 4035 |  |  | 
| 4036 |  |     // All potential-corruption validation must be done before we do any | 
| 4037 |  |     // transaction validation, as otherwise we may mark the header as invalid | 
| 4038 |  |     // because we receive the wrong transactions for it. | 
| 4039 |  |     // Note that witness malleability is checked in ContextualCheckBlock, so no | 
| 4040 |  |     // checks that use witness data may be performed here. | 
| 4041 |  |  | 
| 4042 |  |     // Size limits | 
| 4043 | 337k |     if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(TX_NO_WITNESS(block)) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT) | 
| 4044 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed"); | 
| 4045 |  |  | 
| 4046 |  |     // First transaction must be coinbase, the rest must not be | 
| 4047 | 337k |     if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) | 
| 4048 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase"); | 
| 4049 | 480k |     for (unsigned int i = 1; i < block.vtx.size(); i++) | 
| 4050 | 143k |         if (block.vtx[i]->IsCoinBase()) | 
| 4051 | 0 |             return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase"); | 
| 4052 |  |  | 
| 4053 |  |     // Check transactions | 
| 4054 |  |     // Must check for duplicate inputs (see CVE-2018-17144) | 
| 4055 | 480k |     for (const auto& tx : block.vtx) { | 
| 4056 | 480k |         TxValidationState tx_state; | 
| 4057 | 480k |         if (!CheckTransaction(*tx, tx_state)) { | 
| 4058 |  |             // CheckBlock() does context-free validation checks. The only | 
| 4059 |  |             // possible failures are consensus failures. | 
| 4060 | 0 |             assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS); | 
| 4061 | 0 |             return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), | 
| 4062 | 0 |                                  strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage())); | 
| 4063 | 0 |         } | 
| 4064 | 480k |     } | 
| 4065 |  |     // This underestimates the number of sigops, because unlike ConnectBlock it | 
| 4066 |  |     // does not count witness and p2sh sigops. | 
| 4067 | 337k |     unsigned int nSigOps = 0; | 
| 4068 | 337k |     for (const auto& tx : block.vtx) | 
| 4069 | 480k |     { | 
| 4070 | 480k |         nSigOps += GetLegacySigOpCount(*tx); | 
| 4071 | 480k |     } | 
| 4072 | 337k |     if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST) | 
| 4073 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount"); | 
| 4074 |  |  | 
| 4075 | 337k |     if (fCheckPOW && fCheckMerkleRoot) | 
| 4076 | 337k |         block.fChecked = true; | 
| 4077 |  |  | 
| 4078 | 337k |     return true; | 
| 4079 | 337k | } | 
| 4080 |  |  | 
| 4081 |  | void ChainstateManager::UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev) const | 
| 4082 | 2.00M | { | 
| 4083 | 2.00M |     int commitpos = GetWitnessCommitmentIndex(block); | 
| 4084 | 2.00M |     static const std::vector<unsigned char> nonce(32, 0x00); | 
| 4085 | 2.00M |     if (commitpos != NO_WITNESS_COMMITMENT && DeploymentActiveAfter(pindexPrev, *this, Consensus::DEPLOYMENT_SEGWIT) && !block.vtx[0]->HasWitness()) { | 
| 4086 | 2.00M |         CMutableTransaction tx(*block.vtx[0]); | 
| 4087 | 2.00M |         tx.vin[0].scriptWitness.stack.resize(1); | 
| 4088 | 2.00M |         tx.vin[0].scriptWitness.stack[0] = nonce; | 
| 4089 | 2.00M |         block.vtx[0] = MakeTransactionRef(std::move(tx)); | 
| 4090 | 2.00M |     } | 
| 4091 | 2.00M | } | 
| 4092 |  |  | 
| 4093 |  | std::vector<unsigned char> ChainstateManager::GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev) const | 
| 4094 | 2.00M | { | 
| 4095 | 2.00M |     std::vector<unsigned char> commitment; | 
| 4096 | 2.00M |     int commitpos = GetWitnessCommitmentIndex(block); | 
| 4097 | 2.00M |     std::vector<unsigned char> ret(32, 0x00); | 
| 4098 | 2.00M |     if (commitpos == NO_WITNESS_COMMITMENT) { | 
| 4099 | 2.00M |         uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr); | 
| 4100 | 2.00M |         CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot); | 
| 4101 | 2.00M |         CTxOut out; | 
| 4102 | 2.00M |         out.nValue = 0; | 
| 4103 | 2.00M |         out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT); | 
| 4104 | 2.00M |         out.scriptPubKey[0] = OP_RETURN; | 
| 4105 | 2.00M |         out.scriptPubKey[1] = 0x24; | 
| 4106 | 2.00M |         out.scriptPubKey[2] = 0xaa; | 
| 4107 | 2.00M |         out.scriptPubKey[3] = 0x21; | 
| 4108 | 2.00M |         out.scriptPubKey[4] = 0xa9; | 
| 4109 | 2.00M |         out.scriptPubKey[5] = 0xed; | 
| 4110 | 2.00M |         memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32); | 
| 4111 | 2.00M |         commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end()); | 
| 4112 | 2.00M |         CMutableTransaction tx(*block.vtx[0]); | 
| 4113 | 2.00M |         tx.vout.push_back(out); | 
| 4114 | 2.00M |         block.vtx[0] = MakeTransactionRef(std::move(tx)); | 
| 4115 | 2.00M |     } | 
| 4116 | 2.00M |     UpdateUncommittedBlockStructures(block, pindexPrev); | 
| 4117 | 2.00M |     return commitment; | 
| 4118 | 2.00M | } | 
| 4119 |  |  | 
| 4120 |  | bool HasValidProofOfWork(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams) | 
| 4121 | 725k | { | 
| 4122 | 725k |     return std::all_of(headers.cbegin(), headers.cend(), | 
| 4123 | 725k |             [&](const auto& header) { return CheckProofOfWork(header.GetHash(), header.nBits, consensusParams);}); | 
| 4124 | 725k | } | 
| 4125 |  |  | 
| 4126 |  | bool IsBlockMutated(const CBlock& block, bool check_witness_root) | 
| 4127 | 29.6k | { | 
| 4128 | 29.6k |     BlockValidationState state; | 
| 4129 | 29.6k |     if (!CheckMerkleRoot(block, state)) { | 
| 4130 | 454 |         LogDebug(BCLog::VALIDATION, "Block mutated: %s\n", state.ToString()); | 
| 4131 | 454 |         return true; | 
| 4132 | 454 |     } | 
| 4133 |  |  | 
| 4134 | 29.1k |     if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) { | 
| 4135 |  |         // Consider the block mutated if any transaction is 64 bytes in size (see 3.1 | 
| 4136 |  |         // in "Weaknesses in Bitcoin’s Merkle Root Construction": | 
| 4137 |  |         // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/attachments/20190225/a27d8837/attachment-0001.pdf). | 
| 4138 |  |         // | 
| 4139 |  |         // Note: This is not a consensus change as this only applies to blocks that | 
| 4140 |  |         // don't have a coinbase transaction and would therefore already be invalid. | 
| 4141 | 0 |         return std::any_of(block.vtx.begin(), block.vtx.end(), | 
| 4142 | 0 |                            [](auto& tx) { return GetSerializeSize(TX_NO_WITNESS(tx)) == 64; }); | 
| 4143 | 29.1k |     } else { | 
| 4144 |  |         // Theoretically it is still possible for a block with a 64 byte | 
| 4145 |  |         // coinbase transaction to be mutated but we neglect that possibility | 
| 4146 |  |         // here as it requires at least 224 bits of work. | 
| 4147 | 29.1k |     } | 
| 4148 |  |  | 
| 4149 | 29.1k |     if (!CheckWitnessMalleation(block, check_witness_root, state)) { | 
| 4150 | 0 |         LogDebug(BCLog::VALIDATION, "Block mutated: %s\n", state.ToString()); | 
| 4151 | 0 |         return true; | 
| 4152 | 0 |     } | 
| 4153 |  |  | 
| 4154 | 29.1k |     return false; | 
| 4155 | 29.1k | } | 
| 4156 |  |  | 
| 4157 |  | arith_uint256 CalculateClaimedHeadersWork(std::span<const CBlockHeader> headers) | 
| 4158 | 1.80M | { | 
| 4159 | 1.80M |     arith_uint256 total_work{0}; | 
| 4160 | 1.80M |     for (const CBlockHeader& header : headers) { | 
| 4161 | 1.80M |         CBlockIndex dummy(header); | 
| 4162 | 1.80M |         total_work += GetBlockProof(dummy); | 
| 4163 | 1.80M |     } | 
| 4164 | 1.80M |     return total_work; | 
| 4165 | 1.80M | } | 
| 4166 |  |  | 
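Editor's note: CalculateClaimedHeadersWork sums GetBlockProof over the headers, using whatever target each header claims in nBits. For reference, the per-header quantity (defined in chain.cpp, restated here from general knowledge rather than from this file, with integer division) is expected to be

```latex
\mathrm{work} = \left\lfloor \frac{2^{256}}{\mathrm{target}+1} \right\rfloor
             = \left\lfloor \frac{2^{256}-\mathrm{target}-1}{\mathrm{target}+1} \right\rfloor + 1
             = \left\lfloor \frac{\lnot\,\mathrm{target}}{\mathrm{target}+1} \right\rfloor + 1
```

where the last form uses the 256-bit bitwise complement so the computation never needs a 257-bit intermediate.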
| 4167 |  | /** Context-dependent validity checks. | 
| 4168 |  |  *  By "context", we mean only the previous block headers, but not the UTXO | 
| 4169 |  |  *  set; UTXO-related validity checks are done in ConnectBlock(). | 
| 4170 |  |  *  NOTE: This function is not currently invoked by ConnectBlock(), so we | 
| 4171 |  |  *  should consider upgrade issues if we change which consensus rules are | 
| 4172 |  |  *  enforced in this function (eg by adding a new consensus rule). See comment | 
| 4173 |  |  *  in ConnectBlock(). | 
| 4174 |  |  *  Note that -reindex-chainstate skips the validation that happens here! | 
| 4175 |  |  * | 
| 4176 |  |  *  NOTE: failing to check the header's height against the last checkpoint's opened a DoS vector between | 
| 4177 |  |  *  v0.12 and v0.15 (when no additional protection was in place) whereby an attacker could unboundedly | 
| 4178 |  |  *  grow our in-memory block index. See https://bitcoincore.org/en/2024/07/03/disclose-header-spam. | 
| 4179 |  |  */ | 
| 4180 |  | static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const ChainstateManager& chainman, const CBlockIndex* pindexPrev) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) | 
| 4181 | 353k | { | 
| 4182 | 353k |     AssertLockHeld(::cs_main); | 
| 4183 | 353k |     assert(pindexPrev != nullptr); | 
| 4184 | 353k |     const int nHeight = pindexPrev->nHeight + 1; | 
| 4185 |  |  | 
| 4186 |  |     // Check proof of work | 
| 4187 | 353k |     const Consensus::Params& consensusParams = chainman.GetConsensus(); | 
| 4188 | 353k |     if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams)) | 
| 4189 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work"); | 
| 4190 |  |  | 
| 4191 |  |     // Check timestamp against prev | 
| 4192 | 353k |     if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) | 
| 4193 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early"); | 
| 4194 |  |  | 
| 4195 |  |     // Testnet4 and regtest only: Check timestamp against prev for difficulty-adjustment | 
| 4196 |  |     // blocks to prevent timewarp attacks (see https://github.com/bitcoin/bitcoin/pull/15482). | 
| 4197 | 353k |     if (consensusParams.enforce_BIP94) { | 
| 4198 |  |         // Check timestamp for the first block of each difficulty adjustment | 
| 4199 |  |         // interval, except the genesis block. | 
| 4200 | 0 |         if (nHeight % consensusParams.DifficultyAdjustmentInterval() == 0) { | 
| 4201 | 0 |             if (block.GetBlockTime() < pindexPrev->GetBlockTime() - MAX_TIMEWARP) { | 
| 4202 | 0 |                 return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-timewarp-attack", "block's timestamp is too early on diff adjustment block"); | 
| 4203 | 0 |             } | 
| 4204 | 0 |         } | 
| 4205 | 0 |     } | 
| 4206 |  |  | 
| 4207 |  |     // Check timestamp | 
| 4208 | 353k |     if (block.Time() > NodeClock::now() + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) { | 
| 4209 | 4.50k |         return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future"); | 
| 4210 | 4.50k |     } | 
| 4211 |  |  | 
| 4212 |  |     // Reject blocks with outdated version | 
| 4213 | 348k |     if ((block.nVersion < 2 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_HEIGHTINCB)) || 
| 4214 | 348k |         (block.nVersion < 3 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_DERSIG)) || 
| 4215 | 348k |         (block.nVersion < 4 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_CLTV))) { | 
| 4216 | 240k |             return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion), 
| 4217 | 240k |                                  strprintf("rejected nVersion=0x%08x block", block.nVersion)); | 
| 4218 | 240k |     } | 
| 4219 |  |  | 
| 4220 | 108k |     return true; | 
| 4221 | 348k | } | 
| 4222 |  |  | 
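Editor's note: the "time-too-old" rule above compares the header's timestamp against the parent's GetMedianTimePast, i.e. the median of the previous (up to) 11 block timestamps. A toy computation of that value under the assumption of a full 11-block window; the function name is illustrative, not the real CBlockIndex member.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Median of the last 11 block times; a new header must be strictly later than this.
int64_t MedianTimePast(std::vector<int64_t> times)
{
    std::sort(times.begin(), times.end());
    return times[times.size() / 2];
}

int main()
{
    std::vector<int64_t> last11{1000, 1600, 1200, 1800, 1400, 2000, 1100, 1900, 1300, 1700, 1500};
    std::printf("MTP = %lld\n", static_cast<long long>(MedianTimePast(last11))); // 1500
}
```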
| 4223 |  | /** NOTE: This function is not currently invoked by ConnectBlock(), so we | 
| 4224 |  |  *  should consider upgrade issues if we change which consensus rules are | 
| 4225 |  |  *  enforced in this function (eg by adding a new consensus rule). See comment | 
| 4226 |  |  *  in ConnectBlock(). | 
| 4227 |  |  *  Note that -reindex-chainstate skips the validation that happens here! | 
| 4228 |  |  */ | 
| 4229 |  | static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const ChainstateManager& chainman, const CBlockIndex* pindexPrev) | 
| 4230 | 29.1k | { | 
| 4231 | 29.1k |     const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1; | 
| 4232 |  |  | 
| 4233 |  |     // Enforce BIP113 (Median Time Past). | 
| 4234 | 29.1k |     bool enforce_locktime_median_time_past{false}; | 
| 4235 | 29.1k |     if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_CSV)) { | 
| 4236 | 29.1k |         assert(pindexPrev != nullptr); | 
| 4237 | 29.1k |         enforce_locktime_median_time_past = true; | 
| 4238 | 29.1k |     } | 
| 4239 |  |  | 
| 4240 | 29.1k |     const int64_t nLockTimeCutoff{enforce_locktime_median_time_past ? | 
| 4241 | 29.1k |                                       pindexPrev->GetMedianTimePast() : | 
| 4242 | 29.1k |                                       block.GetBlockTime()}; | 
| 4243 |  |  | 
| 4244 |  |     // Check that all transactions are finalized | 
| 4245 | 169k |     for (const auto& tx : block.vtx) { | 
| 4246 | 169k |         if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) { | 
| 4247 | 899 |             return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction"); | 
| 4248 | 899 |         } | 
| 4249 | 169k |     } | 
| 4250 |  |  | 
| 4251 |  |     // Enforce rule that the coinbase starts with serialized block height | 
| 4252 | 28.2k |     if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_HEIGHTINCB)) | 
| 4253 | 28.2k |     { | 
| 4254 | 28.2k |         CScript expect = CScript() << nHeight; | 
| 4255 | 28.2k |         if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() || | 
| 4256 | 28.2k |             !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) { | 
| 4257 | 0 |             return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase"); | 
| 4258 | 0 |         } | 
| 4259 | 28.2k |     } | 
| 4260 |  |  | 
| 4261 |  |     // Validation for witness commitments. | 
| 4262 |  |     // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the | 
| 4263 |  |     //   coinbase (where 0x0000....0000 is used instead). | 
| 4264 |  |     // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained). | 
| 4265 |  |     // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header). | 
| 4266 |  |     // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are | 
| 4267 |  |     //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are | 
| 4268 |  |     //   multiple, the last one is used. | 
| 4269 | 28.2k |     if (!CheckWitnessMalleation(block, DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_SEGWIT), state)) { | 
| 4270 | 0 |         return false; | 
| 4271 | 0 |     } | 
| 4272 |  |  | 
| 4273 |  |     // After the coinbase witness reserved value and commitment are verified, | 
| 4274 |  |     // we can check if the block weight passes (before we've checked the | 
| 4275 |  |     // coinbase witness, it would be possible for the weight to be too | 
| 4276 |  |     // large by filling up the coinbase witness, which doesn't change | 
| 4277 |  |     // the block hash, so we couldn't mark the block as permanently | 
| 4278 |  |     // failed). | 
| 4279 | 28.2k |     if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) { | 
| 4280 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__)); | 
| 4281 | 0 |     } | 
| 4282 |  |  | 
| 4283 | 28.2k |     return true; | 
| 4284 | 28.2k | } | 
| 4285 |  |  | 
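Editor's note: the BIP34 check above requires the coinbase scriptSig to begin with the block height pushed as a minimal CScriptNum. The sketch below is a toy encoder that reproduces the common case only; it deliberately omits the sign-byte padding the real CScriptNum adds when the top byte has bit 0x80 set, and the helper name is an illustrative assumption.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Toy CScriptNum-style push of a non-negative block height.
std::vector<uint8_t> EncodeHeightPush(uint32_t height)
{
    std::vector<uint8_t> num;
    while (height > 0) { num.push_back(height & 0xff); height >>= 8; } // little-endian
    std::vector<uint8_t> script{static_cast<uint8_t>(num.size())};     // direct push opcode
    script.insert(script.end(), num.begin(), num.end());
    return script;
}

int main()
{
    for (uint8_t b : EncodeHeightPush(500000)) std::printf("%02x ", b); // 03 20 a1 07
    std::printf("\n");
}
```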
| 4286 |  | bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, CBlockIndex** ppindex, bool min_pow_checked) | 
| 4287 | 2.01M | { | 
| 4288 | 2.01M |     AssertLockHeld(cs_main); | 
| 4289 |  |  | 
| 4290 |  |     // Check for duplicate | 
| 4291 | 2.01M |     uint256 hash = block.GetHash(); | 
| 4292 | 2.01M |     BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)}; | 
| 4293 | 2.01M |     if (hash != GetConsensus().hashGenesisBlock) { | 
| 4294 | 2.01M |         if (miSelf != m_blockman.m_block_index.end()) { | 
| 4295 |  |             // Block header is already known. | 
| 4296 | 1.63M |             CBlockIndex* pindex = &(miSelf->second); | 
| 4297 | 1.63M |             if (ppindex) | 
| 4298 | 1.63M |                 *ppindex = pindex; | 
| 4299 | 1.63M |             if (pindex->nStatus & BLOCK_FAILED_MASK) { | 
| 4300 | 48.4k |                 LogDebug(BCLog::VALIDATION, "%s: block %s is marked invalid\n", __func__, hash.ToString()); | 
| 4301 | 48.4k |                 return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate-invalid"); | 
| 4302 | 48.4k |             } | 
| 4303 | 1.58M |             return true; | 
| 4304 | 1.63M |         } | 
| 4305 |  |  | 
| 4306 | 378k |         if (!CheckBlockHeader(block, state, GetConsensus())) { | 
| 4307 | 0 |             LogDebug(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString()); | 
| 4308 | 0 |             return false; | 
| 4309 | 0 |         } | 
| 4310 |  |  | 
| 4311 |  |         // Get prev block index | 
| 4312 | 378k |         CBlockIndex* pindexPrev = nullptr; | 
| 4313 | 378k |         BlockMap::iterator mi{m_blockman.m_block_index.find(block.hashPrevBlock)}; | 
| 4314 | 378k |         if (mi == m_blockman.m_block_index.end()) { | 
| 4315 | 0 |             LogDebug(BCLog::VALIDATION, "header %s has prev block not found: %s\n", hash.ToString(), block.hashPrevBlock.ToString()); | 
| 4316 | 0 |             return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found"); | 
| 4317 | 0 |         } | 
| 4318 | 378k |         pindexPrev = &((*mi).second); | 
| 4319 | 378k |         if (pindexPrev->nStatus & BLOCK_FAILED_MASK) { | 
| 4320 | 25.6k |             LogDebug(BCLog::VALIDATION, "header %s has prev block invalid: %s\n", hash.ToString(), block.hashPrevBlock.ToString()); | 
| 4321 | 25.6k |             return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk"); | 
| 4322 | 25.6k |         } | 
| 4323 | 353k |         if (!ContextualCheckBlockHeader(block, state, m_blockman, *this, pindexPrev)) { | 
| 4324 | 244k |             LogDebug(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString()); | 
| 4325 | 244k |             return false; | 
| 4326 | 244k |         } | 
| 4327 | 353k |     } | 
| 4328 | 108k |     if (!min_pow_checked) { | 
| 4329 | 0 |         LogDebug(BCLog::VALIDATION, "%s: not adding new block header %s, missing anti-dos proof-of-work validation\n", __func__, hash.ToString()); | 
| 4330 | 0 |         return state.Invalid(BlockValidationResult::BLOCK_HEADER_LOW_WORK, "too-little-chainwork"); | 
| 4331 | 0 |     } | 
| 4332 | 108k |     CBlockIndex* pindex{m_blockman.AddToBlockIndex(block, m_best_header)}; | 
| 4333 |  |  | 
| 4334 | 108k |     if (ppindex) | 
| 4335 | 108k |         *ppindex = pindex; | 
| 4336 |  |  | 
| 4337 | 108k |     return true; | 
| 4338 | 108k | } | 
| 4339 |  |  | 
| 4340 |  | // Exposed wrapper for AcceptBlockHeader | 
| 4341 |  | bool ChainstateManager::ProcessNewBlockHeaders(std::span<const CBlockHeader> headers, bool min_pow_checked, BlockValidationState& state, const CBlockIndex** ppindex) | 
| 4342 | 1.98M | { | 
| 4343 | 1.98M |     AssertLockNotHeld(cs_main); | 
| 4344 | 1.98M |     { | 
| 4345 | 1.98M |         LOCK(cs_main); | 
| 4346 | 1.98M |         for (const CBlockHeader& header : headers) { | 
| 4347 | 1.98M |             CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast | 
| 4348 | 1.98M |             bool accepted{AcceptBlockHeader(header, state, &pindex, min_pow_checked)}; | 
| 4349 | 1.98M |             CheckBlockIndex(); | 
| 4350 |  |  | 
| 4351 | 1.98M |             if (!accepted) { | 
| 4352 | 318k |                 return false; | 
| 4353 | 318k |             } | 
| 4354 | 1.66M |             if (ppindex) { | 
| 4355 | 1.66M |                 *ppindex = pindex; | 
| 4356 | 1.66M |             } | 
| 4357 | 1.66M |         } | 
| 4358 | 1.98M |     } | 
| 4359 | 1.66M |     if (NotifyHeaderTip()) { | 
| 4360 | 47.2k |         if (IsInitialBlockDownload() && ppindex && *ppindex) { | 
| 4361 | 15.0k |             const CBlockIndex& last_accepted{**ppindex}; | 
| 4362 | 15.0k |             int64_t blocks_left{(NodeClock::now() - last_accepted.Time()) / GetConsensus().PowTargetSpacing()}; | 
| 4363 | 15.0k |             blocks_left = std::max<int64_t>(0, blocks_left); | 
| 4364 | 15.0k |             const double progress{100.0 * last_accepted.nHeight / (last_accepted.nHeight + blocks_left)}; | 
| 4365 | 15.0k |             LogInfo("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress); | 
| 4366 | 15.0k |         } | 
| 4367 | 47.2k |     } | 
| 4368 | 1.66M |     return true; | 
| 4369 | 1.98M | } | 
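
The "Synchronizing blockheaders" progress figure logged above is only a heuristic: it treats the gap between the last accepted header's timestamp and the current time as one missing block per target spacing. A self-contained sketch of the same arithmetic, with a hypothetical height, a hypothetical header age, and mainnet's 10-minute spacing assumed:

```cpp
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <cstdio>

int main()
{
    using namespace std::chrono;
    const seconds target_spacing{10 * 60};   // assumed 10-minute block interval
    const int64_t header_height{400'000};    // hypothetical last accepted header height
    const seconds header_age{hours{24} * 30}; // hypothetical: header is ~30 days behind now

    // Estimate how many blocks are still missing, then express height as a percentage.
    const int64_t blocks_left{std::max<int64_t>(0, header_age / target_spacing)};
    const double progress{100.0 * header_height / (header_height + blocks_left)};
    std::printf("height: %lld (~%.2f%%)\n", static_cast<long long>(header_height), progress);
}
```
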
| 4370 |  |  | 
| 4371 |  | void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t height, int64_t timestamp) | 
| 4372 | 0 | { | 
| 4373 | 0 |     AssertLockNotHeld(GetMutex()); | 
| 4374 | 0 |     { | 
| 4375 | 0 |         LOCK(GetMutex()); | 
| 4376 |  |         // Don't report headers presync progress if we already have a post-minchainwork header chain. | 
| 4377 |  |         // This means we lose reporting for potentially legitimate, but unlikely, deep reorgs, but | 
| 4378 |  |         // prevent attackers that spam low-work headers from filling our logs. | 
| 4379 | 0 |         if (m_best_header->nChainWork >= UintToArith256(GetConsensus().nMinimumChainWork)) return; | 
| 4380 |  |         // Rate limit headers presync updates to 4 per second, as these are not subject to DoS | 
| 4381 |  |         // protection. | 
| 4382 | 0 |         auto now = MockableSteadyClock::now(); | 
| 4383 | 0 |         if (now < m_last_presync_update + std::chrono::milliseconds{250}) return; | 
| 4384 | 0 |         m_last_presync_update = now; | 
| 4385 | 0 |     } | 
| 4386 | 0 |     bool initial_download = IsInitialBlockDownload(); | 
| 4387 | 0 |     GetNotifications().headerTip(GetSynchronizationState(initial_download, m_blockman.m_blockfiles_indexed), height, timestamp, /*presync=*/true); | 
| 4388 | 0 |     if (initial_download) { | 
| 4389 | 0 |         int64_t blocks_left{(NodeClock::now() - NodeSeconds{std::chrono::seconds{timestamp}}) / GetConsensus().PowTargetSpacing()}; | 
| 4390 | 0 |         blocks_left = std::max<int64_t>(0, blocks_left); | 
| 4391 | 0 |         const double progress{100.0 * height / (height + blocks_left)}; | 
| 4392 | 0 |         LogInfo("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress); | 
| 4393 | 0 |     } | 
| 4394 | 0 | } | 
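
The 250ms guard above is a plain steady-clock throttle (at most ~4 presync notifications per second). A generic sketch of that pattern, independent of the MockableSteadyClock machinery used here:

```cpp
#include <chrono>

// Drop updates that arrive within 250ms of the last one that was let through.
class PresyncThrottle
{
    std::chrono::steady_clock::time_point m_last{};

public:
    bool ShouldReport()
    {
        const auto now{std::chrono::steady_clock::now()};
        if (now < m_last + std::chrono::milliseconds{250}) return false;
        m_last = now;
        return true;
    }
};
```
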
| 4395 |  |  | 
| 4396 |  | /** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */ | 
| 4397 |  | bool ChainstateManager::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock, bool min_pow_checked) | 
| 4398 | 29.1k | { | 
| 4399 | 29.1k |     const CBlock& block = *pblock; | 
| 4400 |  |  | 
| 4401 | 29.1k |     if (fNewBlock) *fNewBlock = false; | 
| 4402 | 29.1k |     AssertLockHeld(cs_main); | 
| 4403 |  |  | 
| 4404 | 29.1k |     CBlockIndex *pindexDummy = nullptr; | 
| 4405 | 29.1k |     CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy; | 
| 4406 |  |  | 
| 4407 | 29.1k |     bool accepted_header{AcceptBlockHeader(block, state, &pindex, min_pow_checked)}; | 
| 4408 | 29.1k |     CheckBlockIndex(); | 
| 4409 |  |  | 
| 4410 | 29.1k |     if (!accepted_header) | 
| 4411 | 11 |         return false; | 
| 4412 |  |  | 
| 4413 |  |     // Check all requested blocks that we do not already have for validity and | 
| 4414 |  |     // save them to disk. Skip processing of unrequested blocks as an anti-DoS | 
| 4415 |  |     // measure, unless the blocks have more work than the active chain tip, and | 
| 4416 |  |     // aren't too far ahead of it, so are likely to be attached soon. | 
| 4417 | 29.1k |     bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA; | 
| 4418 | 29.1k |     bool fHasMoreOrSameWork = (ActiveTip() ? pindex->nChainWork >= ActiveTip()->nChainWork : true); | 
| 4419 |  |     // Blocks that are too out-of-order needlessly limit the effectiveness of | 
| 4420 |  |     // pruning, because pruning will not delete block files that contain any | 
| 4421 |  |     // blocks which are too close in height to the tip.  Apply this test | 
| 4422 |  |     // regardless of whether pruning is enabled; it should generally be safe to | 
| 4423 |  |     // not process unrequested blocks. | 
| 4424 | 29.1k |     bool fTooFarAhead{pindex->nHeight > ActiveHeight() + int(MIN_BLOCKS_TO_KEEP)}; | 
| 4425 |  |  | 
| 4426 |  |     // TODO: Decouple this function from the block download logic by removing fRequested | 
| 4427 |  |     // This requires some new chain data structure to efficiently look up if a | 
| 4428 |  |     // block is in a chain leading to a candidate for best tip, despite not | 
| 4429 |  |     // being such a candidate itself. | 
| 4430 |  |     // Note that this would break the getblockfrompeer RPC | 
| 4431 |  |  | 
| 4432 |  |     // TODO: deal better with return value and error conditions for duplicate | 
| 4433 |  |     // and unrequested blocks. | 
| 4434 | 29.1k |     if (fAlreadyHave) return true; | 
| 4435 | 29.1k |     if (!fRequested) {  // If we didn't ask for it: | 
| 4436 | 0 |         if (pindex->nTx != 0) return true;    // This is a previously-processed block that was pruned | 
| 4437 | 0 |         if (!fHasMoreOrSameWork) return true; // Don't process less-work chains | 
| 4438 | 0 |         if (fTooFarAhead) return true;        // Block height is too high | 
| 4439 |  |  | 
| 4440 |  |         // Protect against DoS attacks from low-work chains. | 
| 4441 |  |         // If our tip is behind, a peer could try to send us | 
| 4442 |  |         // low-work blocks on a fake chain that we would never | 
| 4443 |  |         // request; don't process these. | 
| 4444 | 0 |         if (pindex->nChainWork < MinimumChainWork()) return true; | 
| 4445 | 0 |     } | 
| 4446 |  |  | 
| 4447 | 29.1k |     const CChainParams& params{GetParams()}; | 
| 4448 |  |  | 
| 4449 | 29.1k |     if (!CheckBlock(block, state, params.GetConsensus()) || | 
| 4450 | 29.1k |         !ContextualCheckBlock(block, state, *this, pindex->pprev)) { | 
| 4451 | 899 |         if (Assume(state.IsInvalid())) { | 
| 4452 | 899 |             ActiveChainstate().InvalidBlockFound(pindex, state); | 
| 4453 | 899 |         } | 
| 4454 | 899 |         LogError("%s: %s\n", __func__, state.ToString()); | 
| 4455 | 899 |         return false; | 
| 4456 | 899 |     } | 
| 4457 |  |  | 
| 4458 |  |     // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW | 
| 4459 |  |     // (but if it does not build on our best tip, let the SendMessages loop relay it) | 
| 4460 | 28.2k |     if (!IsInitialBlockDownload() && ActiveTip() == pindex->pprev && m_options.signals) { | 
| 4461 | 23.0k |         m_options.signals->NewPoWValidBlock(pindex, pblock); | 
| 4462 | 23.0k |     } | 
| 4463 |  |  | 
| 4464 |  |     // Write block to history file | 
| 4465 | 28.2k |     if (fNewBlock) *fNewBlock = true; | 
| 4466 | 28.2k |     try { | 
| 4467 | 28.2k |         FlatFilePos blockPos{}; | 
| 4468 | 28.2k |         if (dbp) { | 
| 4469 | 0 |             blockPos = *dbp; | 
| 4470 | 0 |             m_blockman.UpdateBlockInfo(block, pindex->nHeight, blockPos); | 
| 4471 | 28.2k |         } else { | 
| 4472 | 28.2k |             blockPos = m_blockman.WriteBlock(block, pindex->nHeight); | 
| 4473 | 28.2k |             if (blockPos.IsNull()) { | 
| 4474 | 0 |                 state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__)); | 
| 4475 | 0 |                 return false; | 
| 4476 | 0 |             } | 
| 4477 | 28.2k |         } | 
| 4478 | 28.2k |         ReceivedBlockTransactions(block, pindex, blockPos); | 
| 4479 | 28.2k |     } catch (const std::runtime_error& e) { | 
| 4480 | 0 |         return FatalError(GetNotifications(), state, strprintf(_("System error while saving block to disk: %s"), e.what())); | 
| 4481 | 0 |     } | 
| 4482 |  |  | 
| 4483 |  |     // TODO: FlushStateToDisk() handles flushing of both block and chainstate | 
| 4484 |  |     // data, so we should move this to ChainstateManager so that we can be more | 
| 4485 |  |     // intelligent about how we flush. | 
| 4486 |  |     // For now, since FlushStateMode::NONE is used, all that can happen is that | 
| 4487 |  |     // the block files may be pruned, so we can just call this on one | 
| 4488 |  |     // chainstate (particularly if we haven't implemented pruning with | 
| 4489 |  |     // background validation yet). | 
| 4490 | 28.2k |     ActiveChainstate().FlushStateToDisk(state, FlushStateMode::NONE); | 
| 4491 |  |  | 
| 4492 | 28.2k |     CheckBlockIndex(); | 
| 4493 |  |  | 
| 4494 | 28.2k |     return true; | 
| 4495 | 28.2k | } | 
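
The fRequested branch above boils down to a small policy for unrequested blocks. A simplified standalone restatement (not the actual implementation: chain work is modelled as a plain integer, and the 288-block distance mirrors the value Bitcoin Core uses for MIN_BLOCKS_TO_KEEP):

```cpp
#include <cstdint>

// Decide whether an unrequested block should still be stored: skip blocks that
// were already processed (and possibly pruned), less-work chains, blocks too
// far ahead of the tip, and chains that do not clear the minimum chain work.
bool ShouldStoreUnrequestedBlock(uint64_t block_chain_work, uint64_t tip_chain_work,
                                 uint64_t minimum_chain_work, int block_height, int tip_height,
                                 unsigned block_tx_count_already_known)
{
    constexpr int min_blocks_to_keep{288}; // assumed value of MIN_BLOCKS_TO_KEEP
    if (block_tx_count_already_known != 0) return false;              // previously processed block
    if (block_chain_work < tip_chain_work) return false;              // less-work chain
    if (block_height > tip_height + min_blocks_to_keep) return false; // too far ahead of the tip
    if (block_chain_work < minimum_chain_work) return false;          // anti-DoS: low-work chain
    return true;
}
```
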
| 4496 |  |  | 
| 4497 |  | bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked, bool* new_block) | 
| 4498 | 29.1k | { | 
| 4499 | 29.1k |     AssertLockNotHeld(cs_main); | 
| 4500 |  |  | 
| 4501 | 29.1k |     { | 
| 4502 | 29.1k |         CBlockIndex *pindex = nullptr; | 
| 4503 | 29.1k |         if (new_block) *new_block = false; | 
| 4504 | 29.1k |         BlockValidationState state; | 
| 4505 |  |  | 
| 4506 |  |         // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race. | 
| 4507 |  |         // Therefore, the following critical section must include the CheckBlock() call as well. | 
| 4508 | 29.1k |         LOCK(cs_main); | 
| 4509 |  |  | 
| 4510 |  |         // Skipping AcceptBlock() for CheckBlock() failures means that we will never mark a block as invalid if | 
| 4511 |  |         // CheckBlock() fails.  This is protective against consensus failure if there are any unknown forms of block | 
| 4512 |  |         // malleability that cause CheckBlock() to fail; see e.g. CVE-2012-2459 and | 
| 4513 |  |         // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html.  Because CheckBlock() is | 
| 4514 |  |         // not very expensive, the anti-DoS benefits of caching failure (of a definitely-invalid block) are not substantial. | 
| 4515 | 29.1k |         bool ret = CheckBlock(*block, state, GetConsensus()); | 
| 4516 | 29.1k |         if (ret) { | 
| 4517 |  |             // Store to disk | 
| 4518 | 29.1k |             ret = AcceptBlock(block, state, &pindex, force_processing, nullptr, new_block, min_pow_checked); | 
| 4519 | 29.1k |         } | 
| 4520 | 29.1k |         if (!ret) { | 
| 4521 | 910 |             if (m_options.signals) { | 
| 4522 | 910 |                 m_options.signals->BlockChecked(block, state); | 
| 4523 | 910 |             } | 
| 4524 | 910 |             LogError("%s: AcceptBlock FAILED (%s)\n", __func__, state.ToString()); | 
| 4525 | 910 |             return false; | 
| 4526 | 910 |         } | 
| 4527 | 29.1k |     } | 
| 4528 |  |  | 
| 4529 | 28.2k |     NotifyHeaderTip(); | 
| 4530 |  |  | 
| 4531 | 28.2k |     BlockValidationState state; // Only used to report errors, not invalidity - ignore it | 
| 4532 | 28.2k |     if (!ActiveChainstate().ActivateBestChain(state, block)) { | 
| 4533 | 0 |         LogError("%s: ActivateBestChain failed (%s)\n", __func__, state.ToString()); | 
| 4534 | 0 |         return false; | 
| 4535 | 0 |     } | 
| 4536 |  |  | 
| 4537 | 28.2k |     Chainstate* bg_chain{WITH_LOCK(cs_main, return BackgroundSyncInProgress() ? m_ibd_chainstate.get() : nullptr)}; | 
| 4538 | 28.2k |     BlockValidationState bg_state; | 
| 4539 | 28.2k |     if (bg_chain && !bg_chain->ActivateBestChain(bg_state, block)) { | 
| 4540 | 0 |         LogError("%s: [background] ActivateBestChain failed (%s)\n", __func__, bg_state.ToString()); | 
| 4541 | 0 |         return false; | 
| 4542 | 0 |      } | 
| 4543 |  |  | 
| 4544 | 28.2k |     return true; | 
| 4545 | 28.2k | } | 
| 4546 |  |  | 
| 4547 |  | MempoolAcceptResult ChainstateManager::ProcessTransaction(const CTransactionRef& tx, bool test_accept) | 
| 4548 | 719k | { | 
| 4549 | 719k |     AssertLockHeld(cs_main); | 
| 4550 | 719k |     Chainstate& active_chainstate = ActiveChainstate(); | 
| 4551 | 719k |     if (!active_chainstate.GetMempool()) { | 
| 4552 | 0 |         TxValidationState state; | 
| 4553 | 0 |         state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool"); | 
| 4554 | 0 |         return MempoolAcceptResult::Failure(state); | 
| 4555 | 0 |     } | 
| 4556 | 719k |     auto result = AcceptToMemoryPool(active_chainstate, tx, GetTime(), /*bypass_limits=*/ false, test_accept); | 
| 4557 | 719k |     active_chainstate.GetMempool()->check(active_chainstate.CoinsTip(), active_chainstate.m_chain.Height() + 1); | 
| 4558 | 719k |     return result; | 
| 4559 | 719k | } | 
| 4560 |  |  | 
| 4561 |  |  | 
| 4562 |  | BlockValidationState TestBlockValidity( | 
| 4563 |  |     Chainstate& chainstate, | 
| 4564 |  |     const CBlock& block, | 
| 4565 |  |     const bool check_pow, | 
| 4566 |  |     const bool check_merkle_root) | 
| 4567 | 0 | { | 
| 4568 |  |     // Lock must be held throughout this function for two reasons: | 
| 4569 |  |     // 1. We don't want the tip to change during several of the validation steps | 
| 4570 |  |     // 2. To prevent a CheckBlock() race condition for fChecked, see ProcessNewBlock() | 
| 4571 | 0 |     AssertLockHeld(chainstate.m_chainman.GetMutex()); | 
| 4572 |  |  | 
| 4573 | 0 |     BlockValidationState state; | 
| 4574 | 0 |     CBlockIndex* tip{Assert(chainstate.m_chain.Tip())}; | 
| 4575 |  |  | 
| 4576 | 0 |     if (block.hashPrevBlock != *Assert(tip->phashBlock)) { | 
| 4577 | 0 |         state.Invalid({}, "inconclusive-not-best-prevblk"); | 
| 4578 | 0 |         return state; | 
| 4579 | 0 |     } | 
| 4580 |  |  | 
| 4581 |  |     // For signets CheckBlock() verifies the challenge iff fCheckPow is set. | 
| 4582 | 0 |     if (!CheckBlock(block, state, chainstate.m_chainman.GetConsensus(), /*fCheckPow=*/check_pow, /*fCheckMerkleRoot=*/check_merkle_root)) { | 
| 4583 |  |         // This should never happen, but belt-and-suspenders don't approve the | 
| 4584 |  |         // block if it does. | 
| 4585 | 0 |         if (state.IsValid()) NONFATAL_UNREACHABLE(); | 
| 4586 | 0 |         return state; | 
| 4587 | 0 |     } | 
| 4588 |  |  | 
| 4589 |  |     /** | 
| 4590 |  |      * At this point ProcessNewBlock would call AcceptBlock(), but we | 
| 4591 |  |      * don't want to store the block or its header. Run individual checks | 
| 4592 |  |      * instead: | 
| 4593 |  |      * - skip AcceptBlockHeader() because: | 
| 4594 |  |      *   - we don't want to update the block index | 
| 4595 |  |      *   - we do not care about duplicates | 
| 4596 |  |      *   - we already ran CheckBlockHeader() via CheckBlock() | 
| 4597 |  |      *   - we already checked for prev-blk-not-found | 
| 4598 |  |      *   - we know the tip is valid, so no need to check bad-prevblk | 
| 4599 |  |      * - we already ran CheckBlock() | 
| 4600 |  |      * - do run ContextualCheckBlockHeader() | 
| 4601 |  |      * - do run ContextualCheckBlock() | 
| 4602 |  |      */ | 
| 4603 |  |  | 
| 4604 | 0 |     if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, tip)) { | 
| 4605 | 0 |         if (state.IsValid()) NONFATAL_UNREACHABLE(); | 
| 4606 | 0 |         return state; | 
| 4607 | 0 |     } | 
| 4608 |  |  | 
| 4609 | 0 |     if (!ContextualCheckBlock(block, state, chainstate.m_chainman, tip)) { | 
| 4610 | 0 |         if (state.IsValid()) NONFATAL_UNREACHABLE(); | 
| 4611 | 0 |         return state; | 
| 4612 | 0 |     } | 
| 4613 |  |  | 
| 4614 |  |     // We don't want ConnectBlock to update the actual chainstate, so create | 
| 4615 |  |     // a cache on top of it, along with a dummy block index. | 
| 4616 | 0 |     CBlockIndex index_dummy{block}; | 
| 4617 | 0 |     uint256 block_hash(block.GetHash()); | 
| 4618 | 0 |     index_dummy.pprev = tip; | 
| 4619 | 0 |     index_dummy.nHeight = tip->nHeight + 1; | 
| 4620 | 0 |     index_dummy.phashBlock = &block_hash; | 
| 4621 | 0 |     CCoinsViewCache view_dummy(&chainstate.CoinsTip()); | 
| 4622 |  |  | 
| 4623 |  |     // Set fJustCheck to true in order to update, and not clear, validation caches. | 
| 4624 | 0 |     if(!chainstate.ConnectBlock(block, state, &index_dummy, view_dummy, /*fJustCheck=*/true)) { | 
| 4625 | 0 |         if (state.IsValid()) NONFATAL_UNREACHABLE(); | 
| 4626 | 0 |         return state; | 
| 4627 | 0 |     } | 
| 4628 |  |  | 
| 4629 |  |     // Ensure no check returned successfully while also setting an invalid state. | 
| 4630 | 0 |     if (!state.IsValid()) NONFATAL_UNREACHABLE(); | 
| 4631 |  |  | 
| 4632 | 0 |     return state; | 
| 4633 | 0 | } | 
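
TestBlockValidity is intended for callers such as block-template producers that want to validate a candidate block against the current tip without storing it. A hypothetical call site, sketched under the assumption that a ChainstateManager named `chainman` and a locally assembled `candidate_block` are available (the check_pow choice is also an assumption):

```cpp
#include <logging.h>
#include <sync.h>
#include <validation.h>

// Hypothetical helper: check a candidate block against the current tip without
// storing it. Nothing here is committed; the dummy index and overlay cache used
// inside TestBlockValidity are discarded.
bool CheckCandidateBlock(ChainstateManager& chainman, const CBlock& candidate_block)
{
    LOCK(chainman.GetMutex()); // the tip must not move while we validate
    const BlockValidationState state{TestBlockValidity(chainman.ActiveChainstate(), candidate_block,
                                                       /*check_pow=*/false, /*check_merkle_root=*/true)};
    if (!state.IsValid()) {
        LogError("candidate block rejected: %s\n", state.ToString());
        return false;
    }
    return true;
}
```
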
| 4634 |  |  | 
| 4635 |  | /* This function is called from the RPC code for pruneblockchain */ | 
| 4636 |  | void PruneBlockFilesManual(Chainstate& active_chainstate, int nManualPruneHeight) | 
| 4637 | 0 | { | 
| 4638 | 0 |     BlockValidationState state; | 
| 4639 | 0 |     if (!active_chainstate.FlushStateToDisk( | 
| 4640 | 0 |             state, FlushStateMode::NONE, nManualPruneHeight)) { | 
| 4641 | 0 |         LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); | 
| 4642 | 0 |     } | 
| 4643 | 0 | } | 
| 4644 |  |  | 
| 4645 |  | bool Chainstate::LoadChainTip() | 
| 4646 | 51.2k | { | 
| 4647 | 51.2k |     AssertLockHeld(cs_main); | 
| 4648 | 51.2k |     const CCoinsViewCache& coins_cache = CoinsTip(); | 
| 4649 | 51.2k |     assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty | 
| 4650 | 51.2k |     const CBlockIndex* tip = m_chain.Tip(); | 
| 4651 |  |  | 
| 4652 | 51.2k |     if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) { | 
| 4653 | 0 |         return true; | 
| 4654 | 0 |     } | 
| 4655 |  |  | 
| 4656 |  |     // Load pointer to end of best chain | 
| 4657 | 51.2k |     CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock()); | 
| 4658 | 51.2k |     if (!pindex) { | 
| 4659 | 0 |         return false; | 
| 4660 | 0 |     } | 
| 4661 | 51.2k |     m_chain.SetTip(*pindex); | 
| 4662 | 51.2k |     PruneBlockIndexCandidates(); | 
| 4663 |  |  | 
| 4664 | 51.2k |     tip = m_chain.Tip(); | 
| 4665 | 51.2k |     LogInfo("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f", | 
| 4666 | 51.2k |               tip->GetBlockHash().ToString(), | 
| 4667 | 51.2k |               m_chain.Height(), | 
| 4668 | 51.2k |               FormatISO8601DateTime(tip->GetBlockTime()), | 
| 4669 | 51.2k |               m_chainman.GuessVerificationProgress(tip)); | 
| 4670 |  |  | 
| 4671 |  |     // Ensure KernelNotifications m_tip_block is set even if no new block arrives. | 
| 4672 | 51.2k |     if (this->GetRole() != ChainstateRole::BACKGROUND) { | 
| 4673 |  |         // Ignoring return value for now. | 
| 4674 | 51.2k |         (void)m_chainman.GetNotifications().blockTip( | 
| 4675 | 51.2k |             /*state=*/GetSynchronizationState(/*init=*/true, m_chainman.m_blockman.m_blockfiles_indexed), | 
| 4676 | 51.2k |             /*index=*/*pindex, | 
| 4677 | 51.2k |             /*verification_progress=*/m_chainman.GuessVerificationProgress(tip)); | 
| 4678 | 51.2k |     } | 
| 4679 |  |  | 
| 4680 | 51.2k |     return true; | 
| 4681 | 51.2k | } | 
| 4682 |  |  | 
| 4683 |  | CVerifyDB::CVerifyDB(Notifications& notifications) | 
| 4684 | 51.2k |     : m_notifications{notifications} | 
| 4685 | 51.2k | { | 
| 4686 | 51.2k |     m_notifications.progress(_("Verifying blocks…"), 0, false); | 
| 4687 | 51.2k | } | 
| 4688 |  |  | 
| 4689 |  | CVerifyDB::~CVerifyDB() | 
| 4690 | 51.2k | { | 
| 4691 | 51.2k |     m_notifications.progress(bilingual_str{}, 100, false); | 
| 4692 | 51.2k | } | 
| 4693 |  |  | 
| 4694 |  | VerifyDBResult CVerifyDB::VerifyDB( | 
| 4695 |  |     Chainstate& chainstate, | 
| 4696 |  |     const Consensus::Params& consensus_params, | 
| 4697 |  |     CCoinsView& coinsview, | 
| 4698 |  |     int nCheckLevel, int nCheckDepth) | 
| 4699 | 51.2k | { | 
| 4700 | 51.2k |     AssertLockHeld(cs_main); | 
| 4701 |  |  | 
| 4702 | 51.2k |     if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr) { | 
| 4703 | 0 |         return VerifyDBResult::SUCCESS; | 
| 4704 | 0 |     } | 
| 4705 |  |  | 
| 4706 |  |     // Verify blocks in the best chain | 
| 4707 | 51.2k |     if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height()) { | 
| 4708 | 0 |         nCheckDepth = chainstate.m_chain.Height(); | 
| 4709 | 0 |     } | 
| 4710 | 51.2k |     nCheckLevel = std::max(0, std::min(4, nCheckLevel)); | 
| 4711 | 51.2k |     LogInfo("Verifying last %i blocks at level %i", nCheckDepth, nCheckLevel); | 
| 4712 | 51.2k |     CCoinsViewCache coins(&coinsview); | 
| 4713 | 51.2k |     CBlockIndex* pindex; | 
| 4714 | 51.2k |     CBlockIndex* pindexFailure = nullptr; | 
| 4715 | 51.2k |     int nGoodTransactions = 0; | 
| 4716 | 51.2k |     BlockValidationState state; | 
| 4717 | 51.2k |     int reportDone = 0; | 
| 4718 | 51.2k |     bool skipped_no_block_data{false}; | 
| 4719 | 51.2k |     bool skipped_l3_checks{false}; | 
| 4720 | 51.2k |     LogInfo("Verification progress: 0%%"); | 
| 4721 |  |  | 
| 4722 | 51.2k |     const bool is_snapshot_cs{chainstate.m_from_snapshot_blockhash}; | 
| 4723 |  |  | 
| 4724 | 358k |     for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) { | 
| 4725 | 358k |         const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))); | 
| 4726 | 358k |         if (reportDone < percentageDone / 10) { | 
| 4727 |  |             // report every 10% step | 
| 4728 | 307k |             LogInfo("Verification progress: %d%%", percentageDone); | 
| 4729 | 307k |             reportDone = percentageDone / 10; | 
| 4730 | 307k |         } | 
| 4731 | 358k |         m_notifications.progress(_("Verifying blocks…"), percentageDone, false); | 
| 4732 | 358k |         if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) { | 
| 4733 | 51.2k |             break; | 
| 4734 | 51.2k |         } | 
| 4735 | 307k |         if ((chainstate.m_blockman.IsPruneMode() || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) { | 
| 4736 |  |             // If pruning or running under an assumeutxo snapshot, only go | 
| 4737 |  |             // back as far as we have data. | 
| 4738 | 0 |             LogInfo("Block verification stopping at height %d (no data). This could be due to pruning or use of an assumeutxo snapshot.", pindex->nHeight); | 
| 4739 | 0 |             skipped_no_block_data = true; | 
| 4740 | 0 |             break; | 
| 4741 | 0 |         } | 
| 4742 | 307k |         CBlock block; | 
| 4743 |  |         // check level 0: read from disk | 
| 4744 | 307k |         if (!chainstate.m_blockman.ReadBlock(block, *pindex)) { | 
| 4745 | 0 |             LogPrintf("Verification error: ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | 
| 4746 | 0 |             return VerifyDBResult::CORRUPTED_BLOCK_DB; | 
| 4747 | 0 |         } | 
| 4748 |  |         // check level 1: verify block validity | 
| 4749 | 307k |         if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) { | 
| 4750 | 0 |             LogPrintf("Verification error: found bad block at %d, hash=%s (%s)\n", | 
| 4751 | 0 |                       pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); | 
| 4752 | 0 |             return VerifyDBResult::CORRUPTED_BLOCK_DB; | 
| 4753 | 0 |         } | 
| 4754 |  |         // check level 2: verify undo validity | 
| 4755 | 307k |         if (nCheckLevel >= 2 && pindex) { | 
| 4756 | 307k |             CBlockUndo undo; | 
| 4757 | 307k |             if (!pindex->GetUndoPos().IsNull()) { | 
| 4758 | 307k |                 if (!chainstate.m_blockman.ReadBlockUndo(undo, *pindex)) { | 
| 4759 | 0 |                     LogPrintf("Verification error: found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | 
| 4760 | 0 |                     return VerifyDBResult::CORRUPTED_BLOCK_DB; | 
| 4761 | 0 |                 } | 
| 4762 | 307k |             } | 
| 4763 | 307k |         } | 
| 4764 |  |         // check level 3: check for inconsistencies during memory-only disconnect of tip blocks | 
| 4765 | 307k |         size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage(); | 
| 4766 |  |  | 
| 4767 | 307k |         if (nCheckLevel >= 3) { | 
| 4768 | 307k |             if (curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) { | 
| 4769 | 307k |                 assert(coins.GetBestBlock() == pindex->GetBlockHash()); | 
| 4770 | 307k |                 DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins); | 
| 4771 | 307k |                 if (res == DISCONNECT_FAILED) { | 
| 4772 | 0 |                     LogPrintf("Verification error: irrecoverable inconsistency in block data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | 
| 4773 | 0 |                     return VerifyDBResult::CORRUPTED_BLOCK_DB; | 
| 4774 | 0 |                 } | 
| 4775 | 307k |                 if (res == DISCONNECT_UNCLEAN) { | 
| 4776 | 0 |                     nGoodTransactions = 0; | 
| 4777 | 0 |                     pindexFailure = pindex; | 
| 4778 | 307k |                 } else { | 
| 4779 | 307k |                     nGoodTransactions += block.vtx.size(); | 
| 4780 | 307k |                 } | 
| 4781 | 307k |             } else { | 
| 4782 | 0 |                 skipped_l3_checks = true; | 
| 4783 | 0 |             } | 
| 4784 | 307k |         } | 
| 4785 | 307k |         if (chainstate.m_chainman.m_interrupt) return VerifyDBResult::INTERRUPTED; | 
| 4786 | 307k |     } | 
| 4787 | 51.2k |     if (pindexFailure) { | 
| 4788 | 0 |         LogPrintf("Verification error: coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions); | 
| 4789 | 0 |         return VerifyDBResult::CORRUPTED_BLOCK_DB; | 
| 4790 | 0 |     } | 
| 4791 | 51.2k |     if (skipped_l3_checks) { | 
| 4792 | 0 |         LogPrintf("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.\n"); | 
| 4793 | 0 |     } | 
| 4794 |  |  | 
| 4795 |  |     // store block count as we move pindex at check level >= 4 | 
| 4796 | 51.2k |     int block_count = chainstate.m_chain.Height() - pindex->nHeight; | 
| 4797 |  |  | 
| 4798 |  |     // check level 4: try reconnecting blocks | 
| 4799 | 51.2k |     if (nCheckLevel >= 4 && !skipped_l3_checks) { | 
| 4800 | 0 |         while (pindex != chainstate.m_chain.Tip()) { | 
| 4801 | 0 |             const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))); | 
| 4802 | 0 |             if (reportDone < percentageDone / 10) { | 
| 4803 |  |                 // report every 10% step | 
| 4804 | 0 |                 LogInfo("Verification progress: %d%%", percentageDone); | 
| 4805 | 0 |                 reportDone = percentageDone / 10; | 
| 4806 | 0 |             } | 
| 4807 | 0 |             m_notifications.progress(_("Verifying blocks…"), percentageDone, false); | 
| 4808 | 0 |             pindex = chainstate.m_chain.Next(pindex); | 
| 4809 | 0 |             CBlock block; | 
| 4810 | 0 |             if (!chainstate.m_blockman.ReadBlock(block, *pindex)) { | 
| 4811 | 0 |                 LogPrintf("Verification error: ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | 
| 4812 | 0 |                 return VerifyDBResult::CORRUPTED_BLOCK_DB; | 
| 4813 | 0 |             } | 
| 4814 | 0 |             if (!chainstate.ConnectBlock(block, state, pindex, coins)) { | 
| 4815 | 0 |                 LogPrintf("Verification error: found unconnectable block at %d, hash=%s (%s)\n", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); | 
| 4816 | 0 |                 return VerifyDBResult::CORRUPTED_BLOCK_DB; | 
| 4817 | 0 |             } | 
| 4818 | 0 |             if (chainstate.m_chainman.m_interrupt) return VerifyDBResult::INTERRUPTED; | 
| 4819 | 0 |         } | 
| 4820 | 0 |     } | 
| 4821 |  |  | 
| 4822 | 51.2k |     LogInfo("Verification: No coin database inconsistencies in last %i blocks (%i transactions)", block_count, nGoodTransactions); | 
| 4823 |  |  | 
| 4824 | 51.2k |     if (skipped_l3_checks) { | 
| 4825 | 0 |         return VerifyDBResult::SKIPPED_L3_CHECKS; | 
| 4826 | 0 |     } | 
| 4827 | 51.2k |     if (skipped_no_block_data) { | 
| 4828 | 0 |         return VerifyDBResult::SKIPPED_MISSING_BLOCKS; | 
| 4829 | 0 |     } | 
| 4830 | 51.2k |     return VerifyDBResult::SUCCESS; | 
| 4831 | 51.2k | } | 
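
The nCheckLevel parameter selects how deep the per-block verification above goes (each level includes the previous ones), and nCheckDepth bounds how many blocks back from the tip are examined. A hypothetical invocation, with the levels summarized from the comments inside the function; the level/depth values and the `chainman` name are illustrative, not necessarily Bitcoin Core's defaults:

```cpp
#include <sync.h>
#include <validation.h>

// Hypothetical wrapper around the checks above.
//   Level 0: blocks can be read from disk.
//   Level 1: CheckBlock() passes for each block.
//   Level 2: undo data can be read.
//   Level 3: tip blocks can be disconnected in memory without inconsistencies.
//   Level 4: the disconnected blocks can be reconnected with ConnectBlock().
VerifyDBResult VerifyRecentBlocks(ChainstateManager& chainman)
{
    LOCK(chainman.GetMutex());
    Chainstate& chainstate{chainman.ActiveChainstate()};
    return CVerifyDB{chainman.GetNotifications()}.VerifyDB(
        chainstate, chainman.GetConsensus(), chainstate.CoinsDB(),
        /*nCheckLevel=*/3, /*nCheckDepth=*/6);
}
```
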
| 4832 |  |  | 
| 4833 |  | /** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */ | 
| 4834 |  | bool Chainstate::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs) | 
| 4835 | 0 | { | 
| 4836 | 0 |     AssertLockHeld(cs_main); | 
| 4837 |  |     // TODO: merge with ConnectBlock | 
| 4838 | 0 |     CBlock block; | 
| 4839 | 0 |     if (!m_blockman.ReadBlock(block, *pindex)) { | 
| 4840 | 0 |         LogError("ReplayBlock(): ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | 
| 4841 | 0 |         return false; | 
| 4842 | 0 |     } | 
| 4843 |  |  | 
| 4844 | 0 |     for (const CTransactionRef& tx : block.vtx) { | 
| 4845 | 0 |         if (!tx->IsCoinBase()) { | 
| 4846 | 0 |             for (const CTxIn &txin : tx->vin) { | 
| 4847 | 0 |                 inputs.SpendCoin(txin.prevout); | 
| 4848 | 0 |             } | 
| 4849 | 0 |         } | 
| 4850 |  |         // Pass check = true as every addition may be an overwrite. | 
| 4851 | 0 |         AddCoins(inputs, *tx, pindex->nHeight, true); | 
| 4852 | 0 |     } | 
| 4853 | 0 |     return true; | 
| 4854 | 0 | } | 
| 4855 |  |  | 
| 4856 |  | bool Chainstate::ReplayBlocks() | 
| 4857 | 51.2k | { | 
| 4858 | 51.2k |     LOCK(cs_main); | 
| 4859 |  |  | 
| 4860 | 51.2k |     CCoinsView& db = this->CoinsDB(); | 
| 4861 | 51.2k |     CCoinsViewCache cache(&db); | 
| 4862 |  |  | 
| 4863 | 51.2k |     std::vector<uint256> hashHeads = db.GetHeadBlocks(); | 
| 4864 | 51.2k |     if (hashHeads.empty()) return true; // We're already in a consistent state. | 
| 4865 | 0 |     if (hashHeads.size() != 2) { | 
| 4866 | 0 |         LogError("ReplayBlocks(): unknown inconsistent state\n"); | 
| 4867 | 0 |         return false; | 
| 4868 | 0 |     } | 
| 4869 |  |  | 
| 4870 | 0 |     m_chainman.GetNotifications().progress(_("Replaying blocks…"), 0, false); | 
| 4871 | 0 |     LogInfo("Replaying blocks"); | 
| 4872 |  |  | 
| 4873 | 0 |     const CBlockIndex* pindexOld = nullptr;  // Old tip during the interrupted flush. | 
| 4874 | 0 |     const CBlockIndex* pindexNew;            // New tip during the interrupted flush. | 
| 4875 | 0 |     const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip. | 
| 4876 |  |  | 
| 4877 | 0 |     if (m_blockman.m_block_index.count(hashHeads[0]) == 0) { | 
| 4878 | 0 |         LogError("ReplayBlocks(): reorganization to unknown block requested\n"); | 
| 4879 | 0 |         return false; | 
| 4880 | 0 |     } | 
| 4881 | 0 |     pindexNew = &(m_blockman.m_block_index[hashHeads[0]]); | 
| 4882 |  |  | 
| 4883 | 0 |     if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush. | 
| 4884 | 0 |         if (m_blockman.m_block_index.count(hashHeads[1]) == 0) { | 
| 4885 | 0 |             LogError("ReplayBlocks(): reorganization from unknown block requested\n"); | 
| 4886 | 0 |             return false; | 
| 4887 | 0 |         } | 
| 4888 | 0 |         pindexOld = &(m_blockman.m_block_index[hashHeads[1]]); | 
| 4889 | 0 |         pindexFork = LastCommonAncestor(pindexOld, pindexNew); | 
| 4890 | 0 |         assert(pindexFork != nullptr); | 
| 4891 | 0 |     } | 
| 4892 |  |  | 
| 4893 |  |     // Rollback along the old branch. | 
| 4894 | 0 |     while (pindexOld != pindexFork) { | 
| 4895 | 0 |         if (pindexOld->nHeight > 0) { // Never disconnect the genesis block. | 
| 4896 | 0 |             CBlock block; | 
| 4897 | 0 |             if (!m_blockman.ReadBlock(block, *pindexOld)) { | 
| 4898 | 0 |                 LogError("RollbackBlock(): ReadBlock() failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); | 
| 4899 | 0 |                 return false; | 
| 4900 | 0 |             } | 
| 4901 | 0 |             LogInfo("Rolling back %s (%i)", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight); | 
| 4902 | 0 |             DisconnectResult res = DisconnectBlock(block, pindexOld, cache); | 
| 4903 | 0 |             if (res == DISCONNECT_FAILED) { | 
| 4904 | 0 |                 LogError("RollbackBlock(): DisconnectBlock failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); | 
| 4905 | 0 |                 return false; | 
| 4906 | 0 |             } | 
| 4907 |  |             // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was | 
| 4908 |  |             // overwritten. It corresponds to cases where the block-to-be-disconnected never had all its operations | 
| 4909 |  |             // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations, | 
| 4910 |  |             // the result is still a version of the UTXO set with the effects of that block undone. | 
| 4911 | 0 |         } | 
| 4912 | 0 |         pindexOld = pindexOld->pprev; | 
| 4913 | 0 |     } | 
| 4914 |  |  | 
| 4915 |  |     // Roll forward from the forking point to the new tip. | 
| 4916 | 0 |     int nForkHeight = pindexFork ? pindexFork->nHeight : 0; | 
| 4917 | 0 |     for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) { | 
| 4918 | 0 |         const CBlockIndex& pindex{*Assert(pindexNew->GetAncestor(nHeight))}; | 
| 4919 |  | 
 | 
| 4920 | 0 |         LogInfo("Rolling forward %s (%i)", pindex.GetBlockHash().ToString(), nHeight); | 
| 4921 | 0 |         m_chainman.GetNotifications().progress(_("Replaying blocks…"), (int)((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)), false); | 
| 4922 | 0 |         if (!RollforwardBlock(&pindex, cache)) return false; | 
| 4923 | 0 |     } | 
| 4924 |  |  | 
| 4925 | 0 |     cache.SetBestBlock(pindexNew->GetBlockHash()); | 
| 4926 | 0 |     cache.Flush(); | 
| 4927 | 0 |     m_chainman.GetNotifications().progress(bilingual_str{}, 100, false); | 
| 4928 | 0 |     return true; | 
| 4929 | 0 | } | 
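
The control flow above is easier to see in isolation. Below is a minimal, self-contained sketch (toy ToyIndex type and printf stand-ins, not Bitcoin Core code) of the same rollback/roll-forward pattern: find the last common ancestor of the old and new tips, disconnect the old branch down to it, then connect the new branch up to the new tip.

#include <cassert>
#include <cstdio>
#include <vector>

struct ToyIndex {
    int height{0};
    const ToyIndex* prev{nullptr};
};

// Walk both tips back to equal height, then back in lock-step until they meet.
const ToyIndex* LastCommonAncestorToy(const ToyIndex* a, const ToyIndex* b)
{
    while (a->height > b->height) a = a->prev;
    while (b->height > a->height) b = b->prev;
    while (a != b) { a = a->prev; b = b->prev; }
    return a;
}

int main()
{
    // Small tree: genesis -> 1 -> 2a (old tip) and genesis -> 1 -> 2b -> 3b (new tip).
    ToyIndex genesis{0, nullptr}, b1{1, &genesis}, a2{2, &b1}, b2{2, &b1}, b3{3, &b2};
    const ToyIndex* fork = LastCommonAncestorToy(&a2, &b3);
    assert(fork == &b1);

    // Roll back along the old branch (disconnect), never past the fork point.
    for (const ToyIndex* p = &a2; p != fork; p = p->prev) {
        std::printf("disconnect block at height %d\n", p->height);
    }
    // Roll forward from the fork point to the new tip (connect), oldest first.
    std::vector<const ToyIndex*> forward;
    for (const ToyIndex* p = &b3; p != fork; p = p->prev) forward.push_back(p);
    for (auto it = forward.rbegin(); it != forward.rend(); ++it) {
        std::printf("connect block at height %d\n", (*it)->height);
    }
}

As the comment above notes, the disconnect step tolerates blocks whose effects were only partially applied, because writing and deleting a UTXO are idempotent operations.
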
| 4930 |  |  | 
| 4931 |  | bool Chainstate::NeedsRedownload() const | 
| 4932 | 51.2k | { | 
| 4933 | 51.2k |     AssertLockHeld(cs_main); | 
| 4934 |  |  | 
| 4935 |  |     // At and above m_params.SegwitHeight, segwit consensus rules must be validated | 
| 4936 | 51.2k |     CBlockIndex* block{m_chain.Tip()}; | 
| 4937 |  |  | 
| 4938 | 10.3M |     while (block != nullptr && DeploymentActiveAt(*block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) { | 
| 4939 | 10.3M |         if (!(block->nStatus & BLOCK_OPT_WITNESS)) { | 
| 4940 |  |             // block is insufficiently validated for a segwit client | 
| 4941 | 0 |             return true; | 
| 4942 | 0 |         } | 
| 4943 | 10.3M |         block = block->pprev; | 
| 4944 | 10.3M |     } | 
| 4945 |  |  | 
| 4946 | 51.2k |     return false; | 
| 4947 | 51.2k | } | 
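
For reference, a hedged sketch (toy types; an activation-height comparison stands in for the DeploymentActiveAt check) of the same backwards walk: step from the tip along pprev while segwit rules apply, and request a redownload as soon as a block lacks the witness-validation flag.

#include <cstdint>

struct ToyBlock {
    uint32_t status{0};
    const ToyBlock* prev{nullptr};
};
constexpr uint32_t TOY_OPT_WITNESS = 1u << 0; // stand-in for BLOCK_OPT_WITNESS

// Every block at or above the activation height must carry the witness flag,
// otherwise the chain data is insufficiently validated and needs a redownload.
bool ToyNeedsRedownload(const ToyBlock* tip, int tip_height, int activation_height)
{
    int height = tip_height;
    for (const ToyBlock* b = tip; b != nullptr && height >= activation_height; b = b->prev, --height) {
        if (!(b->status & TOY_OPT_WITNESS)) return true;
    }
    return false;
}
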
| 4948 |  |  | 
| 4949 |  | void Chainstate::ClearBlockIndexCandidates() | 
| 4950 | 0 | { | 
| 4951 | 0 |     AssertLockHeld(::cs_main); | 
| 4952 | 0 |     setBlockIndexCandidates.clear(); | 
| 4953 | 0 | } | 
| 4954 |  |  | 
| 4955 |  | bool ChainstateManager::LoadBlockIndex() | 
| 4956 | 51.2k | { | 
| 4957 | 51.2k |     AssertLockHeld(cs_main); | 
| 4958 |  |     // Load block index from databases | 
| 4959 | 51.2k |     if (m_blockman.m_blockfiles_indexed) { | 
| 4960 | 51.2k |         bool ret{m_blockman.LoadBlockIndexDB(SnapshotBlockhash())}; | 
| 4961 | 51.2k |         if (!ret) return false; | 
| 4962 |  |  | 
| 4963 | 51.2k |         m_blockman.ScanAndUnlinkAlreadyPrunedFiles(); | 
| 4964 |  |  | 
| 4965 | 51.2k |         std::vector<CBlockIndex*> vSortedByHeight{m_blockman.GetAllBlockIndices()}; | 
| 4966 | 51.2k |         std::sort(vSortedByHeight.begin(), vSortedByHeight.end(), | 
| 4967 | 51.2k |                   CBlockIndexHeightOnlyComparator()); | 
| 4968 |  |  | 
| 4969 | 10.3M |         for (CBlockIndex* pindex : vSortedByHeight) { | 
| 4970 | 10.3M |             if (m_interrupt) return false; | 
| 4971 |  |             // If we have an assumeutxo-based chainstate, then the snapshot | 
| 4972 |  |             // block will be a candidate for the tip, but it may not be | 
| 4973 |  |             // VALID_TRANSACTIONS (eg if we haven't yet downloaded the block), | 
| 4974 |  |             // so we special-case the snapshot block as a potential candidate | 
| 4975 |  |             // here. | 
| 4976 | 10.3M |             if (pindex == GetSnapshotBaseBlock() || | 
| 4977 | 10.3M |                     (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && | 
| 4978 | 10.3M |                      (pindex->HaveNumChainTxs() || pindex->pprev == nullptr))) { | 
| 4979 |  |  | 
| 4980 | 10.3M |                 for (Chainstate* chainstate : GetAll()) { | 
| 4981 | 10.3M |                     chainstate->TryAddBlockIndexCandidate(pindex); | 
| 4982 | 10.3M |                 } | 
| 4983 | 10.3M |             } | 
| 4984 | 10.3M |             if (pindex->nStatus & BLOCK_FAILED_MASK && (!m_best_invalid || pindex->nChainWork > m_best_invalid->nChainWork)) { | 
| 4985 | 0 |                 m_best_invalid = pindex; | 
| 4986 | 0 |             } | 
| 4987 | 10.3M |             if (pindex->IsValid(BLOCK_VALID_TREE) && (m_best_header == nullptr || CBlockIndexWorkComparator()(m_best_header, pindex))) | 
| 4988 | 10.3M |                 m_best_header = pindex; | 
| 4989 | 10.3M |         } | 
| 4990 | 51.2k |     } | 
| 4991 | 51.2k |     return true; | 
| 4992 | 51.2k | } | 
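
A compact sketch (toy ToyIndex type with a simplified flag standing in for the IsValid/HaveNumChainTxs criteria above) of the candidate-selection pass: sort all block indexes by height so parents come before children, then collect the fully linked ones as potential tips.

#include <algorithm>
#include <set>
#include <vector>

struct ToyIndex {
    int height{0};
    bool have_chain_txs{false}; // stand-in for HaveNumChainTxs()
};

std::set<ToyIndex*> SelectCandidates(std::vector<ToyIndex*> indexes)
{
    std::sort(indexes.begin(), indexes.end(),
              [](const ToyIndex* a, const ToyIndex* b) { return a->height < b->height; });
    std::set<ToyIndex*> candidates;
    for (ToyIndex* idx : indexes) {
        if (idx->have_chain_txs) candidates.insert(idx); // potential tip
    }
    return candidates;
}
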
| 4993 |  |  | 
| 4994 |  | bool Chainstate::LoadGenesisBlock() | 
| 4995 | 51.2k | { | 
| 4996 | 51.2k |     LOCK(cs_main); | 
| 4997 |  |  | 
| 4998 | 51.2k |     const CChainParams& params{m_chainman.GetParams()}; | 
| 4999 |  |  | 
| 5000 |  |     // Check whether we're already initialized by checking for genesis in | 
| 5001 |  |     // m_blockman.m_block_index. Note that we can't use m_chain here, since it is | 
| 5002 |  |     // set based on the coins db, not the block index db, which is the only | 
| 5003 |  |     // thing loaded at this point. | 
| 5004 | 51.2k |     if (m_blockman.m_block_index.count(params.GenesisBlock().GetHash())) | 
| 5005 | 51.2k |         return true; | 
| 5006 |  |  | 
| 5007 | 0 |     try { | 
| 5008 | 0 |         const CBlock& block = params.GenesisBlock(); | 
| 5009 | 0 |         FlatFilePos blockPos{m_blockman.WriteBlock(block, 0)}; | 
| 5010 | 0 |         if (blockPos.IsNull()) { | 
| 5011 | 0 |             LogError("%s: writing genesis block to disk failed\n", __func__); | 
| 5012 | 0 |             return false; | 
| 5013 | 0 |         } | 
| 5014 | 0 |         CBlockIndex* pindex = m_blockman.AddToBlockIndex(block, m_chainman.m_best_header); | 
| 5015 | 0 |         m_chainman.ReceivedBlockTransactions(block, pindex, blockPos); | 
| 5016 | 0 |     } catch (const std::runtime_error& e) { | 
| 5017 | 0 |         LogError("%s: failed to write genesis block: %s\n", __func__, e.what()); | 
| 5018 | 0 |         return false; | 
| 5019 | 0 |     } | 
| 5020 |  |  | 
| 5021 | 0 |     return true; | 
| 5022 | 0 | } | 
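
The function follows a check-then-initialize pattern. A minimal sketch (hypothetical ToyStore type, not the BlockManager API) of the same idempotent flow: return early if genesis is already indexed, otherwise write it and add it to the index, reporting failure instead of letting the exception escape.

#include <map>
#include <optional>
#include <string>

struct ToyStore {
    std::map<std::string, std::string> index;                              // hash -> block
    std::optional<long> Write(const std::string& /*block*/) { return 42; } // pretend disk offset
};

bool LoadToyGenesis(ToyStore& store, const std::string& genesis_hash, const std::string& genesis_block)
{
    if (store.index.count(genesis_hash)) return true; // already initialized, nothing to do
    const auto pos = store.Write(genesis_block);
    if (!pos) return false;                           // writing to disk failed
    store.index.emplace(genesis_hash, genesis_block);
    return true;
}
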
| 5023 |  |  | 
| 5024 |  | void ChainstateManager::LoadExternalBlockFile( | 
| 5025 |  |     AutoFile& file_in, | 
| 5026 |  |     FlatFilePos* dbp, | 
| 5027 |  |     std::multimap<uint256, FlatFilePos>* blocks_with_unknown_parent) | 
| 5028 | 0 | { | 
| 5029 |  |     // Either both should be specified (-reindex), or neither (-loadblock). | 
| 5030 | 0 |     assert(!dbp == !blocks_with_unknown_parent); | 
| 5031 |  |  | 
| 5032 | 0 |     const auto start{SteadyClock::now()}; | 
| 5033 | 0 |     const CChainParams& params{GetParams()}; | 
| 5034 |  | 
 | 
| 5035 | 0 |     int nLoaded = 0; | 
| 5036 | 0 |     try { | 
| 5037 | 0 |         BufferedFile blkdat{file_in, 2 * MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE + 8}; | 
| 5038 |  |         // nRewind indicates where to resume scanning in case something goes wrong, | 
| 5039 |  |         // such as a block fails to deserialize. | 
| 5040 | 0 |         uint64_t nRewind = blkdat.GetPos(); | 
| 5041 | 0 |         while (!blkdat.eof()) { | 
| 5042 | 0 |             if (m_interrupt) return; | 
| 5043 |  |  | 
| 5044 | 0 |             blkdat.SetPos(nRewind); | 
| 5045 | 0 |             nRewind++; // start one byte further next time, in case of failure | 
| 5046 | 0 |             blkdat.SetLimit(); // remove former limit | 
| 5047 | 0 |             unsigned int nSize = 0; | 
| 5048 | 0 |             try { | 
| 5049 |  |                 // locate a header | 
| 5050 | 0 |                 MessageStartChars buf; | 
| 5051 | 0 |                 blkdat.FindByte(std::byte(params.MessageStart()[0])); | 
| 5052 | 0 |                 nRewind = blkdat.GetPos() + 1; | 
| 5053 | 0 |                 blkdat >> buf; | 
| 5054 | 0 |                 if (buf != params.MessageStart()) { | 
| 5055 | 0 |                     continue; | 
| 5056 | 0 |                 } | 
| 5057 |  |                 // read size | 
| 5058 | 0 |                 blkdat >> nSize; | 
| 5059 | 0 |                 if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE) | 
| 5060 | 0 |                     continue; | 
| 5061 | 0 |             } catch (const std::exception&) { | 
| 5062 |  |                 // no valid block header found; don't complain | 
| 5063 |  |                 // (this happens at the end of every blk.dat file) | 
| 5064 | 0 |                 break; | 
| 5065 | 0 |             } | 
| 5066 | 0 |             try { | 
| 5067 |  |                 // read block header | 
| 5068 | 0 |                 const uint64_t nBlockPos{blkdat.GetPos()}; | 
| 5069 | 0 |                 if (dbp) | 
| 5070 | 0 |                     dbp->nPos = nBlockPos; | 
| 5071 | 0 |                 blkdat.SetLimit(nBlockPos + nSize); | 
| 5072 | 0 |                 CBlockHeader header; | 
| 5073 | 0 |                 blkdat >> header; | 
| 5074 | 0 |                 const uint256 hash{header.GetHash()}; | 
| 5075 |  |                 // Skip the rest of this block (this may read from disk into memory); position to the marker before the | 
| 5076 |  |                 // next block, but it's still possible to rewind to the start of the current block (without a disk read). | 
| 5077 | 0 |                 nRewind = nBlockPos + nSize; | 
| 5078 | 0 |                 blkdat.SkipTo(nRewind); | 
| 5079 |  | 
 | 
| 5080 | 0 |                 std::shared_ptr<CBlock> pblock{}; // needs to remain available after the cs_main lock is released to avoid duplicate reads from disk | 
| 5081 |  | 
 | 
| 5082 | 0 |                 { | 
| 5083 | 0 |                     LOCK(cs_main); | 
| 5084 |  |                     // detect out of order blocks, and store them for later | 
| 5085 | 0 |                     if (hash != params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(header.hashPrevBlock)) { | 
| 5086 | 0 |                         LogDebug(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), | 
| 5087 | 0 |                                  header.hashPrevBlock.ToString()); | 
| 5088 | 0 |                         if (dbp && blocks_with_unknown_parent) { | 
| 5089 | 0 |                             blocks_with_unknown_parent->emplace(header.hashPrevBlock, *dbp); | 
| 5090 | 0 |                         } | 
| 5091 | 0 |                         continue; | 
| 5092 | 0 |                     } | 
| 5093 |  |  | 
| 5094 |  |                     // process in case the block isn't known yet | 
| 5095 | 0 |                     const CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash); | 
| 5096 | 0 |                     if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) { | 
| 5097 |  |                         // This block can be processed immediately; rewind to its start, read and deserialize it. | 
| 5098 | 0 |                         blkdat.SetPos(nBlockPos); | 
| 5099 | 0 |                         pblock = std::make_shared<CBlock>(); | 
| 5100 | 0 |                         blkdat >> TX_WITH_WITNESS(*pblock); | 
| 5101 | 0 |                         nRewind = blkdat.GetPos(); | 
| 5102 |  | 
 | 
| 5103 | 0 |                         BlockValidationState state; | 
| 5104 | 0 |                         if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr, true)) { | 
| 5105 | 0 |                             nLoaded++; | 
| 5106 | 0 |                         } | 
| 5107 | 0 |                         if (state.IsError()) { | 
| 5108 | 0 |                             break; | 
| 5109 | 0 |                         } | 
| 5110 | 0 |                     } else if (hash != params.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) { | 
| 5111 | 0 |                         LogDebug(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight); | 
| 5112 | 0 |                     } | 
| 5113 | 0 |                 } | 
| 5114 |  |  | 
| 5115 |  |                 // Activate the genesis block so normal node progress can continue | 
| 5116 |  |                 // During first -reindex, this will only connect Genesis since | 
| 5117 |  |                 // ActivateBestChain only connects blocks which are in the block tree db, | 
| 5118 |  |                 // which only contains blocks whose parents are in it. | 
| 5119 |  |                 // But do this only if genesis isn't activated yet, to avoid connecting many blocks | 
| 5120 |  |                 // without assumevalid in the case of a continuation of a reindex that | 
| 5121 |  |                 // was interrupted by the user. | 
| 5122 | 0 |                 if (hash == params.GetConsensus().hashGenesisBlock && WITH_LOCK(::cs_main, return ActiveHeight()) == -1) { | 
| 5123 | 0 |                     BlockValidationState state; | 
| 5124 | 0 |                     if (!ActiveChainstate().ActivateBestChain(state, nullptr)) { | 
| 5125 | 0 |                         break; | 
| 5126 | 0 |                     } | 
| 5127 | 0 |                 } | 
| 5128 |  |  | 
| 5129 | 0 |                 if (m_blockman.IsPruneMode() && m_blockman.m_blockfiles_indexed && pblock) { | 
| 5130 |  |                     // must update the tip for pruning to work while importing with -loadblock. | 
| 5131 |  |                     // this is a tradeoff to conserve disk space at the expense of time | 
| 5132 |  |                     // spent updating the tip to be able to prune. | 
| 5133 |  |                     // otherwise, ActivateBestChain won't be called by the import process | 
| 5134 |  |                     // until after all of the block files are loaded. ActivateBestChain can be | 
| 5135 |  |                     // called by concurrent network message processing. but, that is not | 
| 5136 |  |                     // reliable for the purpose of pruning while importing. | 
| 5137 | 0 |                     bool activation_failure = false; | 
| 5138 | 0 |                     for (auto c : GetAll()) { | 
| 5139 | 0 |                         BlockValidationState state; | 
| 5140 | 0 |                         if (!c->ActivateBestChain(state, pblock)) { | 
| 5141 | 0 |                             LogDebug(BCLog::REINDEX, "failed to activate chain (%s)\n", state.ToString()); | 
| 5142 | 0 |                             activation_failure = true; | 
| 5143 | 0 |                             break; | 
| 5144 | 0 |                         } | 
| 5145 | 0 |                     } | 
| 5146 | 0 |                     if (activation_failure) { | 
| 5147 | 0 |                         break; | 
| 5148 | 0 |                     } | 
| 5149 | 0 |                 } | 
| 5150 |  |  | 
| 5151 | 0 |                 NotifyHeaderTip(); | 
| 5152 |  | 
 | 
| 5153 | 0 |                 if (!blocks_with_unknown_parent) continue; | 
| 5154 |  |  | 
| 5155 |  |                 // Recursively process earlier encountered successors of this block | 
| 5156 | 0 |                 std::deque<uint256> queue; | 
| 5157 | 0 |                 queue.push_back(hash); | 
| 5158 | 0 |                 while (!queue.empty()) { | 
| 5159 | 0 |                     uint256 head = queue.front(); | 
| 5160 | 0 |                     queue.pop_front(); | 
| 5161 | 0 |                     auto range = blocks_with_unknown_parent->equal_range(head); | 
| 5162 | 0 |                     while (range.first != range.second) { | 
| 5163 | 0 |                         std::multimap<uint256, FlatFilePos>::iterator it = range.first; | 
| 5164 | 0 |                         std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>(); | 
| 5165 | 0 |                         if (m_blockman.ReadBlock(*pblockrecursive, it->second, {})) { | 
| 5166 | 0 |                             const auto& block_hash{pblockrecursive->GetHash()}; | 
| 5167 | 0 |                             LogDebug(BCLog::REINDEX, "%s: Processing out of order child %s of %s", __func__, block_hash.ToString(), head.ToString()); | 
| 5168 | 0 |                             LOCK(cs_main); | 
| 5169 | 0 |                             BlockValidationState dummy; | 
| 5170 | 0 |                             if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr, true)) { | 
| 5171 | 0 |                                 nLoaded++; | 
| 5172 | 0 |                                 queue.push_back(block_hash); | 
| 5173 | 0 |                             } | 
| 5174 | 0 |                         } | 
| 5175 | 0 |                         range.first++; | 
| 5176 | 0 |                         blocks_with_unknown_parent->erase(it); | 
| 5177 | 0 |                         NotifyHeaderTip(); | 
| 5178 | 0 |                     } | 
| 5179 | 0 |                 } | 
| 5180 | 0 |             } catch (const std::exception& e) { | 
| 5181 |  |                 // historical bugs added extra data to the block files that does not deserialize cleanly. | 
| 5182 |  |                 // commonly this data is between readable blocks, but it does not really matter. such data is not fatal to the import process. | 
| 5183 |  |                 // the code that reads the block files deals with invalid data by simply ignoring it. | 
| 5184 |  |                 // it continues to search for the next {4 byte magic message start bytes + 4 byte length + block} that does deserialize cleanly | 
| 5185 |  |                 // and passes all of the other block validation checks dealing with POW and the merkle root, etc... | 
| 5186 |  |                 // we merely note with this informational log message when unexpected data is encountered. | 
| 5187 |  |                 // we could also be experiencing a storage system read error, or a read of a previous bad write. these are possible, but | 
| 5188 |  |                 // less likely scenarios. we don't have enough information to tell a difference here. | 
| 5189 |  |                 // the reindex process is not the place to attempt to clean and/or compact the block files. if so desired, a studious node operator | 
| 5190 |  |                 // may use knowledge of the fact that the block files are not entirely pristine in order to prepare a set of pristine, and | 
| 5191 |  |                 // perhaps ordered, block files for later reindexing. | 
| 5192 | 0 |                 LogDebug(BCLog::REINDEX, "%s: unexpected data at file offset 0x%x - %s. continuing\n", __func__, (nRewind - 1), e.what()); | 
| 5193 | 0 |             } | 
| 5194 | 0 |         } | 
| 5195 | 0 |     } catch (const std::runtime_error& e) { | 
| 5196 | 0 |         GetNotifications().fatalError(strprintf(_("System error while loading external block file: %s"), e.what())); | 
| 5197 | 0 |     } | 
| 5198 | 0 |     LogInfo("Loaded %i blocks from external file in %dms", nLoaded, Ticks<std::chrono::milliseconds>(SteadyClock::now() - start)); | 
| 5199 | 0 | } | 
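
The scanning loop above relies on the blk*.dat record framing: each record is the 4-byte network message-start magic, a 4-byte little-endian length, then the serialized block, with arbitrary garbage possible between records. A simplified sketch (a byte vector instead of BufferedFile, with the plausibility limits passed in as parameters) of that scan:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyRecord { std::size_t offset; uint32_t size; };

std::vector<ToyRecord> ScanRecords(const std::vector<uint8_t>& data,
                                   const std::array<uint8_t, 4>& magic,
                                   uint32_t max_block_size)
{
    std::vector<ToyRecord> out;
    std::size_t pos = 0;
    while (pos + 8 <= data.size()) {
        if (!std::equal(magic.begin(), magic.end(), data.begin() + pos)) {
            ++pos; // not a record start; resume one byte further, like nRewind above
            continue;
        }
        const uint32_t size = data[pos + 4] | (data[pos + 5] << 8) | (data[pos + 6] << 16) |
                              (uint32_t{data[pos + 7]} << 24);
        if (size < 80 || size > max_block_size || pos + 8 + size > data.size()) {
            ++pos; // implausible length; keep scanning for the next magic
            continue;
        }
        out.push_back({pos + 8, size}); // block payload starts after magic + length
        pos += 8 + size;
    }
    return out;
}

Like the loop above, the sketch resumes one byte past a failed match, so corrupt or truncated data between blocks cannot stall the import.
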
| 5200 |  |  | 
| 5201 |  | bool ChainstateManager::ShouldCheckBlockIndex() const | 
| 5202 | 2.06M | { | 
| 5203 |  |     // Assert to verify Flatten() has been called. | 
| 5204 | 2.06M |     if (!*Assert(m_options.check_block_index)) return false; | 
| 5205 | 2.06M |     if (FastRandomContext().randrange(*m_options.check_block_index) >= 1) return false; | 
| 5206 | 2.06M |     return true; | 
| 5207 | 2.06M | } | 
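
The sampling rule amounts to "run the expensive check on average once every N calls". A tiny sketch (using the standard <random> facilities rather than FastRandomContext) of the same rule, where the check fires only when the uniform draw lands on zero:

#include <random>

bool ShouldRunExpensiveCheck(unsigned every_n)
{
    if (every_n == 0) return false; // checking disabled
    static std::mt19937 rng{std::random_device{}()};
    std::uniform_int_distribution<unsigned> dist(0, every_n - 1);
    return dist(rng) == 0; // true roughly once per every_n calls
}
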
| 5208 |  |  | 
| 5209 |  | void ChainstateManager::CheckBlockIndex() const | 
| 5210 | 2.06M | { | 
| 5211 | 2.06M |     if (!ShouldCheckBlockIndex()) { | 
| 5212 | 0 |         return; | 
| 5213 | 0 |     } | 
| 5214 |  |  | 
| 5215 | 2.06M |     LOCK(cs_main); | 
| 5216 |  |  | 
| 5217 |  |     // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain, | 
| 5218 |  |     // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the | 
| 5219 |  |     // tests when iterating the block tree require that m_chain has been initialized.) | 
| 5220 | 2.06M |     if (ActiveChain().Height() < 0) { | 
| 5221 | 0 |         assert(m_blockman.m_block_index.size() <= 1); | 
| 5222 | 0 |         return; | 
| 5223 | 0 |     } | 
| 5224 |  |  | 
| 5225 |  |     // Build forward-pointing data structure for the entire block tree. | 
| 5226 |  |     // For performance reasons, indexes of the best header chain are stored in a vector (within CChain). | 
| 5227 |  |     // All remaining blocks are stored in a multimap. | 
| 5228 |  |     // The best header chain can differ from the active chain: E.g. its entries may belong to blocks that | 
| 5229 |  |     // are not yet validated. | 
| 5230 | 2.06M |     CChain best_hdr_chain; | 
| 5231 | 2.06M |     assert(m_best_header); | 
| 5232 | 2.06M |     assert(!(m_best_header->nStatus & BLOCK_FAILED_MASK)); | 
| 5233 | 2.06M |     best_hdr_chain.SetTip(*m_best_header); | 
| 5234 |  |  | 
| 5235 | 2.06M |     std::multimap<const CBlockIndex*, const CBlockIndex*> forward; | 
| 5236 | 419M |     for (auto& [_, block_index] : m_blockman.m_block_index) { | 
| 5237 |  |         // Only save indexes in forward that are not part of the best header chain. | 
| 5238 | 419M |         if (!best_hdr_chain.Contains(&block_index)) { | 
| 5239 |  |             // Only genesis, which must be part of the best header chain, can have a nullptr parent. | 
| 5240 | 2.45M |             assert(block_index.pprev); | 
| 5241 | 2.45M |             forward.emplace(block_index.pprev, &block_index); | 
| 5242 | 2.45M |         } | 
| 5243 | 419M |     } | 
| 5244 | 2.06M |     assert(forward.size() + best_hdr_chain.Height() + 1 == m_blockman.m_block_index.size()); | 
| 5245 |  |  | 
| 5246 | 2.06M |     const CBlockIndex* pindex = best_hdr_chain[0]; | 
| 5247 | 2.06M |     assert(pindex); | 
| 5248 |  |     // Iterate over the entire block tree, using depth-first search. | 
| 5249 |  |     // Along the way, remember whether there are blocks on the path from genesis | 
| 5250 |  |     // block being explored which are the first to have certain properties. | 
| 5251 | 2.06M |     size_t nNodes = 0; | 
| 5252 | 2.06M |     int nHeight = 0; | 
| 5253 | 2.06M |     const CBlockIndex* pindexFirstInvalid = nullptr;              // Oldest ancestor of pindex which is invalid. | 
| 5254 | 2.06M |     const CBlockIndex* pindexFirstMissing = nullptr;              // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA, since assumeutxo snapshot if used. | 
| 5255 | 2.06M |     const CBlockIndex* pindexFirstNeverProcessed = nullptr;       // Oldest ancestor of pindex for which nTx == 0, since assumeutxo snapshot if used. | 
| 5256 | 2.06M |     const CBlockIndex* pindexFirstNotTreeValid = nullptr;         // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not). | 
| 5257 | 2.06M |     const CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not), since assumeutxo snapshot if used. | 
| 5258 | 2.06M |     const CBlockIndex* pindexFirstNotChainValid = nullptr;        // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not), since assumeutxo snapshot if used. | 
| 5259 | 2.06M |     const CBlockIndex* pindexFirstNotScriptsValid = nullptr;      // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not), since assumeutxo snapshot if used. | 
| 5260 |  |  | 
| 5261 |  |     // After checking an assumeutxo snapshot block, reset pindexFirst pointers | 
| 5262 |  |     // to earlier blocks that have not been downloaded or validated yet, so | 
| 5263 |  |     // checks for later blocks can assume the earlier blocks were validated and | 
| 5264 |  |     // be stricter, testing for more requirements. | 
| 5265 | 2.06M |     const CBlockIndex* snap_base{GetSnapshotBaseBlock()}; | 
| 5266 | 2.06M |     const CBlockIndex *snap_first_missing{}, *snap_first_notx{}, *snap_first_notv{}, *snap_first_nocv{}, *snap_first_nosv{}; | 
| 5267 | 422M |     auto snap_update_firsts = [&] { | 
| 5268 | 422M |         if (pindex == snap_base) { | 
| 5269 | 0 |             std::swap(snap_first_missing, pindexFirstMissing); | 
| 5270 | 0 |             std::swap(snap_first_notx, pindexFirstNeverProcessed); | 
| 5271 | 0 |             std::swap(snap_first_notv, pindexFirstNotTransactionsValid); | 
| 5272 | 0 |             std::swap(snap_first_nocv, pindexFirstNotChainValid); | 
| 5273 | 0 |             std::swap(snap_first_nosv, pindexFirstNotScriptsValid); | 
| 5274 | 0 |         } | 
| 5275 | 422M |     }; | 
| 5276 |  |  | 
| 5277 | 419M |     while (pindex != nullptr) { | 
| 5278 | 419M |         nNodes++; | 
| 5279 | 419M |         if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; | 
| 5280 | 419M |         if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) { | 
| 5281 | 3.32M |             pindexFirstMissing = pindex; | 
| 5282 | 3.32M |         } | 
| 5283 | 419M |         if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex; | 
| 5284 | 419M |         if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex; | 
| 5285 |  |  | 
| 5286 | 419M |         if (pindex->pprev != nullptr) { | 
| 5287 | 417M |             if (pindexFirstNotTransactionsValid == nullptr && | 
| 5288 | 417M |                     (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) { | 
| 5289 | 3.32M |                 pindexFirstNotTransactionsValid = pindex; | 
| 5290 | 3.32M |             } | 
| 5291 |  |  | 
| 5292 | 417M |             if (pindexFirstNotChainValid == nullptr && | 
| 5293 | 417M |                     (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) { | 
| 5294 | 3.39M |                 pindexFirstNotChainValid = pindex; | 
| 5295 | 3.39M |             } | 
| 5296 |  |  | 
| 5297 | 417M |             if (pindexFirstNotScriptsValid == nullptr && | 
| 5298 | 417M |                     (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) { | 
| 5299 | 3.39M |                 pindexFirstNotScriptsValid = pindex; | 
| 5300 | 3.39M |             } | 
| 5301 | 417M |         } | 
| 5302 |  |  | 
| 5303 |  |         // Begin: actual consistency checks. | 
| 5304 | 419M |         if (pindex->pprev == nullptr) { | 
| 5305 |  |             // Genesis block checks. | 
| 5306 | 2.06M |             assert(pindex->GetBlockHash() == GetConsensus().hashGenesisBlock); // Genesis block's hash must match. | 
| 5307 | 4.12M |             for (const Chainstate* c : {m_ibd_chainstate.get(), m_snapshot_chainstate.get()}) { | 
| 5308 | 4.12M |                 if (c && c->m_chain.Genesis() != nullptr) { | 
| 5309 | 2.06M |                     assert(pindex == c->m_chain.Genesis()); // The chain's genesis block must be this block. | 
| 5310 | 2.06M |                 } | 
| 5311 | 4.12M |             } | 
| 5312 | 2.06M |         } | 
| 5313 | 419M |         if (!pindex->HaveNumChainTxs()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock) | 
| 5314 |  |         // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred). | 
| 5315 |  |         // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred. | 
| 5316 | 419M |         if (!m_blockman.m_have_pruned) { | 
| 5317 |  |             // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0 | 
| 5318 | 419M |             assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0)); | 
| 5319 | 419M |             assert(pindexFirstMissing == pindexFirstNeverProcessed); | 
| 5320 | 419M |         } else { | 
| 5321 |  |             // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0 | 
| 5322 | 0 |             if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0); | 
| 5323 | 0 |         } | 
| 5324 | 419M |         if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA); | 
| 5325 | 419M |         if (snap_base && snap_base->GetAncestor(pindex->nHeight) == pindex) { | 
| 5326 |  |             // Assumed-valid blocks should connect to the main chain. | 
| 5327 | 0 |             assert((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE); | 
| 5328 | 0 |         } | 
| 5329 |  |         // There should only be an nTx value if we have | 
| 5330 |  |         // actually seen a block's transactions. | 
| 5331 | 419M |         assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent. | 
| 5332 |  |         // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveNumChainTxs(). | 
| 5333 |  |         // HaveNumChainTxs will also be set in the assumeutxo snapshot block from snapshot metadata. | 
| 5334 | 419M |         assert((pindexFirstNeverProcessed == nullptr || pindex == snap_base) == pindex->HaveNumChainTxs()); | 
| 5335 | 419M |         assert((pindexFirstNotTransactionsValid == nullptr || pindex == snap_base) == pindex->HaveNumChainTxs()); | 
| 5336 | 419M |         assert(pindex->nHeight == nHeight); // nHeight must be consistent. | 
| 5337 | 419M |         assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's. | 
| 5338 | 419M |         assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks. | 
| 5339 | 419M |         assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid | 
| 5340 | 419M |         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid | 
| 5341 | 419M |         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid | 
| 5342 | 419M |         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid | 
| 5343 | 419M |         if (pindexFirstInvalid == nullptr) { | 
| 5344 |  |             // Checks for not-invalid blocks. | 
| 5345 | 419M |             assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents. | 
| 5346 | 419M |         } else { | 
| 5347 | 108k |             assert(pindex->nStatus & BLOCK_FAILED_MASK); // Invalid blocks and their descendants must be marked as invalid | 
| 5348 | 108k |         } | 
| 5349 |  |         // Make sure m_chain_tx_count sum is correctly computed. | 
| 5350 | 419M |         if (!pindex->pprev) { | 
| 5351 |  |             // If no previous block, nTx and m_chain_tx_count must be the same. | 
| 5352 | 2.06M |             assert(pindex->m_chain_tx_count == pindex->nTx); | 
| 5353 | 417M |         } else if (pindex->pprev->m_chain_tx_count > 0 && pindex->nTx > 0) { | 
| 5354 |  |             // If previous m_chain_tx_count is set and number of transactions in block is known, sum must be set. | 
| 5355 | 414M |             assert(pindex->m_chain_tx_count == pindex->nTx + pindex->pprev->m_chain_tx_count); | 
| 5356 | 414M |         } else { | 
| 5357 |  |             // Otherwise m_chain_tx_count should only be set if this is a snapshot | 
| 5358 |  |             // block, and must be set if it is. | 
| 5359 | 3.60M |             assert((pindex->m_chain_tx_count != 0) == (pindex == snap_base)); | 
| 5360 | 3.60M |         } | 
| 5361 |  |         // There should be no block with more work than m_best_header, unless it's known to be invalid | 
| 5362 | 419M |         assert((pindex->nStatus & BLOCK_FAILED_MASK) || pindex->nChainWork <= m_best_header->nChainWork); | 
| 5363 |  |  | 
| 5364 |  |         // Chainstate-specific checks on setBlockIndexCandidates | 
| 5365 | 839M |         for (const Chainstate* c : {m_ibd_chainstate.get(), m_snapshot_chainstate.get()}) { | 
| 5366 | 839M |             if (!c || c->m_chain.Tip() == nullptr) continue; | 
| 5367 |  |             // Two main factors determine whether pindex is a candidate in | 
| 5368 |  |             // setBlockIndexCandidates: | 
| 5369 |  |             // | 
| 5370 |  |             // - If pindex has less work than the chain tip, it should not be a | 
| 5371 |  |             //   candidate, and this will be asserted below. Otherwise it is a | 
| 5372 |  |             //   potential candidate. | 
| 5373 |  |             // | 
| 5374 |  |             // - If pindex or one of its parent blocks back to the genesis block | 
| 5375 |  |             //   or an assumeutxo snapshot never downloaded transactions | 
| 5376 |  |             //   (pindexFirstNeverProcessed is non-null), it should not be a | 
| 5377 |  |             //   candidate, and this will be asserted below. The only exception | 
| 5378 |  |             //   is if pindex itself is an assumeutxo snapshot block. Then it is | 
| 5379 |  |             //   also a potential candidate. | 
| 5380 | 419M |             if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && (pindexFirstNeverProcessed == nullptr || pindex == snap_base)) { | 
| 5381 |  |                 // If pindex was detected as invalid (pindexFirstInvalid is | 
| 5382 |  |                 // non-null), it is not required to be in | 
| 5383 |  |                 // setBlockIndexCandidates. | 
| 5384 | 2.14M |                 if (pindexFirstInvalid == nullptr) { | 
| 5385 |  |                     // If pindex and all its parents back to the genesis block | 
| 5386 |  |                     // or an assumeutxo snapshot block downloaded transactions, | 
| 5387 |  |                     // and the transactions were not pruned (pindexFirstMissing | 
| 5388 |  |                     // is null), it is a potential candidate. The check | 
| 5389 |  |                     // excludes pruned blocks, because if any blocks were | 
| 5390 |  |                     // pruned between pindex and the current chain tip, pindex will | 
| 5391 |  |                     // only temporarily be added to setBlockIndexCandidates, | 
| 5392 |  |                     // before being moved to m_blocks_unlinked. This check | 
| 5393 |  |                     // could be improved to verify that if all blocks between | 
| 5394 |  |                     // the chain tip and pindex have data, pindex must be a | 
| 5395 |  |                     // candidate. | 
| 5396 |  |                     // | 
| 5397 |  |                     // If pindex is the chain tip, it also is a potential | 
| 5398 |  |                     // candidate. | 
| 5399 |  |                     // | 
| 5400 |  |                     // If the chainstate was loaded from a snapshot and pindex | 
| 5401 |  |                     // is the base of the snapshot, pindex is also a potential | 
| 5402 |  |                     // candidate. | 
| 5403 | 2.08M |                     if (pindexFirstMissing == nullptr || pindex == c->m_chain.Tip() || pindex == c->SnapshotBase()) { | 
| 5404 |  |                         // If this chainstate is the active chainstate, pindex | 
| 5405 |  |                         // must be in setBlockIndexCandidates. Otherwise, this | 
| 5406 |  |                         // chainstate is a background validation chainstate, and | 
| 5407 |  |                         // pindex only needs to be added if it is an ancestor of | 
| 5408 |  |                         // the snapshot that is being validated. | 
| 5409 | 2.08M |                         if (c == &ActiveChainstate() || snap_base->GetAncestor(pindex->nHeight) == pindex) { | 
| 5410 | 2.08M |                             assert(c->setBlockIndexCandidates.contains(const_cast<CBlockIndex*>(pindex))); | 
| 5411 | 2.08M |                         } | 
| 5412 | 2.08M |                     } | 
| 5413 |  |                     // If some parent is missing, then it could be that this block was in | 
| 5414 |  |                     // setBlockIndexCandidates but had to be removed because of the missing data. | 
| 5415 |  |                     // In this case it must be in m_blocks_unlinked -- see test below. | 
| 5416 | 2.08M |                 } | 
| 5417 | 417M |             } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates. | 
| 5418 | 417M |                 assert(!c->setBlockIndexCandidates.contains(const_cast<CBlockIndex*>(pindex))); | 
| 5419 | 417M |             } | 
| 5420 | 419M |         } | 
| 5421 |  |         // Check whether this block is in m_blocks_unlinked. | 
| 5422 | 419M |         auto rangeUnlinked{m_blockman.m_blocks_unlinked.equal_range(pindex->pprev)}; | 
| 5423 | 419M |         bool foundInUnlinked = false; | 
| 5424 | 419M |         while (rangeUnlinked.first != rangeUnlinked.second) { | 
| 5425 | 190k |             assert(rangeUnlinked.first->first == pindex->pprev); | 
| 5426 | 190k |             if (rangeUnlinked.first->second == pindex) { | 
| 5427 | 125k |                 foundInUnlinked = true; | 
| 5428 | 125k |                 break; | 
| 5429 | 125k |             } | 
| 5430 | 64.7k |             rangeUnlinked.first++; | 
| 5431 | 64.7k |         } | 
| 5432 | 419M |         if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) { | 
| 5433 |  |             // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked. | 
| 5434 | 125k |             assert(foundInUnlinked); | 
| 5435 | 125k |         } | 
| 5436 | 419M |         if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA | 
| 5437 | 419M |         if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked. | 
| 5438 | 419M |         if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) { | 
| 5439 |  |             // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent. | 
| 5440 | 0 |             assert(m_blockman.m_have_pruned); | 
| 5441 |  |             // This block may have entered m_blocks_unlinked if: | 
| 5442 |  |             //  - it has a descendant that at some point had more work than the | 
| 5443 |  |             //    tip, and | 
| 5444 |  |             //  - we tried switching to that descendant but were missing | 
| 5445 |  |             //    data for some intermediate block between m_chain and the | 
| 5446 |  |             //    tip. | 
| 5447 |  |             // So if this block is itself better than any m_chain.Tip() and it wasn't in | 
| 5448 |  |             // setBlockIndexCandidates, then it must be in m_blocks_unlinked. | 
| 5449 | 0 |             for (const Chainstate* c : {m_ibd_chainstate.get(), m_snapshot_chainstate.get()}) { | 
| 5450 | 0 |                 if (!c) continue; | 
| 5451 | 0 |                 const bool is_active = c == &ActiveChainstate(); | 
| 5452 | 0 |                 if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && !c->setBlockIndexCandidates.contains(const_cast<CBlockIndex*>(pindex))) { | 
| 5453 | 0 |                     if (pindexFirstInvalid == nullptr) { | 
| 5454 | 0 |                         if (is_active || snap_base->GetAncestor(pindex->nHeight) == pindex) { | 
| 5455 | 0 |                             assert(foundInUnlinked); | 
| 5456 | 0 |                         } | 
| 5457 | 0 |                     } | 
| 5458 | 0 |                 } | 
| 5459 | 0 |             } | 
| 5460 | 0 |         } | 
| 5461 |  |         // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow | 
| 5462 |  |         // End: actual consistency checks. | 
| 5463 |  |  | 
| 5464 |  |  | 
| 5465 |  |         // Try descending into the first subnode. Always process forks first and the best header chain after. | 
| 5466 | 419M |         snap_update_firsts(); | 
| 5467 | 419M |         auto range{forward.equal_range(pindex)}; | 
| 5468 | 419M |         if (range.first != range.second) { | 
| 5469 |  |             // A subnode not part of the best header chain was found. | 
| 5470 | 1.53M |             pindex = range.first->second; | 
| 5471 | 1.53M |             nHeight++; | 
| 5472 | 1.53M |             continue; | 
| 5473 | 418M |         } else if (best_hdr_chain.Contains(pindex)) { | 
| 5474 |  |             // Descend further into best header chain. | 
| 5475 | 415M |             nHeight++; | 
| 5476 | 415M |             pindex = best_hdr_chain[nHeight]; | 
| 5477 | 415M |             if (!pindex) break; // we are finished, since the best header chain is always processed last | 
| 5478 | 413M |             continue; | 
| 5479 | 415M |         } | 
| 5480 |  |         // This is a leaf node. | 
| 5481 |  |         // Move upwards until we reach a node of which we have not yet visited the last child. | 
| 5482 | 2.45M |         while (pindex) { | 
| 5483 |  |             // We are going to either move to a parent or a sibling of pindex. | 
| 5484 | 2.45M |             snap_update_firsts(); | 
| 5485 |  |             // If pindex was the first with a certain property, unset the corresponding variable. | 
| 5486 | 2.45M |             if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr; | 
| 5487 | 2.45M |             if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr; | 
| 5488 | 2.45M |             if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr; | 
| 5489 | 2.45M |             if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr; | 
| 5490 | 2.45M |             if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr; | 
| 5491 | 2.45M |             if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr; | 
| 5492 | 2.45M |             if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr; | 
| 5493 |  |             // Find our parent. | 
| 5494 | 2.45M |             CBlockIndex* pindexPar = pindex->pprev; | 
| 5495 |  |             // Find which child we just visited. | 
| 5496 | 2.45M |             auto rangePar{forward.equal_range(pindexPar)}; | 
| 5497 | 8.33M |             while (rangePar.first->second != pindex) { | 
| 5498 | 5.88M |                 assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child. | 
| 5499 | 5.88M |                 rangePar.first++; | 
| 5500 | 5.88M |             } | 
| 5501 |  |             // Proceed to the next one. | 
| 5502 | 2.45M |             rangePar.first++; | 
| 5503 | 2.45M |             if (rangePar.first != rangePar.second) { | 
| 5504 |  |                 // Move to a sibling not part of the best header chain. | 
| 5505 | 912k |                 pindex = rangePar.first->second; | 
| 5506 | 912k |                 break; | 
| 5507 | 1.53M |             } else if (pindexPar == best_hdr_chain[nHeight - 1]) { | 
| 5508 |  |                 // Move to pindex's sibling on the best-chain, if it has one. | 
| 5509 | 1.48M |                 pindex = best_hdr_chain[nHeight]; | 
| 5510 |  |                 // There will not be a next block if (and only if) parent block is the best header. | 
| 5511 | 1.48M |                 assert((pindex == nullptr) == (pindexPar == best_hdr_chain.Tip())); | 
| 5512 | 1.48M |                 break; | 
| 5513 | 1.48M |             } else { | 
| 5514 |  |                 // Move up further. | 
| 5515 | 48.5k |                 pindex = pindexPar; | 
| 5516 | 48.5k |                 nHeight--; | 
| 5517 | 48.5k |                 continue; | 
| 5518 | 48.5k |             } | 
| 5519 | 2.45M |         } | 
| 5520 | 2.40M |     } | 
| 5521 |  |  | 
| 5522 |  |     // Check that we actually traversed the entire block index. | 
| 5523 | 2.06M |     assert(nNodes == forward.size() + best_hdr_chain.Height() + 1); | 
| 5524 | 2.06M | } | 
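
The traversal above can be summarized as: children that are not on the best header chain live in a multimap keyed by their parent, and a depth-first walk visits every index exactly once so per-node invariants can be asserted along the way. A compact sketch (toy node type, a stack-based DFS instead of the explicit parent-backtracking used above):

#include <cstddef>
#include <map>
#include <vector>

struct ToyNode { ToyNode* parent{nullptr}; }; // mirrors pprev; used when building 'forward'

std::size_t CountTree(ToyNode* root, const std::multimap<ToyNode*, ToyNode*>& forward)
{
    std::size_t visited = 0;
    std::vector<ToyNode*> stack{root};
    while (!stack.empty()) {
        ToyNode* node = stack.back();
        stack.pop_back();
        ++visited; // the per-node consistency checks would run here
        auto range = forward.equal_range(node);
        for (auto it = range.first; it != range.second; ++it) {
            stack.push_back(it->second);
        }
    }
    return visited;
}

Comparing the visit count against the size of the forward map plus the best-chain length mirrors the final nNodes assertion, which confirms the whole block index was traversed.
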
| 5525 |  |  | 
| 5526 |  | std::string Chainstate::ToString() | 
| 5527 | 51.2k | { | 
| 5528 | 51.2k |     AssertLockHeld(::cs_main); | 
| 5529 | 51.2k |     CBlockIndex* tip = m_chain.Tip(); | 
| 5530 | 51.2k |     return strprintf("Chainstate [%s] @ height %d (%s)", | 
| 5531 | 51.2k |                      m_from_snapshot_blockhash ? "snapshot" : "ibd", | 
| 5532 | 51.2k |                      tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null"); | 
| 5533 | 51.2k | } | 
| 5534 |  |  | 
| 5535 |  | bool Chainstate::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size) | 
| 5536 | 51.2k | { | 
| 5537 | 51.2k |     AssertLockHeld(::cs_main); | 
| 5538 | 51.2k |     if (coinstip_size == m_coinstip_cache_size_bytes && | 
| 5539 | 51.2k |             coinsdb_size == m_coinsdb_cache_size_bytes) { | 
| 5540 |  |         // Cache sizes are unchanged, no need to continue. | 
| 5541 | 51.2k |         return true; | 
| 5542 | 51.2k |     } | 
| 5543 | 0 |     size_t old_coinstip_size = m_coinstip_cache_size_bytes; | 
| 5544 | 0 |     m_coinstip_cache_size_bytes = coinstip_size; | 
| 5545 | 0 |     m_coinsdb_cache_size_bytes = coinsdb_size; | 
| 5546 | 0 |     CoinsDB().ResizeCache(coinsdb_size); | 
| 5547 |  | 
 | 
| 5548 | 0 |     LogInfo("[%s] resized coinsdb cache to %.1f MiB", | 
| 5549 | 0 |         this->ToString(), coinsdb_size * (1.0 / 1024 / 1024)); | 
| 5550 | 0 |     LogInfo("[%s] resized coinstip cache to %.1f MiB", | 
| 5551 | 0 |         this->ToString(), coinstip_size * (1.0 / 1024 / 1024)); | 
| 5552 |  | 
 | 
| 5553 | 0 |     BlockValidationState state; | 
| 5554 | 0 |     bool ret; | 
| 5555 |  | 
 | 
| 5556 | 0 |     if (coinstip_size > old_coinstip_size) { | 
| 5557 |  |         // Likely no need to flush if cache sizes have grown. | 
| 5558 | 0 |         ret = FlushStateToDisk(state, FlushStateMode::IF_NEEDED); | 
| 5559 | 0 |     } else { | 
| 5560 |  |         // Otherwise, flush state to disk and deallocate the in-memory coins map. | 
| 5561 | 0 |         ret = FlushStateToDisk(state, FlushStateMode::ALWAYS); | 
| 5562 | 0 |     } | 
| 5563 | 0 |     return ret; | 
| 5564 | 51.2k | } | 
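|  |  |  | 
|  |  | // Editor's note: illustrative sketch, not part of validation.cpp; ChooseFlushMode is a | 
|  |  | // hypothetical helper. It isolates the policy applied above: a grown cache still fits | 
|  |  | // within its new limit, so flushing stays opportunistic (IF_NEEDED), while a shrunken | 
|  |  | // cache must evict entries, so it is flushed unconditionally (ALWAYS). | 
|  |  | #include <cstddef> | 
|  |  |  | 
|  |  | enum class ExampleFlushMode { IF_NEEDED, ALWAYS }; | 
|  |  |  | 
|  |  | inline ExampleFlushMode ChooseFlushMode(std::size_t new_coinstip_bytes, std::size_t old_coinstip_bytes) | 
|  |  | { | 
|  |  |     // Growing: nothing needs to be evicted, so only flush if otherwise required. | 
|  |  |     // Shrinking: the in-memory coins map must be deallocated, so always flush. | 
|  |  |     return new_coinstip_bytes > old_coinstip_bytes ? ExampleFlushMode::IF_NEEDED | 
|  |  |                                                    : ExampleFlushMode::ALWAYS; | 
|  |  | } | 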
| 5565 |  |  | 
| 5566 |  | double ChainstateManager::GuessVerificationProgress(const CBlockIndex* pindex) const | 
| 5567 | 146k | { | 
| 5568 | 146k |     AssertLockHeld(GetMutex()); | Line | Count | Source |  | 137 | 146k | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) | 
 | 
| 5569 | 146k |     const ChainTxData& data{GetParams().TxData()}; | 
| 5570 | 146k |     if (pindex == nullptr) { | 
| 5571 | 0 |         return 0.0; | 
| 5572 | 0 |     } | 
| 5573 |  |  | 
| 5574 | 146k |     if (pindex->m_chain_tx_count == 0) { | 
| 5575 | 0 |         LogDebug(BCLog::VALIDATION, "Block %d has unset m_chain_tx_count. Unable to estimate verification progress.\n", pindex->nHeight); | Line | Count | Source |  | 381 | 0 | #define LogDebug(category, ...) LogPrintLevel(category, BCLog::Level::Debug, __VA_ARGS__) | Line | Count | Source |  | 373 | 0 |     do {                                                              \ |  | 374 | 0 |         if (LogAcceptCategory((category), (level))) {                 \ |  | 375 | 0 |             bool rate_limit{level >= BCLog::Level::Info};             \ |  | 376 | 0 |             LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 |  | 377 | 0 |         }                                                             \ |  | 378 | 0 |     } while (0) | 
 | 
 | 
| 5576 | 0 |         return 0.0; | 
| 5577 | 0 |     } | 
| 5578 |  |  | 
| 5579 | 146k |     const int64_t nNow{TicksSinceEpoch<std::chrono::seconds>(NodeClock::now())}; | 
| 5580 | 146k |     const auto block_time{ | 
| 5581 | 146k |         (Assume(m_best_header) && std::abs(nNow - pindex->GetBlockTime()) <= Ticks<std::chrono::seconds>(2h) && | Line | Count | Source |  | 118 | 292k | #define Assume(val) inline_assertion_check<false>(val, __FILE__, __LINE__, __func__, #val) | 
 | 
| 5582 | 146k |          Assume(m_best_header->nHeight >= pindex->nHeight)) ? | Line | Count | Source |  | 118 | 40.6k | #define Assume(val) inline_assertion_check<false>(val, __FILE__, __LINE__, __func__, #val) | 
 | 
| 5583 |  |             // When the header is known to be recent, switch to a height-based | 
| 5584 |  |             // approach. This ensures the returned value is quantized when | 
| 5585 |  |             // close to "1.0", because some users expect it to be. This also | 
| 5586 |  |             // avoids relying too much on the exact miner-set timestamp, which | 
| 5587 |  |             // may be off. | 
| 5588 | 40.6k |             nNow - (m_best_header->nHeight - pindex->nHeight) * GetConsensus().nPowTargetSpacing : | 
| 5589 | 146k |             pindex->GetBlockTime(), | 
| 5590 | 146k |     }; | 
| 5591 |  |  | 
| 5592 | 146k |     double fTxTotal; | 
| 5593 |  |  | 
| 5594 | 146k |     if (pindex->m_chain_tx_count <= data.tx_count) { | 
| 5595 | 0 |         fTxTotal = data.tx_count + (nNow - data.nTime) * data.dTxRate; | 
| 5596 | 146k |     } else { | 
| 5597 | 146k |         fTxTotal = pindex->m_chain_tx_count + (nNow - block_time) * data.dTxRate; | 
| 5598 | 146k |     } | 
| 5599 |  |  | 
| 5600 | 146k |     return std::min<double>(pindex->m_chain_tx_count / fTxTotal, 1.0); | 
| 5601 | 146k | } | 
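|  |  |  | 
|  |  | // Editor's note: illustrative sketch, not part of validation.cpp; names are hypothetical. | 
|  |  | // It restates the estimate above with plain scalars: the expected total transaction count | 
|  |  | // is the count already connected plus the hard-coded chain tx rate extrapolated from the | 
|  |  | // (possibly height-quantized) block time to "now", and the result is capped at 1.0. | 
|  |  | #include <algorithm> | 
|  |  | #include <cstdint> | 
|  |  |  | 
|  |  | double EstimateVerificationProgress(uint64_t chain_tx_count, int64_t now, int64_t block_time, double tx_rate) | 
|  |  | { | 
|  |  |     if (chain_tx_count == 0) return 0.0; | 
|  |  |     const double expected_total = chain_tx_count + (now - block_time) * tx_rate; | 
|  |  |     return std::min<double>(chain_tx_count / expected_total, 1.0); | 
|  |  | } | 
|  |  |  | 
|  |  | // Example: 900'000'000 txs connected, tip 600 s behind "now", ~4 tx/s assumed rate: | 
|  |  | // 900'000'000 / (900'000'000 + 600 * 4) ≈ 0.9999973. | 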
| 5602 |  |  | 
| 5603 |  | std::optional<uint256> ChainstateManager::SnapshotBlockhash() const | 
| 5604 | 51.2k | { | 
| 5605 | 51.2k |     LOCK(::cs_main); | Line | Count | Source |  | 259 | 51.2k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 51.2k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 51.2k | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 51.2k | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 5606 | 51.2k |     if (m_active_chainstate && m_active_chainstate->m_from_snapshot_blockhash) { | 
| 5607 |  |         // If a snapshot chainstate exists, it will always be our active chainstate. | 
| 5608 | 0 |         return m_active_chainstate->m_from_snapshot_blockhash; | 
| 5609 | 0 |     } | 
| 5610 | 51.2k |     return std::nullopt; | 
| 5611 | 51.2k | } | 
| 5612 |  |  | 
| 5613 |  | std::vector<Chainstate*> ChainstateManager::GetAll() | 
| 5614 | 10.6M | { | 
| 5615 | 10.6M |     LOCK(::cs_main); | Line | Count | Source |  | 259 | 10.6M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 10.6M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 10.6M | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 10.6M | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 5616 | 10.6M |     std::vector<Chainstate*> out; | 
| 5617 |  |  | 
| 5618 | 21.2M |     for (Chainstate* cs : {m_ibd_chainstate.get(), m_snapshot_chainstate.get()}) { | 
| 5619 | 21.2M |         if (this->IsUsable(cs)) out.push_back(cs); | 
| 5620 | 21.2M |     } | 
| 5621 |  |  | 
| 5622 | 10.6M |     return out; | 
| 5623 | 10.6M | } | 
| 5624 |  |  | 
| 5625 |  | Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool) | 
| 5626 | 51.2k | { | 
| 5627 | 51.2k |     AssertLockHeld(::cs_main); | Line | Count | Source |  | 137 | 51.2k | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) | 
 | 
| 5628 | 51.2k |     assert(!m_ibd_chainstate); | 
| 5629 | 51.2k |     assert(!m_active_chainstate); | 
| 5630 |  |  | 
| 5631 | 51.2k |     m_ibd_chainstate = std::make_unique<Chainstate>(mempool, m_blockman, *this); | 
| 5632 | 51.2k |     m_active_chainstate = m_ibd_chainstate.get(); | 
| 5633 | 51.2k |     return *m_active_chainstate; | 
| 5634 | 51.2k | } | 
| 5635 |  |  | 
| 5636 |  | [[nodiscard]] static bool DeleteCoinsDBFromDisk(const fs::path db_path, bool is_snapshot) | 
| 5637 |  |     EXCLUSIVE_LOCKS_REQUIRED(::cs_main) | 
| 5638 | 0 | { | 
| 5639 | 0 |     AssertLockHeld(::cs_main); | Line | Count | Source |  | 137 | 0 | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) | 
 | 
| 5640 |  | 
 | 
| 5641 | 0 |     if (is_snapshot) { | 
| 5642 | 0 |         fs::path base_blockhash_path = db_path / node::SNAPSHOT_BLOCKHASH_FILENAME; | 
| 5643 |  | 
 | 
| 5644 | 0 |         try { | 
| 5645 | 0 |             bool existed = fs::remove(base_blockhash_path); | 
| 5646 | 0 |             if (!existed) { | 
| 5647 | 0 |                 LogPrintf("[snapshot] snapshot chainstate dir being removed lacks %s file\n",| Line | Count | Source |  | 361 | 0 | #define LogPrintf(...) LogInfo(__VA_ARGS__) | Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
 | 
| 5648 | 0 |                           fs::PathToString(node::SNAPSHOT_BLOCKHASH_FILENAME)); | 
| 5649 | 0 |             } | 
| 5650 | 0 |         } catch (const fs::filesystem_error& e) { | 
| 5651 | 0 |             LogWarning("[snapshot] failed to remove file %s: %s\n",| Line | Count | Source |  | 357 | 0 | #define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 5652 | 0 |                        fs::PathToString(base_blockhash_path), e.code().message()); | 
| 5653 | 0 |         } | 
| 5654 | 0 |     } | 
| 5655 |  | 
 | 
| 5656 | 0 |     std::string path_str = fs::PathToString(db_path); | 
| 5657 | 0 |     LogInfo("Removing leveldb dir at %s\n", path_str);| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 5658 |  |  | 
| 5659 |  |     // We have to destruct the leveldb::DB before this call in order to release the db | 
| 5660 |  |     // lock; otherwise `DestroyDB` will fail. See `leveldb::~DBImpl()`. | 
| 5661 | 0 |     const bool destroyed = DestroyDB(path_str); | 
| 5662 |  | 
 | 
| 5663 | 0 |     if (!destroyed) { | 
| 5664 | 0 |         LogPrintf("error: leveldb DestroyDB call failed on %s\n", path_str);| Line | Count | Source |  | 361 | 0 | #define LogPrintf(...) LogInfo(__VA_ARGS__) | Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
 | 
| 5665 | 0 |     } | 
| 5666 |  |  | 
| 5667 |  |     // The datadir should be removed from the filesystem; otherwise initialization may detect | 
| 5668 |  |     // it on subsequent startups and get confused. | 
| 5669 |  |     // | 
| 5670 |  |     // If the base_blockhash_path removal above fails in the case of snapshot | 
| 5671 |  |     // chainstates, this will return false since leveldb won't remove a non-empty | 
| 5672 |  |     // directory. | 
| 5673 | 0 |     return destroyed && !fs::exists(db_path); | 
| 5674 | 0 | } | 
| 5675 |  |  | 
| 5676 |  | util::Result<CBlockIndex*> ChainstateManager::ActivateSnapshot( | 
| 5677 |  |         AutoFile& coins_file, | 
| 5678 |  |         const SnapshotMetadata& metadata, | 
| 5679 |  |         bool in_memory) | 
| 5680 | 0 | { | 
| 5681 | 0 |     uint256 base_blockhash = metadata.m_base_blockhash; | 
| 5682 |  | 
 | 
| 5683 | 0 |     if (this->SnapshotBlockhash()) { | 
| 5684 | 0 |         return util::Error{Untranslated("Can't activate a snapshot-based chainstate more than once")}; | 
| 5685 | 0 |     } | 
| 5686 |  |  | 
| 5687 | 0 |     CBlockIndex* snapshot_start_block{}; | 
| 5688 |  | 
 | 
| 5689 | 0 |     { | 
| 5690 | 0 |         LOCK(::cs_main); | Line | Count | Source |  | 259 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 5691 |  | 
 | 
| 5692 | 0 |         if (!GetParams().AssumeutxoForBlockhash(base_blockhash).has_value()) { | 
| 5693 | 0 |             auto available_heights = GetParams().GetAvailableSnapshotHeights(); | 
| 5694 | 0 |             std::string heights_formatted = util::Join(available_heights, ", ", [&](const auto& i) { return util::ToString(i); }); | 
| 5695 | 0 |             return util::Error{Untranslated(strprintf("assumeutxo block hash in snapshot metadata not recognized (hash: %s). The following snapshot heights are available: %s",| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5696 | 0 |                 base_blockhash.ToString(), | 
| 5697 | 0 |                 heights_formatted))}; | 
| 5698 | 0 |         } | 
| 5699 |  |  | 
| 5700 | 0 |         snapshot_start_block = m_blockman.LookupBlockIndex(base_blockhash); | 
| 5701 | 0 |         if (!snapshot_start_block) { | 
| 5702 | 0 |             return util::Error{Untranslated(strprintf("The base block header (%s) must appear in the headers chain. Make sure all headers are syncing, and call loadtxoutset again",| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5703 | 0 |                           base_blockhash.ToString()))}; | 
| 5704 | 0 |         } | 
| 5705 |  |  | 
| 5706 | 0 |         bool start_block_invalid = snapshot_start_block->nStatus & BLOCK_FAILED_MASK; | 
| 5707 | 0 |         if (start_block_invalid) { | 
| 5708 | 0 |             return util::Error{Untranslated(strprintf("The base block header (%s) is part of an invalid chain", base_blockhash.ToString()))};| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5709 | 0 |         } | 
| 5710 |  |  | 
| 5711 | 0 |         if (!m_best_header || m_best_header->GetAncestor(snapshot_start_block->nHeight) != snapshot_start_block) { | 
| 5712 | 0 |             return util::Error{Untranslated("A forked headers-chain with more work than the chain with the snapshot base block header exists. Please proceed to sync without AssumeUtxo.")}; | 
| 5713 | 0 |         } | 
| 5714 |  |  | 
| 5715 | 0 |         auto mempool{m_active_chainstate->GetMempool()}; | 
| 5716 | 0 |         if (mempool && mempool->size() > 0) { | 
| 5717 | 0 |             return util::Error{Untranslated("Can't activate a snapshot when mempool not empty")}; | 
| 5718 | 0 |         } | 
| 5719 | 0 |     } | 
| 5720 |  |  | 
| 5721 | 0 |     int64_t current_coinsdb_cache_size{0}; | 
| 5722 | 0 |     int64_t current_coinstip_cache_size{0}; | 
| 5723 |  |  | 
| 5724 |  |     // Cache percentages to allocate to each chainstate. | 
| 5725 |  |     // | 
| 5726 |  |     // These particular percentages don't matter so much since they will only be | 
| 5727 |  |     // relevant during snapshot activation; caches are rebalanced at the conclusion of | 
| 5728 |  |     // this function. We want to give (essentially) all available cache capacity to the | 
| 5729 |  |     // snapshot to aid the bulk load later in this function. | 
| 5730 | 0 |     static constexpr double IBD_CACHE_PERC = 0.01; | 
| 5731 | 0 |     static constexpr double SNAPSHOT_CACHE_PERC = 0.99; | 
| 5732 |  | 
 | 
| 5733 | 0 |     { | 
| 5734 | 0 |         LOCK(::cs_main); | Line | Count | Source |  | 259 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 5735 |  |         // Resize the coins caches to ensure we're not exceeding memory limits. | 
| 5736 |  |         // | 
| 5737 |  |         // Allocate the majority of the cache to the incoming snapshot chainstate, since | 
| 5738 |  |         // (optimistically) getting to its tip will be the top priority. We'll need to call | 
| 5739 |  |         // `MaybeRebalanceCaches()` once we're done with this function to ensure | 
| 5740 |  |         // the right allocation (including the possibility that no snapshot was activated | 
| 5741 |  |         // and that we should restore the active chainstate caches to their original size). | 
| 5742 |  |         // | 
| 5743 | 0 |         current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes; | 
| 5744 | 0 |         current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes; | 
| 5745 |  |  | 
| 5746 |  |         // Temporarily resize the active coins cache to make room for the newly-created | 
| 5747 |  |         // snapshot chain. | 
| 5748 | 0 |         this->ActiveChainstate().ResizeCoinsCaches( | 
| 5749 | 0 |             static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC), | 
| 5750 | 0 |             static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC)); | 
| 5751 | 0 |     } | 
| 5752 |  | 
 | 
| 5753 | 0 |     auto snapshot_chainstate = WITH_LOCK(::cs_main, | Line | Count | Source |  | 290 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) | 
 | 
| 5754 | 0 |         return std::make_unique<Chainstate>( | 
| 5755 | 0 |             /*mempool=*/nullptr, m_blockman, *this, base_blockhash)); | 
| 5756 |  | 
 | 
| 5757 | 0 |     { | 
| 5758 | 0 |         LOCK(::cs_main); | Line | Count | Source |  | 259 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 5759 | 0 |         snapshot_chainstate->InitCoinsDB( | 
| 5760 | 0 |             static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC), | 
| 5761 | 0 |             in_memory, false, "chainstate"); | 
| 5762 | 0 |         snapshot_chainstate->InitCoinsCache( | 
| 5763 | 0 |             static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC)); | 
| 5764 | 0 |     } | 
| 5765 |  | 
 | 
| 5766 | 0 |     auto cleanup_bad_snapshot = [&](bilingual_str reason) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { | 
| 5767 | 0 |         this->MaybeRebalanceCaches(); | 
| 5768 |  |  | 
| 5769 |  |         // PopulateAndValidateSnapshot can return (in error) before the leveldb datadir | 
| 5770 |  |         // has been created, so only attempt removal if we got that far. | 
| 5771 | 0 |         if (auto snapshot_datadir = node::FindSnapshotChainstateDir(m_options.datadir)) { | 
| 5772 |  |             // We have to destruct leveldb::DB in order to release the db lock, otherwise | 
| 5773 |  |             // DestroyDB() (in DeleteCoinsDBFromDisk()) will fail. See `leveldb::~DBImpl()`. | 
| 5774 |  |             // Destructing the chainstate (and so resetting the coinsviews object) does this. | 
| 5775 | 0 |             snapshot_chainstate.reset(); | 
| 5776 | 0 |             bool removed = DeleteCoinsDBFromDisk(*snapshot_datadir, /*is_snapshot=*/true); | 
| 5777 | 0 |             if (!removed) { | 
| 5778 | 0 |                 GetNotifications().fatalError(strprintf(_("Failed to remove snapshot chainstate dir (%s). "| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5779 | 0 |                     "Manually remove it before restarting.\n"), fs::PathToString(*snapshot_datadir))); | 
| 5780 | 0 |             } | 
| 5781 | 0 |         } | 
| 5782 | 0 |         return util::Error{std::move(reason)}; | 
| 5783 | 0 |     }; | 
| 5784 |  | 
 | 
| 5785 | 0 |     if (auto res{this->PopulateAndValidateSnapshot(*snapshot_chainstate, coins_file, metadata)}; !res) { | 
| 5786 | 0 |         LOCK(::cs_main); | Line | Count | Source |  | 259 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 5787 | 0 |         return cleanup_bad_snapshot(Untranslated(strprintf("Population failed: %s", util::ErrorString(res).original)));| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5788 | 0 |     } | 
| 5789 |  |  | 
| 5790 | 0 |     LOCK(::cs_main);  // cs_main required for rest of snapshot activation. | Line | Count | Source |  | 259 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 5791 |  |  | 
| 5792 |  |     // Do a final check to ensure that the snapshot chainstate actually has more | 
| 5793 |  |     // work than the active chainstate; a user could have loaded a snapshot | 
| 5794 |  |     // very late in the IBD process, and we wouldn't want to load a useless chainstate. | 
| 5795 | 0 |     if (!CBlockIndexWorkComparator()(ActiveTip(), snapshot_chainstate->m_chain.Tip())) { | 
| 5796 | 0 |         return cleanup_bad_snapshot(Untranslated("work does not exceed active chainstate")); | 
| 5797 | 0 |     } | 
| 5798 |  |     // If not in-memory, persist the base blockhash for use during subsequent | 
| 5799 |  |     // initialization. | 
| 5800 | 0 |     if (!in_memory) { | 
| 5801 | 0 |         if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) { | 
| 5802 | 0 |             return cleanup_bad_snapshot(Untranslated("could not write base blockhash")); | 
| 5803 | 0 |         } | 
| 5804 | 0 |     } | 
| 5805 |  |  | 
| 5806 | 0 |     assert(!m_snapshot_chainstate); | 
| 5807 | 0 |     m_snapshot_chainstate.swap(snapshot_chainstate); | 
| 5808 | 0 |     const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(); | 
| 5809 | 0 |     assert(chaintip_loaded); | 
| 5810 |  |  | 
| 5811 |  |     // Transfer possession of the mempool to the snapshot chainstate. | 
| 5812 |  |     // Mempool is empty at this point because we're still in IBD. | 
| 5813 | 0 |     Assert(m_active_chainstate->m_mempool->size() == 0); | Line | Count | Source |  | 106 | 0 | #define Assert(val) inline_assertion_check<true>(val, __FILE__, __LINE__, __func__, #val) | 
 | 
| 5814 | 0 |     Assert(!m_snapshot_chainstate->m_mempool); | Line | Count | Source |  | 106 | 0 | #define Assert(val) inline_assertion_check<true>(val, __FILE__, __LINE__, __func__, #val) | 
 | 
| 5815 | 0 |     m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool; | 
| 5816 | 0 |     m_active_chainstate->m_mempool = nullptr; | 
| 5817 | 0 |     m_active_chainstate = m_snapshot_chainstate.get(); | 
| 5818 | 0 |     m_blockman.m_snapshot_height = this->GetSnapshotBaseHeight(); | 
| 5819 |  | 
 | 
| 5820 | 0 |     LogInfo("[snapshot] successfully activated snapshot %s", base_blockhash.ToString());| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 5821 | 0 |     LogInfo("[snapshot] (%.2f MB)",| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 5822 | 0 |         m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000)); | 
| 5823 |  | 
 | 
| 5824 | 0 |     this->MaybeRebalanceCaches(); | 
| 5825 | 0 |     return snapshot_start_block; | 
| 5826 | 0 | } | 
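|  |  |  | 
|  |  | // Editor's note: summary of the activation sequence above, for reference. | 
|  |  | // 1. Reject early if a snapshot chainstate already exists, the metadata hash is not a | 
|  |  | //    known assumeutxo hash, the base block header is missing or on an invalid/forked | 
|  |  | //    chain, or the mempool is not empty. | 
|  |  | // 2. Temporarily shrink the active (IBD) chainstate caches to ~1% and give ~99% to the | 
|  |  | //    new snapshot chainstate to speed up the bulk coin load. | 
|  |  | // 3. PopulateAndValidateSnapshot() streams in the coins and checks the serialized UTXO | 
|  |  | //    set hash; on failure the half-built snapshot chainstate directory is deleted. | 
|  |  | // 4. Confirm the snapshot base has more work than the active tip, persist the base | 
|  |  | //    blockhash (unless in-memory), swap the snapshot in as the active chainstate, hand | 
|  |  | //    over the (empty) mempool, and rebalance the caches. | 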
| 5827 |  |  | 
| 5828 |  | static void FlushSnapshotToDisk(CCoinsViewCache& coins_cache, bool snapshot_loaded) | 
| 5829 | 0 | { | 
| 5830 | 0 |     LOG_TIME_MILLIS_WITH_CATEGORY_MSG_ONCE( | Line | Count | Source |  | 106 | 0 |     BCLog::Timer<std::chrono::milliseconds> UNIQUE_NAME(logging_timer)(__func__, end_msg, log_category, /* msg_on_completion=*/false) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 5831 | 0 |         strprintf("%s (%.2f MB)", | 
| 5832 | 0 |                   snapshot_loaded ? "saving snapshot chainstate" : "flushing coins cache", | 
| 5833 | 0 |                   coins_cache.DynamicMemoryUsage() / (1000 * 1000)), | 
| 5834 | 0 |         BCLog::LogFlags::ALL); | 
| 5835 |  | 
 | 
| 5836 | 0 |     coins_cache.Flush(); | 
| 5837 | 0 | } | 
| 5838 |  |  | 
| 5839 |  | struct StopHashingException : public std::exception | 
| 5840 |  | { | 
| 5841 |  |     const char* what() const noexcept override | 
| 5842 | 0 |     { | 
| 5843 | 0 |         return "ComputeUTXOStats interrupted."; | 
| 5844 | 0 |     } | 
| 5845 |  | }; | 
| 5846 |  |  | 
| 5847 |  | static void SnapshotUTXOHashBreakpoint(const util::SignalInterrupt& interrupt) | 
| 5848 | 0 | { | 
| 5849 | 0 |     if (interrupt) throw StopHashingException(); | 
| 5850 | 0 | } | 
| 5851 |  |  | 
| 5852 |  | util::Result<void> ChainstateManager::PopulateAndValidateSnapshot( | 
| 5853 |  |     Chainstate& snapshot_chainstate, | 
| 5854 |  |     AutoFile& coins_file, | 
| 5855 |  |     const SnapshotMetadata& metadata) | 
| 5856 | 0 | { | 
| 5857 |  |     // It's okay to release cs_main before we're done using `coins_cache` because we know | 
| 5858 |  |     // that nothing else will be referencing the newly created snapshot_chainstate yet. | 
| 5859 | 0 |     CCoinsViewCache& coins_cache = *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip()); | Line | Count | Source |  | 290 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) | 
 | 
| 5860 |  | 
 | 
| 5861 | 0 |     uint256 base_blockhash = metadata.m_base_blockhash; | 
| 5862 |  | 
 | 
| 5863 | 0 |     CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, return m_blockman.LookupBlockIndex(base_blockhash)); | Line | Count | Source |  | 290 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) | 
 | 
| 5864 |  | 
 | 
| 5865 | 0 |     if (!snapshot_start_block) { | 
| 5866 |  |         // Needed for ComputeUTXOStats to determine the | 
| 5867 |  |         // height and to avoid a crash when base_blockhash.IsNull() | 
| 5868 | 0 |         return util::Error{Untranslated(strprintf("Did not find snapshot start blockheader %s",| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5869 | 0 |                   base_blockhash.ToString()))}; | 
| 5870 | 0 |     } | 
| 5871 |  |  | 
| 5872 | 0 |     int base_height = snapshot_start_block->nHeight; | 
| 5873 | 0 |     const auto& maybe_au_data = GetParams().AssumeutxoForHeight(base_height); | 
| 5874 |  | 
 | 
| 5875 | 0 |     if (!maybe_au_data) { | 
| 5876 | 0 |         return util::Error{Untranslated(strprintf("Assumeutxo height in snapshot metadata not recognized "| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5877 | 0 |                   "(%d) - refusing to load snapshot", base_height))}; | 
| 5878 | 0 |     } | 
| 5879 |  |  | 
| 5880 | 0 |     const AssumeutxoData& au_data = *maybe_au_data; | 
| 5881 |  |  | 
| 5882 |  |     // This work comparison is a duplicate check with the one performed later in | 
| 5883 |  |     // ActivateSnapshot(), but is done so that we avoid doing the long work of staging | 
| 5884 |  |     // a snapshot that isn't actually usable. | 
| 5885 | 0 |     if (WITH_LOCK(::cs_main, return !CBlockIndexWorkComparator()(ActiveTip(), snapshot_start_block))) {| Line | Count | Source |  | 290 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) | 
 | 
| 5886 | 0 |         return util::Error{Untranslated("Work does not exceed active chainstate")}; | 
| 5887 | 0 |     } | 
| 5888 |  |  | 
| 5889 | 0 |     const uint64_t coins_count = metadata.m_coins_count; | 
| 5890 | 0 |     uint64_t coins_left = metadata.m_coins_count; | 
| 5891 |  | 
 | 
| 5892 | 0 |     LogInfo("[snapshot] loading %d coins from snapshot %s", coins_left, base_blockhash.ToString());| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 5893 | 0 |     int64_t coins_processed{0}; | 
| 5894 |  | 
 | 
| 5895 | 0 |     while (coins_left > 0) { | 
| 5896 | 0 |         try { | 
| 5897 | 0 |             Txid txid; | 
| 5898 | 0 |             coins_file >> txid; | 
| 5899 | 0 |             size_t coins_per_txid{0}; | 
| 5900 | 0 |             coins_per_txid = ReadCompactSize(coins_file); | 
| 5901 |  | 
 | 
| 5902 | 0 |             if (coins_per_txid > coins_left) { | 
| 5903 | 0 |                 return util::Error{Untranslated("Mismatch in coins count in snapshot metadata and actual snapshot data")}; | 
| 5904 | 0 |             } | 
| 5905 |  |  | 
| 5906 | 0 |             for (size_t i = 0; i < coins_per_txid; i++) { | 
| 5907 | 0 |                 COutPoint outpoint; | 
| 5908 | 0 |                 Coin coin; | 
| 5909 | 0 |                 outpoint.n = static_cast<uint32_t>(ReadCompactSize(coins_file)); | 
| 5910 | 0 |                 outpoint.hash = txid; | 
| 5911 | 0 |                 coins_file >> coin; | 
| 5912 | 0 |                 if (coin.nHeight > base_height || | 
| 5913 | 0 |                     outpoint.n >= std::numeric_limits<decltype(outpoint.n)>::max() // Avoid integer wrap-around in coinstats.cpp:ApplyHash | 
| 5914 | 0 |                 ) { | 
| 5915 | 0 |                     return util::Error{Untranslated(strprintf("Bad snapshot data after deserializing %d coins",| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5916 | 0 |                               coins_count - coins_left))}; | 
| 5917 | 0 |                 } | 
| 5918 | 0 |                 if (!MoneyRange(coin.out.nValue)) { | 
| 5919 | 0 |                     return util::Error{Untranslated(strprintf("Bad snapshot data after deserializing %d coins - bad tx out value",| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5920 | 0 |                               coins_count - coins_left))}; | 
| 5921 | 0 |                 } | 
| 5922 | 0 |                 coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin)); | 
| 5923 |  | 
 | 
| 5924 | 0 |                 --coins_left; | 
| 5925 | 0 |                 ++coins_processed; | 
| 5926 |  | 
 | 
| 5927 | 0 |                 if (coins_processed % 1000000 == 0) { | 
| 5928 | 0 |                     LogInfo("[snapshot] %d coins loaded (%.2f%%, %.2f MB)",| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 5929 | 0 |                         coins_processed, | 
| 5930 | 0 |                         static_cast<float>(coins_processed) * 100 / static_cast<float>(coins_count), | 
| 5931 | 0 |                         coins_cache.DynamicMemoryUsage() / (1000 * 1000)); | 
| 5932 | 0 |                 } | 
| 5933 |  |  | 
| 5934 |  |                 // Batch write and flush (if we need to) every so often. | 
| 5935 |  |                 // | 
| 5936 |  |                 // If our average Coin size is roughly 41 bytes, checking every 120,000 coins | 
| 5937 |  |                 // means <5MB of memory imprecision. | 
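|  |  |                 // (Editor's note: 41 bytes/coin * 120,000 coins = 4,920,000 bytes ≈ 4.9 MB, hence the "<5MB" bound.) | 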
| 5938 | 0 |                 if (coins_processed % 120000 == 0) { | 
| 5939 | 0 |                     if (m_interrupt) { | 
| 5940 | 0 |                         return util::Error{Untranslated("Aborting after an interrupt was requested")}; | 
| 5941 | 0 |                     } | 
| 5942 |  |  | 
| 5943 | 0 |                     const auto snapshot_cache_state = WITH_LOCK(::cs_main, | Line | Count | Source |  | 290 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) | 
 | 
| 5944 | 0 |                         return snapshot_chainstate.GetCoinsCacheSizeState()); | 
| 5945 |  | 
 | 
| 5946 | 0 |                     if (snapshot_cache_state >= CoinsCacheSizeState::CRITICAL) { | 
| 5947 |  |                         // This is a hack - we don't know what the actual best block is, but that | 
| 5948 |  |                         // doesn't matter for the purposes of flushing the cache here. We'll set this | 
| 5949 |  |                         // to its correct value (`base_blockhash`) below after the coins are loaded. | 
| 5950 | 0 |                         coins_cache.SetBestBlock(GetRandHash()); | 
| 5951 |  |  | 
| 5952 |  |                         // No need to acquire cs_main since this chainstate isn't being used yet. | 
| 5953 | 0 |                         FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/false); | 
| 5954 | 0 |                     } | 
| 5955 | 0 |                 } | 
| 5956 | 0 |             } | 
| 5957 | 0 |         } catch (const std::ios_base::failure&) { | 
| 5958 | 0 |             return util::Error{Untranslated(strprintf("Bad snapshot format or truncated snapshot after deserializing %d coins",| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5959 | 0 |                       coins_processed))}; | 
| 5960 | 0 |         } | 
| 5961 | 0 |     } | 
| 5962 |  |  | 
| 5963 |  |     // Important that we set this. This and the coins_cache accesses above are | 
| 5964 |  |     // sort of a layer violation, but either we reach into the innards of | 
| 5965 |  |     // CCoinsViewCache here or we have to invert some of the Chainstate to | 
| 5966 |  |     // embed them in a snapshot-activation-specific CCoinsViewCache bulk load | 
| 5967 |  |     // method. | 
| 5968 | 0 |     coins_cache.SetBestBlock(base_blockhash); | 
| 5969 |  | 
 | 
| 5970 | 0 |     bool out_of_coins{false}; | 
| 5971 | 0 |     try { | 
| 5972 | 0 |         std::byte left_over_byte; | 
| 5973 | 0 |         coins_file >> left_over_byte; | 
| 5974 | 0 |     } catch (const std::ios_base::failure&) { | 
| 5975 |  |         // We expect an exception since we should be out of coins. | 
| 5976 | 0 |         out_of_coins = true; | 
| 5977 | 0 |     } | 
| 5978 | 0 |     if (!out_of_coins) { | 
| 5979 | 0 |         return util::Error{Untranslated(strprintf("Bad snapshot - coins left over after deserializing %d coins",| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 5980 | 0 |             coins_count))}; | 
| 5981 | 0 |     } | 
| 5982 |  |  | 
| 5983 | 0 |     LogInfo("[snapshot] loaded %d (%.2f MB) coins from snapshot %s",| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 5984 | 0 |         coins_count, | 
| 5985 | 0 |         coins_cache.DynamicMemoryUsage() / (1000 * 1000), | 
| 5986 | 0 |         base_blockhash.ToString()); | 
| 5987 |  |  | 
| 5988 |  |     // No need to acquire cs_main since this chainstate isn't being used yet. | 
| 5989 | 0 |     FlushSnapshotToDisk(coins_cache, /*snapshot_loaded=*/true); | 
| 5990 |  | 
 | 
| 5991 | 0 |     assert(coins_cache.GetBestBlock() == base_blockhash); | 
| 5992 |  |  | 
| 5993 |  |     // As above, okay to immediately release cs_main here since no other context knows | 
| 5994 |  |     // about the snapshot_chainstate. | 
| 5995 | 0 |     CCoinsViewDB* snapshot_coinsdb = WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB()); | Line | Count | Source |  | 290 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) | 
 | 
| 5996 |  | 
 | 
| 5997 | 0 |     std::optional<CCoinsStats> maybe_stats; | 
| 5998 |  | 
 | 
| 5999 | 0 |     try { | 
| 6000 | 0 |         maybe_stats = ComputeUTXOStats( | 
| 6001 | 0 |             CoinStatsHashType::HASH_SERIALIZED, snapshot_coinsdb, m_blockman, [&interrupt = m_interrupt] { SnapshotUTXOHashBreakpoint(interrupt); }); | 
| 6002 | 0 |     } catch (StopHashingException const&) { | 
| 6003 | 0 |         return util::Error{Untranslated("Aborting after an interrupt was requested")}; | 
| 6004 | 0 |     } | 
| 6005 | 0 |     if (!maybe_stats.has_value()) { | 
| 6006 | 0 |         return util::Error{Untranslated("Failed to generate coins stats")}; | 
| 6007 | 0 |     } | 
| 6008 |  |  | 
| 6009 |  |     // Assert that the deserialized chainstate contents match the expected assumeutxo value. | 
| 6010 | 0 |     if (AssumeutxoHash{maybe_stats->hashSerialized} != au_data.hash_serialized) { | 
| 6011 | 0 |         return util::Error{Untranslated(strprintf("Bad snapshot content hash: expected %s, got %s",| Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 6012 | 0 |             au_data.hash_serialized.ToString(), maybe_stats->hashSerialized.ToString()))}; | 
| 6013 | 0 |     } | 
| 6014 |  |  | 
| 6015 | 0 |     snapshot_chainstate.m_chain.SetTip(*snapshot_start_block); | 
| 6016 |  |  | 
| 6017 |  |     // The remainder of this function requires modifying data protected by cs_main. | 
| 6018 | 0 |     LOCK(::cs_main); | Line | Count | Source |  | 259 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 6019 |  |  | 
| 6020 |  |     // Fake various pieces of CBlockIndex state: | 
| 6021 | 0 |     CBlockIndex* index = nullptr; | 
| 6022 |  |  | 
| 6023 |  |     // Don't make any modifications to the genesis block since it shouldn't be | 
| 6024 |  |     // necessary, and since the genesis block doesn't have normal flags like | 
| 6025 |  |     // BLOCK_VALID_SCRIPTS set. | 
| 6026 | 0 |     constexpr int AFTER_GENESIS_START{1}; | 
| 6027 |  | 
 | 
| 6028 | 0 |     for (int i = AFTER_GENESIS_START; i <= snapshot_chainstate.m_chain.Height(); ++i) { | 
| 6029 | 0 |         index = snapshot_chainstate.m_chain[i]; | 
| 6030 |  |  | 
| 6031 |  |         // Fake BLOCK_OPT_WITNESS so that Chainstate::NeedsRedownload() | 
| 6032 |  |         // won't ask for -reindex on startup. | 
| 6033 | 0 |         if (DeploymentActiveAt(*index, *this, Consensus::DEPLOYMENT_SEGWIT)) { | 
| 6034 | 0 |             index->nStatus |= BLOCK_OPT_WITNESS; | 
| 6035 | 0 |         } | 
| 6036 |  | 
 | 
| 6037 | 0 |         m_blockman.m_dirty_blockindex.insert(index); | 
| 6038 |  |         // Changes to the block index will be flushed to disk after this call | 
| 6039 |  |         // returns in `ActivateSnapshot()`, when `MaybeRebalanceCaches()` is | 
| 6040 |  |         // called, since we've added a snapshot chainstate and therefore will | 
| 6041 |  |         // have to downsize the IBD chainstate, which will result in a call to | 
| 6042 |  |         // `FlushStateToDisk(ALWAYS)`. | 
| 6043 | 0 |     } | 
| 6044 |  | 
 | 
| 6045 | 0 |     assert(index); | 
| 6046 | 0 |     assert(index == snapshot_start_block); | 
| 6047 | 0 |     index->m_chain_tx_count = au_data.m_chain_tx_count; | 
| 6048 | 0 |     snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block); | 
| 6049 |  | 
 | 
| 6050 | 0 |     LogInfo("[snapshot] validated snapshot (%.2f MB)",| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 6051 | 0 |         coins_cache.DynamicMemoryUsage() / (1000 * 1000)); | 
| 6052 | 0 |     return {}; | 
| 6053 | 0 | } | 
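|  |  |  | 
|  |  | // Editor's note: illustrative sketch, not part of validation.cpp; CountSnapshotCoins is a | 
|  |  | // hypothetical helper. It shows the coin-stream layout consumed by the deserialization | 
|  |  | // loop above: for each txid a compact-size coin count, then per coin a compact-size vout | 
|  |  | // index followed by the serialized Coin. Types and helpers are the Core ones used above. | 
|  |  | static uint64_t CountSnapshotCoins(AutoFile& coins_file, uint64_t coins_left) | 
|  |  | { | 
|  |  |     uint64_t seen{0}; | 
|  |  |     while (coins_left > 0) { | 
|  |  |         Txid txid; | 
|  |  |         coins_file >> txid;                               // 32-byte transaction id | 
|  |  |         const uint64_t coins_per_txid{ReadCompactSize(coins_file)}; | 
|  |  |         if (coins_per_txid > coins_left) break;           // malformed stream; caller reports the error | 
|  |  |         for (uint64_t i = 0; i < coins_per_txid; ++i) { | 
|  |  |             [[maybe_unused]] const auto vout_index{static_cast<uint32_t>(ReadCompactSize(coins_file))}; | 
|  |  |             Coin coin; | 
|  |  |             coins_file >> coin;                           // nHeight/fCoinBase flags + CTxOut | 
|  |  |             ++seen; | 
|  |  |             --coins_left; | 
|  |  |         } | 
|  |  |     } | 
|  |  |     return seen; | 
|  |  | } | 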
| 6054 |  |  | 
| 6055 |  | // Currently, this function holds cs_main for its duration, which could be for | 
| 6056 |  | // multiple minutes due to the ComputeUTXOStats call. This hold is necessary | 
| 6057 |  | // because we need to avoid advancing the background validation chainstate | 
| 6058 |  | // farther than the snapshot base block - and this function is also invoked | 
| 6059 |  | // from within ConnectTip, i.e. from within ActivateBestChain, so cs_main is | 
| 6060 |  | // held anyway. | 
| 6061 |  | // | 
| 6062 |  | // Eventually (TODO), we could somehow separate this function's runtime from | 
| 6063 |  | // maintenance of the active chain, but that will either require | 
| 6064 |  | // | 
| 6065 |  | //  (i) setting `m_disabled` immediately and ensuring all chainstate accesses go | 
| 6066 |  | //      through IsUsable() checks, or | 
| 6067 |  | // | 
| 6068 |  | //  (ii) giving each chainstate its own lock instead of using cs_main for everything. | 
| 6069 |  | SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation() | 
| 6070 | 51.2k | { | 
| 6071 | 51.2k |     AssertLockHeld(cs_main); | Line | Count | Source |  | 137 | 51.2k | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) | 
 | 
| 6072 | 51.2k |     if (m_ibd_chainstate.get() == &this->ActiveChainstate() || | 
| 6073 | 51.2k |             !this->IsUsable(m_snapshot_chainstate.get()) || | 
| 6074 | 51.2k |             !this->IsUsable(m_ibd_chainstate.get()) || | 
| 6075 | 51.2k |             !m_ibd_chainstate->m_chain.Tip()) { | 
| 6076 |  |        // Nothing to do - this function only applies to the background | 
| 6077 |  |        // validation chainstate. | 
| 6078 | 51.2k |        return SnapshotCompletionResult::SKIPPED; | 
| 6079 | 51.2k |     } | 
| 6080 | 0 |     const int snapshot_tip_height = this->ActiveHeight(); | 
| 6081 | 0 |     const int snapshot_base_height = *Assert(this->GetSnapshotBaseHeight()); | Line | Count | Source |  | 106 | 0 | #define Assert(val) inline_assertion_check<true>(val, __FILE__, __LINE__, __func__, #val) | 
 | 
| 6082 | 0 |     const CBlockIndex& index_new = *Assert(m_ibd_chainstate->m_chain.Tip()); | Line | Count | Source |  | 106 | 0 | #define Assert(val) inline_assertion_check<true>(val, __FILE__, __LINE__, __func__, #val) | 
 | 
| 6083 |  | 
 | 
| 6084 | 0 |     if (index_new.nHeight < snapshot_base_height) { | 
| 6085 |  |         // Background IBD not complete yet. | 
| 6086 | 0 |         return SnapshotCompletionResult::SKIPPED; | 
| 6087 | 0 |     } | 
| 6088 |  |  | 
| 6089 | 0 |     assert(SnapshotBlockhash()); | 
| 6090 | 0 |     uint256 snapshot_blockhash = *Assert(SnapshotBlockhash()); | Line | Count | Source |  | 106 | 0 | #define Assert(val) inline_assertion_check<true>(val, __FILE__, __LINE__, __func__, #val) | 
 | 
| 6091 |  | 
 | 
| 6092 | 0 |     auto handle_invalid_snapshot = [&]() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { | 
| 6093 | 0 |         bilingual_str user_error = strprintf(_( | Line | Count | Source |  | 1172 | 0 | #define strprintf tfm::format | 
 | 
| 6094 | 0 |             "%s failed to validate the -assumeutxo snapshot state. " | 
| 6095 | 0 |             "This indicates a hardware problem, or a bug in the software, or a " | 
| 6096 | 0 |             "bad software modification that allowed an invalid snapshot to be " | 
| 6097 | 0 |             "loaded. As a result of this, the node will shut down and stop using any " | 
| 6098 | 0 |             "state that was built on the snapshot, resetting the chain height " | 
| 6099 | 0 |             "from %d to %d. On the next " | 
| 6100 | 0 |             "restart, the node will resume syncing from %d " | 
| 6101 | 0 |             "without using any snapshot data. " | 
| 6102 | 0 |             "Please report this incident to %s, including how you obtained the snapshot. " | 
| 6103 | 0 |             "The invalid snapshot chainstate will be left on disk in case it is " | 
| 6104 | 0 |             "helpful in diagnosing the issue that caused this error."), | 
| 6105 | 0 |             CLIENT_NAME, snapshot_tip_height, snapshot_base_height, snapshot_base_height, CLIENT_BUGREPORT | Line | Count | Source |  | 98 | 0 | #define CLIENT_NAME "Bitcoin Core" | Line | Count | Source |  | 95 | 0 | #define CLIENT_BUGREPORT "https://github.com/bitcoin/bitcoin/issues" | 
 | 
| 6106 | 0 |         ); | 
| 6107 |  | 
 | 
| 6108 | 0 |         LogError("[snapshot] !!! %s\n", user_error.original);| Line | Count | Source |  | 358 | 0 | #define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 6109 | 0 |         LogError("[snapshot] deleting snapshot, reverting to validated chain, and stopping node\n");| Line | Count | Source |  | 358 | 0 | #define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 6110 |  | 
 | 
| 6111 | 0 |         m_active_chainstate = m_ibd_chainstate.get(); | 
| 6112 | 0 |         m_snapshot_chainstate->m_disabled = true; | 
| 6113 | 0 |         assert(!this->IsUsable(m_snapshot_chainstate.get())); | 
| 6114 | 0 |         assert(this->IsUsable(m_ibd_chainstate.get())); | 
| 6115 |  |  | 
| 6116 | 0 |         auto rename_result = m_snapshot_chainstate->InvalidateCoinsDBOnDisk(); | 
| 6117 | 0 |         if (!rename_result) { | 
| 6118 | 0 |             user_error += Untranslated("\n") + util::ErrorString(rename_result); | 
| 6119 | 0 |         } | 
| 6120 |  | 
 | 
| 6121 | 0 |         GetNotifications().fatalError(user_error); | 
| 6122 | 0 |     }; | 
| 6123 |  | 
 | 
| 6124 | 0 |     if (index_new.GetBlockHash() != snapshot_blockhash) { | 
| 6125 | 0 |         LogPrintf("[snapshot] supposed base block %s does not match the "| Line | Count | Source |  | 361 | 0 | #define LogPrintf(...) LogInfo(__VA_ARGS__) | Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
 | 
| 6126 | 0 |           "snapshot base block %s (height %d). Snapshot is not valid.\n", | 
| 6127 | 0 |           index_new.ToString(), snapshot_blockhash.ToString(), snapshot_base_height); | 
| 6128 | 0 |         handle_invalid_snapshot(); | 
| 6129 | 0 |         return SnapshotCompletionResult::BASE_BLOCKHASH_MISMATCH; | 
| 6130 | 0 |     } | 
| 6131 |  |  | 
| 6132 | 0 |     assert(index_new.nHeight == snapshot_base_height); | 
| 6133 |  |  | 
| 6134 | 0 |     int curr_height = m_ibd_chainstate->m_chain.Height(); | 
| 6135 |  | 
 | 
| 6136 | 0 |     assert(snapshot_base_height == curr_height); | 
| 6137 | 0 |     assert(snapshot_base_height == index_new.nHeight); | 
| 6138 | 0 |     assert(this->IsUsable(m_snapshot_chainstate.get())); | 
| 6139 | 0 |     assert(this->GetAll().size() == 2); | 
| 6140 |  |  | 
| 6141 | 0 |     CCoinsViewDB& ibd_coins_db = m_ibd_chainstate->CoinsDB(); | 
| 6142 | 0 |     m_ibd_chainstate->ForceFlushStateToDisk(); | 
| 6143 |  | 
 | 
| 6144 | 0 |     const auto& maybe_au_data = m_options.chainparams.AssumeutxoForHeight(curr_height); | 
| 6145 | 0 |     if (!maybe_au_data) { | 
| 6146 | 0 |         LogPrintf("[snapshot] assumeutxo data not found for height "| Line | Count | Source |  | 361 | 0 | #define LogPrintf(...) LogInfo(__VA_ARGS__) | Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
 | 
| 6147 | 0 |             "(%d) - refusing to validate snapshot\n", curr_height); | 
| 6148 | 0 |         handle_invalid_snapshot(); | 
| 6149 | 0 |         return SnapshotCompletionResult::MISSING_CHAINPARAMS; | 
| 6150 | 0 |     } | 
| 6151 |  |  | 
| 6152 | 0 |     const AssumeutxoData& au_data = *maybe_au_data; | 
| 6153 | 0 |     std::optional<CCoinsStats> maybe_ibd_stats; | 
| 6154 | 0 |     LogInfo("[snapshot] computing UTXO stats for background chainstate to validate "| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 6155 | 0 |         "snapshot - this could take a few minutes"); | 
| 6156 | 0 |     try { | 
| 6157 | 0 |         maybe_ibd_stats = ComputeUTXOStats( | 
| 6158 | 0 |             CoinStatsHashType::HASH_SERIALIZED, | 
| 6159 | 0 |             &ibd_coins_db, | 
| 6160 | 0 |             m_blockman, | 
| 6161 | 0 |             [&interrupt = m_interrupt] { SnapshotUTXOHashBreakpoint(interrupt); }); | 
| 6162 | 0 |     } catch (StopHashingException const&) { | 
| 6163 | 0 |         return SnapshotCompletionResult::STATS_FAILED; | 
| 6164 | 0 |     } | 
| 6165 |  |  | 
| 6166 |  |     // XXX note that this function is slow and will hold cs_main for potentially minutes. | 
| 6167 | 0 |     if (!maybe_ibd_stats) { | 
| 6168 | 0 |         LogPrintf("[snapshot] failed to generate stats for validation coins db\n");| Line | Count | Source |  | 361 | 0 | #define LogPrintf(...) LogInfo(__VA_ARGS__) | Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
 | 
| 6169 |  |         // While this isn't a problem with the snapshot per se, this condition | 
| 6170 |  |         // prevents us from validating the snapshot, so we should shut down and let the | 
| 6171 |  |         // user handle the issue manually. | 
| 6172 | 0 |         handle_invalid_snapshot(); | 
| 6173 | 0 |         return SnapshotCompletionResult::STATS_FAILED; | 
| 6174 | 0 |     } | 
| 6175 | 0 |     const auto& ibd_stats = *maybe_ibd_stats; | 
| 6176 |  |  | 
| 6177 |  |     // Compare the background validation chainstate's UTXO set hash against the hard-coded | 
| 6178 |  |     // assumeutxo hash we expect. | 
| 6179 |  |     // | 
| 6180 |  |     // TODO: For belt-and-suspenders, we could cache the UTXO set | 
| 6181 |  |     // hash for the snapshot when it's loaded in its chainstate's leveldb. We could then | 
| 6182 |  |     // reference that here for an additional check. | 
| 6183 | 0 |     if (AssumeutxoHash{ibd_stats.hashSerialized} != au_data.hash_serialized) { | 
| 6184 | 0 |         LogPrintf("[snapshot] hash mismatch: actual=%s, expected=%s\n",| Line | Count | Source |  | 361 | 0 | #define LogPrintf(...) LogInfo(__VA_ARGS__) | Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
 | 
| 6185 | 0 |             ibd_stats.hashSerialized.ToString(), | 
| 6186 | 0 |             au_data.hash_serialized.ToString()); | 
| 6187 | 0 |         handle_invalid_snapshot(); | 
| 6188 | 0 |         return SnapshotCompletionResult::HASH_MISMATCH; | 
| 6189 | 0 |     } | 
| 6190 |  |  | 
| 6191 | 0 |     LogInfo("[snapshot] snapshot beginning at %s has been fully validated",| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 6192 | 0 |         snapshot_blockhash.ToString()); | 
| 6193 |  | 
 | 
| 6194 | 0 |     m_ibd_chainstate->m_disabled = true; | 
| 6195 | 0 |     this->MaybeRebalanceCaches(); | 
| 6196 |  | 
 | 
| 6197 | 0 |     return SnapshotCompletionResult::SUCCESS; | 
| 6198 | 0 | } | 
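|  |  |  | 
|  |  | // Editor's note: summary of the completion check above, for reference. Once the | 
|  |  | // background (IBD) chainstate reaches the snapshot base height, its UTXO set is hashed | 
|  |  | // via ComputeUTXOStats(HASH_SERIALIZED) and compared to the hard-coded assumeutxo hash | 
|  |  | // for that height. On a match the background chainstate is disabled and the caches are | 
|  |  | // rebalanced; on a mismatch the snapshot chainstate is disabled, its coins DB is left on | 
|  |  | // disk (renamed aside) for later inspection, and the node shuts down with a fatal error. | 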
| 6199 |  |  | 
| 6200 |  | Chainstate& ChainstateManager::ActiveChainstate() const | 
| 6201 | 38.9M | { | 
| 6202 | 38.9M |     LOCK(::cs_main); | Line | Count | Source |  | 259 | 38.9M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 38.9M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 38.9M | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 38.9M | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 6203 | 38.9M |     assert(m_active_chainstate); | 
| 6204 | 38.9M |     return *m_active_chainstate; | 
| 6205 | 38.9M | } | 
| 6206 |  |  | 
| 6207 |  | bool ChainstateManager::IsSnapshotActive() const | 
| 6208 | 0 | { | 
| 6209 | 0 |     LOCK(::cs_main); | Line | Count | Source |  | 259 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) | Line | Count | Source |  | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) | Line | Count | Source |  | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) | Line | Count | Source |  | 8 | 0 | #define PASTE(x, y) x ## y | 
 | 
 | 
 | 
 | 
| 6210 | 0 |     return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get(); | 
| 6211 | 0 | } | 
| 6212 |  |  | 
| 6213 |  | void ChainstateManager::MaybeRebalanceCaches() | 
| 6214 | 51.2k | { | 
| 6215 | 51.2k |     AssertLockHeld(::cs_main); | Line | Count | Source |  | 137 | 51.2k | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) | 
 | 
| 6216 | 51.2k |     bool ibd_usable = this->IsUsable(m_ibd_chainstate.get()); | 
| 6217 | 51.2k |     bool snapshot_usable = this->IsUsable(m_snapshot_chainstate.get()); | 
| 6218 | 51.2k |     assert(ibd_usable || snapshot_usable); | 
| 6219 |  |  | 
| 6220 | 51.2k |     if (ibd_usable && !snapshot_usable) { | 
| 6221 |  |         // Allocate everything to the IBD chainstate. This will always happen | 
| 6222 |  |         // when we are not using a snapshot. | 
| 6223 | 51.2k |         m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache); | 
| 6224 | 51.2k |     } | 
| 6225 | 0 |     else if (snapshot_usable && !ibd_usable) { | 
| 6226 |  |         // If background validation has completed and snapshot is our active chain... | 
| 6227 | 0 |         LogInfo("[snapshot] allocating all cache to the snapshot chainstate");| Line | Count | Source |  | 356 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) | Line | Count | Source |  | 350 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__) | 
 | 
 | 
| 6228 |  |         // Allocate everything to the snapshot chainstate. | 
| 6229 | 0 |         m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache); | 
| 6230 | 0 |     } | 
| 6231 | 0 |     else if (ibd_usable && snapshot_usable) { | 
| 6232 |  |         // If both chainstates exist, determine who needs more cache based on IBD status. | 
| 6233 |  |         // | 
| 6234 |  |         // Note: shrink caches first so that we don't inadvertently overwhelm available memory. | 
| 6235 | 0 |         if (IsInitialBlockDownload()) { | 
| 6236 | 0 |             m_ibd_chainstate->ResizeCoinsCaches( | 
| 6237 | 0 |                 m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05); | 
| 6238 | 0 |             m_snapshot_chainstate->ResizeCoinsCaches( | 
| 6239 | 0 |                 m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95); | 
| 6240 | 0 |         } else { | 
| 6241 | 0 |             m_snapshot_chainstate->ResizeCoinsCaches( | 
| 6242 | 0 |                 m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05); | 
| 6243 | 0 |             m_ibd_chainstate->ResizeCoinsCaches( | 
| 6244 | 0 |                 m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95); | 
| 6245 | 0 |         } | 
| 6246 | 0 |     } | 
| 6247 | 51.2k | } | 
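The branch above that handles two usable chainstates splits the configured cache totals 95/5 in favor of whichever chainstate is still syncing. A minimal standalone sketch of that arithmetic is shown below; the struct and function names are hypothetical, and the real code applies these byte counts via Chainstate::ResizeCoinsCaches.

```cpp
// Sketch of the 95/5 cache split (hypothetical types; not the Chainstate API).
#include <cstdint>
#include <cstdio>
#include <utility>

struct CacheSplit {
    int64_t tip_bytes; // in-memory coins tip cache
    int64_t db_bytes;  // leveldb coinsdb cache
};

// Returns {foreground, background} allocations for the given totals.
std::pair<CacheSplit, CacheSplit> SplitCaches(int64_t total_tip, int64_t total_db)
{
    CacheSplit fg{static_cast<int64_t>(total_tip * 0.95), static_cast<int64_t>(total_db * 0.95)};
    CacheSplit bg{static_cast<int64_t>(total_tip * 0.05), static_cast<int64_t>(total_db * 0.05)};
    return {fg, bg};
}

int main()
{
    const int64_t total_tip = 450 << 20; // example totals, roughly a 450 MiB tip cache
    const int64_t total_db = 8 << 20;
    auto [fg, bg] = SplitCaches(total_tip, total_db);
    std::printf("foreground: %lld/%lld bytes, background: %lld/%lld bytes\n",
                (long long)fg.tip_bytes, (long long)fg.db_bytes,
                (long long)bg.tip_bytes, (long long)bg.db_bytes);
}
```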
| 6248 |  |  | 
| 6249 |  | void ChainstateManager::ResetChainstates() | 
| 6250 | 0 | { | 
| 6251 | 0 |     m_ibd_chainstate.reset(); | 
| 6252 | 0 |     m_snapshot_chainstate.reset(); | 
| 6253 | 0 |     m_active_chainstate = nullptr; | 
| 6254 | 0 | } | 
| 6255 |  |  | 
| 6256 |  | /** | 
| 6257 |  |  * Apply default chain params to nullopt members. | 
| 6258 |  |  * This helps to avoid coding errors around the accidental use of the compare | 
| 6259 |  |  * operators that accept nullopt, thus ignoring the intended default value. | 
| 6260 |  |  */ | 
| 6261 |  | static ChainstateManager::Options&& Flatten(ChainstateManager::Options&& opts) | 
| 6262 | 51.2k | { | 
| 6263 | 51.2k |     if (!opts.check_block_index.has_value()) opts.check_block_index = opts.chainparams.DefaultConsistencyChecks();
| 6264 | 51.2k |     if (!opts.minimum_chain_work.has_value()) opts.minimum_chain_work = UintToArith256(opts.chainparams.GetConsensus().nMinimumChainWork); | 
| 6265 | 51.2k |     if (!opts.assumed_valid_block.has_value()) opts.assumed_valid_block = opts.chainparams.GetConsensus().defaultAssumeValid; | 
| 6266 | 51.2k |     return std::move(opts); | 
| 6267 | 51.2k | } | 
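Flatten() fills any std::optional option left as nullopt with its chain-derived default, so later code can compare option values directly without tripping over nullopt comparisons. A minimal sketch of the same pattern follows; the Options struct and default values here are hypothetical stand-ins for ChainstateManager::Options and the chainparams defaults.

```cpp
// Sketch of the "flatten optionals to defaults" pattern (hypothetical Options).
#include <cassert>
#include <optional>
#include <utility>

struct Options {
    std::optional<bool> check_block_index;
    std::optional<int> minimum_chain_work;
};

static Options&& Flatten(Options&& opts)
{
    // Fill in defaults so callers never compare against nullopt by accident.
    if (!opts.check_block_index.has_value()) opts.check_block_index = false;
    if (!opts.minimum_chain_work.has_value()) opts.minimum_chain_work = 0;
    return std::move(opts);
}

int main()
{
    Options opts = Flatten(Options{.check_block_index = true});
    assert(opts.check_block_index == true); // caller-provided value kept
    assert(opts.minimum_chain_work == 0);   // default applied
}
```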
| 6268 |  |  | 
| 6269 |  | ChainstateManager::ChainstateManager(const util::SignalInterrupt& interrupt, Options options, node::BlockManager::Options blockman_options) | 
| 6270 | 51.2k |     : m_script_check_queue{/*batch_size=*/128, std::clamp(options.worker_threads_num, 0, MAX_SCRIPTCHECK_THREADS)}, | 
| 6271 | 51.2k |       m_interrupt{interrupt}, | 
| 6272 | 51.2k |       m_options{Flatten(std::move(options))}, | 
| 6273 | 51.2k |       m_blockman{interrupt, std::move(blockman_options)}, | 
| 6274 | 51.2k |       m_validation_cache{m_options.script_execution_cache_bytes, m_options.signature_cache_bytes} | 
| 6275 | 51.2k | { | 
| 6276 | 51.2k | } | 
| 6277 |  |  | 
| 6278 |  | ChainstateManager::~ChainstateManager() | 
| 6279 | 51.2k | { | 
| 6280 | 51.2k |     LOCK(::cs_main);
| 6281 |  |  | 
| 6282 | 51.2k |     m_versionbitscache.Clear(); | 
| 6283 | 51.2k | } | 
| 6284 |  |  | 
| 6285 |  | bool ChainstateManager::DetectSnapshotChainstate() | 
| 6286 | 51.2k | { | 
| 6287 | 51.2k |     assert(!m_snapshot_chainstate); | 
| 6288 | 51.2k |     std::optional<fs::path> path = node::FindSnapshotChainstateDir(m_options.datadir); | 
| 6289 | 51.2k |     if (!path) { | 
| 6290 | 51.2k |         return false; | 
| 6291 | 51.2k |     } | 
| 6292 | 0 |     std::optional<uint256> base_blockhash = node::ReadSnapshotBaseBlockhash(*path); | 
| 6293 | 0 |     if (!base_blockhash) { | 
| 6294 | 0 |         return false; | 
| 6295 | 0 |     } | 
| 6296 | 0 |     LogInfo("[snapshot] detected active snapshot chainstate (%s) - loading",
| 6297 | 0 |         fs::PathToString(*path)); | 
| 6298 |  |  |
| 6299 | 0 |     this->ActivateExistingSnapshot(*base_blockhash); | 
| 6300 | 0 |     return true; | 
| 6301 | 0 | } | 
| 6302 |  |  | 
| 6303 |  | Chainstate& ChainstateManager::ActivateExistingSnapshot(uint256 base_blockhash) | 
| 6304 | 0 | { | 
| 6305 | 0 |     assert(!m_snapshot_chainstate); | 
| 6306 | 0 |     m_snapshot_chainstate = | 
| 6307 | 0 |         std::make_unique<Chainstate>(nullptr, m_blockman, *this, base_blockhash); | 
| 6308 | 0 |     LogInfo("[snapshot] switching active chainstate to %s", m_snapshot_chainstate->ToString());
| 6309 |  |  | 
| 6310 |  |     // Mempool is empty at this point because we're still in IBD. | 
| 6311 | 0 |     Assert(m_active_chainstate->m_mempool->size() == 0);
| 6312 | 0 |     Assert(!m_snapshot_chainstate->m_mempool);
| 6313 | 0 |     m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool; | 
| 6314 | 0 |     m_active_chainstate->m_mempool = nullptr; | 
| 6315 | 0 |     m_active_chainstate = m_snapshot_chainstate.get(); | 
| 6316 | 0 |     return *m_snapshot_chainstate; | 
| 6317 | 0 | } | 
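ActivateExistingSnapshot() hands the non-owning mempool pointer from the IBD chainstate to the freshly constructed snapshot chainstate before switching the active-chainstate pointer. The sketch below illustrates that handoff with minimal hypothetical types; it is not the Chainstate interface itself.

```cpp
// Sketch of the mempool pointer handoff during snapshot activation
// (hypothetical MiniChainstate/Mempool types).
#include <cassert>
#include <memory>

struct Mempool { int size{0}; };
struct MiniChainstate { Mempool* mempool{nullptr}; };

int main()
{
    Mempool mempool;
    auto ibd = std::make_unique<MiniChainstate>();
    ibd->mempool = &mempool;
    MiniChainstate* active = ibd.get();

    // Activate a snapshot chainstate: it takes over the mempool pointer and
    // becomes the active chainstate.
    auto snapshot = std::make_unique<MiniChainstate>();
    assert(active->mempool->size == 0); // mempool must be empty while still in IBD
    snapshot->mempool = active->mempool;
    active->mempool = nullptr;
    active = snapshot.get();

    assert(active == snapshot.get() && ibd->mempool == nullptr);
}
```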
| 6318 |  |  | 
| 6319 |  | bool IsBIP30Repeat(const CBlockIndex& block_index) | 
| 6320 | 25.0k | { | 
| 6321 | 25.0k |     return (block_index.nHeight==91842 && block_index.GetBlockHash() == uint256{"00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec"}) ||
| 6322 | 25.0k |            (block_index.nHeight==91880 && block_index.GetBlockHash() == uint256{"00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"});
| 6323 | 25.0k | } | 
| 6324 |  |  | 
| 6325 |  | bool IsBIP30Unspendable(const uint256& block_hash, int block_height) | 
| 6326 | 0 | { | 
| 6327 | 0 |     return (block_height==91722 && block_hash == uint256{"00000000000271a2dc26e7667f8419f2e15416dc6955e5a6c6cdf3f2574dd08e"}) || | 
| 6328 | 0 |            (block_height==91812 && block_hash == uint256{"00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f"}); | 
| 6329 | 0 | } | 
| 6330 |  |  | 
| 6331 |  | static fs::path GetSnapshotCoinsDBPath(Chainstate& cs) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) | 
| 6332 | 0 | { | 
| 6333 | 0 |     AssertLockHeld(::cs_main);
| 6334 |  |     // Should never be called on a non-snapshot chainstate. | 
| 6335 | 0 |     assert(cs.m_from_snapshot_blockhash); | 
| 6336 | 0 |     auto storage_path_maybe = cs.CoinsDB().StoragePath(); | 
| 6337 |  |     // Should never be called with a non-existent storage path. | 
| 6338 | 0 |     assert(storage_path_maybe); | 
| 6339 | 0 |     return *storage_path_maybe; | 
| 6340 | 0 | } | 
| 6341 |  |  | 
| 6342 |  | util::Result<void> Chainstate::InvalidateCoinsDBOnDisk() | 
| 6343 | 0 | { | 
| 6344 | 0 |     fs::path snapshot_datadir = GetSnapshotCoinsDBPath(*this); | 
| 6345 |  |  | 
| 6346 |  |     // Coins views no longer usable. | 
| 6347 | 0 |     m_coins_views.reset(); | 
| 6348 |  | 
 | 
| 6349 | 0 |     auto invalid_path = snapshot_datadir + "_INVALID"; | 
| 6350 | 0 |     std::string dbpath = fs::PathToString(snapshot_datadir); | 
| 6351 | 0 |     std::string target = fs::PathToString(invalid_path); | 
| 6352 | 0 |     LogInfo("[snapshot] renaming snapshot datadir %s to %s", dbpath, target);
| 6353 |  |  | 
| 6354 |  |     // The invalid snapshot datadir is simply moved and not deleted because we may | 
| 6355 |  |     // want to do forensics later during issue investigation. The user is instructed | 
| 6356 |  |     // accordingly in MaybeCompleteSnapshotValidation(). | 
| 6357 | 0 |     try { | 
| 6358 | 0 |         fs::rename(snapshot_datadir, invalid_path); | 
| 6359 | 0 |     } catch (const fs::filesystem_error& e) { | 
| 6360 | 0 |         auto src_str = fs::PathToString(snapshot_datadir); | 
| 6361 | 0 |         auto dest_str = fs::PathToString(invalid_path); | 
| 6362 |  |  |
| 6363 | 0 |         LogPrintf("%s: error renaming file '%s' -> '%s': %s\n",
| 6364 | 0 |                 __func__, src_str, dest_str, e.what()); | 
| 6365 | 0 |         return util::Error{strprintf(_(
| 6366 | 0 |             "Rename of '%s' -> '%s' failed. " | 
| 6367 | 0 |             "You should resolve this by manually moving or deleting the invalid " | 
| 6368 | 0 |             "snapshot directory %s, otherwise you will encounter the same error again " | 
| 6369 | 0 |             "on the next startup."), | 
| 6370 | 0 |             src_str, dest_str, src_str)}; | 
| 6371 | 0 |     } | 
| 6372 | 0 |     return {}; | 
| 6373 | 0 | } | 
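InvalidateCoinsDBOnDisk() moves the snapshot's leveldb directory aside with an "_INVALID" suffix instead of deleting it, and turns a rename failure into an error the caller can surface. Below is a minimal sketch of the same rename-and-report pattern using std::filesystem directly; the real code goes through the fs:: wrappers and returns util::Result<void>, and the path name here is hypothetical.

```cpp
// Sketch of "move aside rather than delete" with error reporting.
#include <filesystem>
#include <iostream>
#include <optional>
#include <string>

namespace stdfs = std::filesystem;

// Returns an error message on failure, std::nullopt on success.
std::optional<std::string> MoveAside(const stdfs::path& dir)
{
    stdfs::path invalid = dir;
    invalid += "_INVALID"; // append suffix, keep the same parent directory
    std::error_code ec;
    stdfs::rename(dir, invalid, ec);
    if (ec) {
        return "Rename of '" + dir.string() + "' -> '" + invalid.string() +
               "' failed: " + ec.message();
    }
    return std::nullopt;
}

int main()
{
    if (auto err = MoveAside("chainstate_snapshot"); err) { // hypothetical path
        std::cerr << *err << '\n';
        return 1;
    }
}
```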
| 6374 |  |  | 
| 6375 |  | bool ChainstateManager::DeleteSnapshotChainstate() | 
| 6376 | 0 | { | 
| 6377 | 0 |     AssertLockHeld(::cs_main);
| 6378 | 0 |     Assert(m_snapshot_chainstate);
| 6379 | 0 |     Assert(m_ibd_chainstate);
| 6380 |  |  |
| 6381 | 0 |     fs::path snapshot_datadir = Assert(node::FindSnapshotChainstateDir(m_options.datadir)).value();
| 6382 | 0 |     if (!DeleteCoinsDBFromDisk(snapshot_datadir, /*is_snapshot=*/ true)) { | 
| 6383 | 0 |         LogPrintf("Deletion of %s failed. Please remove it manually to continue reindexing.\n",
| 6384 | 0 |                   fs::PathToString(snapshot_datadir)); | 
| 6385 | 0 |         return false; | 
| 6386 | 0 |     } | 
| 6387 | 0 |     m_active_chainstate = m_ibd_chainstate.get(); | 
| 6388 | 0 |     m_active_chainstate->m_mempool = m_snapshot_chainstate->m_mempool; | 
| 6389 | 0 |     m_snapshot_chainstate.reset(); | 
| 6390 | 0 |     return true; | 
| 6391 | 0 | } | 
| 6392 |  |  | 
| 6393 |  | ChainstateRole Chainstate::GetRole() const | 
| 6394 | 110k | { | 
| 6395 | 110k |     if (m_chainman.GetAll().size() <= 1) { | 
| 6396 | 110k |         return ChainstateRole::NORMAL; | 
| 6397 | 110k |     } | 
| 6398 | 0 |     return (this != &m_chainman.ActiveChainstate()) ? | 
| 6399 | 0 |                ChainstateRole::BACKGROUND : | 
| 6400 | 0 |                ChainstateRole::ASSUMEDVALID; | 
| 6401 | 110k | } | 
| 6402 |  |  | 
| 6403 |  | const CBlockIndex* ChainstateManager::GetSnapshotBaseBlock() const | 
| 6404 | 16.7M | { | 
| 6405 | 16.7M |     return m_active_chainstate ? m_active_chainstate->SnapshotBase() : nullptr;
| 6406 | 16.7M | } | 
| 6407 |  |  | 
| 6408 |  | std::optional<int> ChainstateManager::GetSnapshotBaseHeight() const | 
| 6409 | 0 | { | 
| 6410 | 0 |     const CBlockIndex* base = this->GetSnapshotBaseBlock(); | 
| 6411 | 0 |     return base ? std::make_optional(base->nHeight) : std::nullopt; | 
| 6412 | 0 | } | 
| 6413 |  |  | 
| 6414 |  | void ChainstateManager::RecalculateBestHeader() | 
| 6415 | 3.67k | { | 
| 6416 | 3.67k |     AssertLockHeld(cs_main);
| 6417 | 3.67k |     m_best_header = ActiveChain().Tip(); | 
| 6418 | 747k |     for (auto& entry : m_blockman.m_block_index) { | 
| 6419 | 747k |         if (!(entry.second.nStatus & BLOCK_FAILED_MASK) && m_best_header->nChainWork < entry.second.nChainWork) {
| 6420 | 132 |             m_best_header = &entry.second; | 
| 6421 | 132 |         } | 
| 6422 | 747k |     } | 
| 6423 | 3.67k | } | 
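RecalculateBestHeader() starts from the active tip and walks the whole block index, promoting any entry that is not marked failed and has more accumulated work. The sketch below shows the same "max work, skipping failed entries" scan over a plain map; the Entry type is a hypothetical stand-in for CBlockIndex.

```cpp
// Sketch of the best-header scan (hypothetical Entry in place of CBlockIndex).
#include <cassert>
#include <cstdint>
#include <map>
#include <string>

struct Entry {
    uint64_t chain_work{0};
    bool failed{false};
};

const Entry* RecalculateBest(const std::map<std::string, Entry>& index, const Entry* tip)
{
    const Entry* best = tip;
    for (const auto& [hash, entry] : index) {
        (void)hash;
        if (!entry.failed && best->chain_work < entry.chain_work) {
            best = &entry;
        }
    }
    return best;
}

int main()
{
    std::map<std::string, Entry> index{
        {"a", {.chain_work = 10}},
        {"b", {.chain_work = 30, .failed = true}}, // more work, but marked invalid
        {"c", {.chain_work = 20}},
    };
    const Entry* tip = &index.at("a");
    assert(RecalculateBest(index, tip) == &index.at("c"));
}
```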
| 6424 |  |  | 
| 6425 |  | bool ChainstateManager::ValidatedSnapshotCleanup() | 
| 6426 | 0 | { | 
| 6427 | 0 |     AssertLockHeld(::cs_main);
| 6428 | 0 |     auto get_storage_path = [](auto& chainstate) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) -> std::optional<fs::path> { | 
| 6429 | 0 |         if (!(chainstate && chainstate->HasCoinsViews())) { | 
| 6430 | 0 |             return {}; | 
| 6431 | 0 |         } | 
| 6432 | 0 |         return chainstate->CoinsDB().StoragePath(); | 
| 6433 | 0 |     }; | 
| 6434 | 0 |     std::optional<fs::path> ibd_chainstate_path_maybe = get_storage_path(m_ibd_chainstate); | 
| 6435 | 0 |     std::optional<fs::path> snapshot_chainstate_path_maybe = get_storage_path(m_snapshot_chainstate); | 
| 6436 |  |  |
| 6437 | 0 |     if (!this->IsSnapshotValidated()) { | 
| 6438 |  |         // No need to clean up. | 
| 6439 | 0 |         return false; | 
| 6440 | 0 |     } | 
| 6441 |  |     // If either path doesn't exist, that means at least one of the chainstates | 
| 6442 |  |     // is in-memory, in which case we can't do on-disk cleanup. You'd better be | 
| 6443 |  |     // in a unittest! | 
| 6444 | 0 |     if (!ibd_chainstate_path_maybe || !snapshot_chainstate_path_maybe) { | 
| 6445 | 0 |         LogPrintf("[snapshot] snapshot chainstate cleanup cannot happen with "
| 6446 | 0 |                   "in-memory chainstates. You are testing, right?\n"); | 
| 6447 | 0 |         return false; | 
| 6448 | 0 |     } | 
| 6449 |  |  | 
| 6450 | 0 |     const auto& snapshot_chainstate_path = *snapshot_chainstate_path_maybe; | 
| 6451 | 0 |     const auto& ibd_chainstate_path = *ibd_chainstate_path_maybe; | 
| 6452 |  |  | 
| 6453 |  |     // Since we're going to be moving around the underlying leveldb filesystem content | 
| 6454 |  |     // for each chainstate, make sure that the chainstates (and their constituent | 
| 6455 |  |     // CoinsViews members) have been destructed first. | 
| 6456 |  |     // | 
| 6457 |  |     // The caller of this method will be responsible for reinitializing chainstates | 
| 6458 |  |     // if they want to continue operation. | 
| 6459 | 0 |     this->ResetChainstates(); | 
| 6460 |  |  | 
| 6461 |  |     // No chainstates should be considered usable. | 
| 6462 | 0 |     assert(this->GetAll().size() == 0); | 
| 6463 |  |  | 
| 6464 | 0 |     LogInfo("[snapshot] deleting background chainstate directory (now unnecessary) (%s)",
| 6465 | 0 |               fs::PathToString(ibd_chainstate_path)); | 
| 6466 |  |  |
| 6467 | 0 |     fs::path tmp_old{ibd_chainstate_path + "_todelete"}; | 
| 6468 |  |  |
| 6469 | 0 |     auto rename_failed_abort = [this]( | 
| 6470 | 0 |                                    fs::path p_old, | 
| 6471 | 0 |                                    fs::path p_new, | 
| 6472 | 0 |                                    const fs::filesystem_error& err) { | 
| 6473 | 0 |         LogError("[snapshot] Error renaming path (%s) -> (%s): %s\n",
| 6474 | 0 |                   fs::PathToString(p_old), fs::PathToString(p_new), err.what()); | 
| 6475 | 0 |         GetNotifications().fatalError(strprintf(_(
| 6476 | 0 |             "Rename of '%s' -> '%s' failed. " | 
| 6477 | 0 |             "Cannot clean up the background chainstate leveldb directory."), | 
| 6478 | 0 |             fs::PathToString(p_old), fs::PathToString(p_new))); | 
| 6479 | 0 |     }; | 
| 6480 |  |  |
| 6481 | 0 |     try { | 
| 6482 | 0 |         fs::rename(ibd_chainstate_path, tmp_old); | 
| 6483 | 0 |     } catch (const fs::filesystem_error& e) { | 
| 6484 | 0 |         rename_failed_abort(ibd_chainstate_path, tmp_old, e); | 
| 6485 | 0 |         throw; | 
| 6486 | 0 |     } | 
| 6487 |  |  | 
| 6488 | 0 |     LogInfo("[snapshot] moving snapshot chainstate (%s) to "
| 6489 | 0 |               "default chainstate directory (%s)", | 
| 6490 | 0 |               fs::PathToString(snapshot_chainstate_path), fs::PathToString(ibd_chainstate_path)); | 
| 6491 |  |  |
| 6492 | 0 |     try { | 
| 6493 | 0 |         fs::rename(snapshot_chainstate_path, ibd_chainstate_path); | 
| 6494 | 0 |     } catch (const fs::filesystem_error& e) { | 
| 6495 | 0 |         rename_failed_abort(snapshot_chainstate_path, ibd_chainstate_path, e); | 
| 6496 | 0 |         throw; | 
| 6497 | 0 |     } | 
| 6498 |  |  | 
| 6499 | 0 |     if (!DeleteCoinsDBFromDisk(tmp_old, /*is_snapshot=*/false)) { | 
| 6500 |  |         // No need to FatalError because once the unneeded bg chainstate data is | 
| 6501 |  |         // moved, it will not interfere with subsequent initialization. | 
| 6502 | 0 |         LogPrintf("Deletion of %s failed. Please remove it manually, as the "
| 6503 | 0 |                   "directory is now unnecessary.\n", | 
| 6504 | 0 |                   fs::PathToString(tmp_old)); | 
| 6505 | 0 |     } else { | 
| 6506 | 0 |         LogInfo("[snapshot] deleted background chainstate directory (%s)",
| 6507 | 0 |                   fs::PathToString(ibd_chainstate_path)); | 
| 6508 | 0 |     } | 
| 6509 | 0 |     return true; | 
| 6510 | 0 | } | 
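Once the snapshot has been validated, the cleanup above is essentially two renames followed by a best-effort delete: move the old background chainstate directory aside, promote the snapshot directory into the default location, then remove the leftover. A minimal sketch of that directory swap with std::filesystem follows; the directory names are hypothetical, and unlike the real code (which calls fatalError and re-throws on rename failure) this sketch just reports and bails out.

```cpp
// Sketch of the two-step directory swap used to promote the snapshot chainstate.
#include <filesystem>
#include <iostream>

namespace stdfs = std::filesystem;

bool PromoteSnapshotDir(const stdfs::path& ibd_dir, const stdfs::path& snapshot_dir)
{
    const stdfs::path tmp_old = ibd_dir.string() + "_todelete";
    std::error_code ec;

    stdfs::rename(ibd_dir, tmp_old, ec); // step 1: move background data aside
    if (ec) { std::cerr << "rename failed: " << ec.message() << '\n'; return false; }

    stdfs::rename(snapshot_dir, ibd_dir, ec); // step 2: promote snapshot data
    if (ec) { std::cerr << "rename failed: " << ec.message() << '\n'; return false; }

    stdfs::remove_all(tmp_old, ec); // best effort: the leftover directory is unnecessary
    return true;
}

int main()
{
    // Hypothetical directory names for illustration only.
    return PromoteSnapshotDir("chainstate", "chainstate_snapshot") ? 0 : 1;
}
```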
| 6511 |  |  | 
| 6512 |  | Chainstate& ChainstateManager::GetChainstateForIndexing() | 
| 6513 | 0 | { | 
| 6514 |  |     // We can't always return `m_ibd_chainstate` because after background validation | 
| 6515 |  |     // has completed, `m_snapshot_chainstate == m_active_chainstate`, but it can be | 
| 6516 |  |     // indexed. | 
| 6517 | 0 |     return (this->GetAll().size() > 1) ? *m_ibd_chainstate : *m_active_chainstate; | 
| 6518 | 0 | } | 
| 6519 |  |  | 
| 6520 |  | std::pair<int, int> ChainstateManager::GetPruneRange(const Chainstate& chainstate, int last_height_can_prune) | 
| 6521 | 0 | { | 
| 6522 | 0 |     if (chainstate.m_chain.Height() <= 0) { | 
| 6523 | 0 |         return {0, 0}; | 
| 6524 | 0 |     } | 
| 6525 | 0 |     int prune_start{0}; | 
| 6526 |  |  |
| 6527 | 0 |     if (this->GetAll().size() > 1 && m_snapshot_chainstate.get() == &chainstate) { | 
| 6528 |  |         // Leave the blocks in the background IBD chain alone if we're pruning | 
| 6529 |  |         // the snapshot chain. | 
| 6530 | 0 |         prune_start = *Assert(GetSnapshotBaseHeight()) + 1;
| 6531 | 0 |     } | 
| 6532 |  |  |
| 6533 | 0 |     int max_prune = std::max<int>( | 
| 6534 | 0 |         0, chainstate.m_chain.Height() - static_cast<int>(MIN_BLOCKS_TO_KEEP)); | 
| 6535 |  |  | 
| 6536 |  |     // last block to prune is the lesser of (caller-specified height, MIN_BLOCKS_TO_KEEP from the tip) | 
| 6537 |  |     // | 
| 6538 |  |     // While you might be tempted to prune the background chainstate more | 
| 6539 |  |     // aggressively (i.e. fewer MIN_BLOCKS_TO_KEEP), this won't work with index | 
| 6540 |  |     // building - specifically blockfilterindex requires undo data, and if | 
| 6541 |  |     // we don't maintain this trailing window, we hit indexing failures. | 
| 6542 | 0 |     int prune_end = std::min(last_height_can_prune, max_prune); | 
| 6543 |  |  |
| 6544 | 0 |     return {prune_start, prune_end}; | 
| 6545 | 0 | } |
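GetPruneRange() clamps the prune window twice: the start is pushed past the snapshot base so the background chain keeps its blocks, and the end never reaches within MIN_BLOCKS_TO_KEEP of the tip so undo data stays available for index building. A minimal sketch of that arithmetic is below; the function signature and the snapshot_base_height parameter are hypothetical simplifications, while MIN_BLOCKS_TO_KEEP matches the Bitcoin Core constant of 288.

```cpp
// Sketch of the prune-range arithmetic (hypothetical standalone function).
#include <algorithm>
#include <cassert>
#include <utility>

constexpr int MIN_BLOCKS_TO_KEEP{288};

std::pair<int, int> GetPruneRange(int chain_height, int last_height_can_prune,
                                  int snapshot_base_height = -1)
{
    if (chain_height <= 0) return {0, 0};

    // Never prune the background chain's blocks when pruning the snapshot chain.
    int prune_start = (snapshot_base_height >= 0) ? snapshot_base_height + 1 : 0;

    // Always keep a trailing window of MIN_BLOCKS_TO_KEEP blocks below the tip.
    int max_prune = std::max(0, chain_height - MIN_BLOCKS_TO_KEEP);
    int prune_end = std::min(last_height_can_prune, max_prune);
    return {prune_start, prune_end};
}

int main()
{
    // Example: tip at 850'000, caller allows pruning up to 849'900.
    auto [start, end] = GetPruneRange(850'000, 849'900);
    assert(start == 0 && end == 849'712); // 850'000 - 288
}
```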