/Users/eugenesiegel/btc/bitcoin/src/node/blockstorage.cpp
Line | Count | Source |
1 | | // Copyright (c) 2011-2022 The Bitcoin Core developers |
2 | | // Distributed under the MIT software license, see the accompanying |
3 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
4 | | |
5 | | #include <node/blockstorage.h> |
6 | | |
7 | | #include <arith_uint256.h> |
8 | | #include <chain.h> |
9 | | #include <consensus/params.h> |
10 | | #include <consensus/validation.h> |
11 | | #include <dbwrapper.h> |
12 | | #include <flatfile.h> |
13 | | #include <hash.h> |
14 | | #include <kernel/blockmanager_opts.h> |
15 | | #include <kernel/chainparams.h> |
16 | | #include <kernel/messagestartchars.h> |
17 | | #include <kernel/notifications_interface.h> |
18 | | #include <logging.h> |
19 | | #include <pow.h> |
20 | | #include <primitives/block.h> |
21 | | #include <primitives/transaction.h> |
22 | | #include <random.h> |
23 | | #include <serialize.h> |
24 | | #include <signet.h> |
25 | | #include <span.h> |
26 | | #include <streams.h> |
27 | | #include <sync.h> |
28 | | #include <tinyformat.h> |
29 | | #include <uint256.h> |
30 | | #include <undo.h> |
31 | | #include <util/batchpriority.h> |
32 | | #include <util/check.h> |
33 | | #include <util/fs.h> |
34 | | #include <util/signalinterrupt.h> |
35 | | #include <util/strencodings.h> |
36 | | #include <util/translation.h> |
37 | | #include <validation.h> |
38 | | |
39 | | #include <cstddef> |
40 | | #include <map> |
41 | | #include <unordered_map> |
42 | | |
43 | | namespace kernel { |
44 | | static constexpr uint8_t DB_BLOCK_FILES{'f'}; |
45 | | static constexpr uint8_t DB_BLOCK_INDEX{'b'}; |
46 | | static constexpr uint8_t DB_FLAG{'F'}; |
47 | | static constexpr uint8_t DB_REINDEX_FLAG{'R'}; |
48 | | static constexpr uint8_t DB_LAST_BLOCK{'l'}; |
49 | | // Keys used in previous version that might still be found in the DB: |
50 | | // BlockTreeDB::DB_TXINDEX_BLOCK{'T'}; |
51 | | // BlockTreeDB::DB_TXINDEX{'t'} |
52 | | // BlockTreeDB::ReadFlag("txindex") |
53 | | |
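The single-byte prefixes above are paired with a typed key (for example std::make_pair(DB_BLOCK_FILES, nFile)) to form one composite LevelDB key per record. A minimal self-contained sketch of that composite-key idea follows; the helper name and the memcpy-based encoding are illustrative only, since the real BlockTreeDB keys are produced by Bitcoin Core's serialization framework through the CDBWrapper templates.

    #include <cstdint>
    #include <cstring>
    #include <string>

    // Hypothetical helper: compose <1-byte prefix><4-byte little-endian file number>,
    // mirroring the (DB_BLOCK_FILES, nFile) pairs written above.
    std::string MakeFileInfoKey(uint8_t prefix, int file_number)
    {
        std::string key(1 + sizeof(file_number), '\0');
        key[0] = static_cast<char>(prefix);
        std::memcpy(&key[1], &file_number, sizeof(file_number)); // assumes a little-endian host
        return key;
    }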
54 | | bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info) |
55 | 99.9k | { |
56 | 99.9k | return Read(std::make_pair(DB_BLOCK_FILES, nFile), info); |
57 | 99.9k | } |
58 | | |
59 | | bool BlockTreeDB::WriteReindexing(bool fReindexing) |
60 | 0 | { |
61 | 0 | if (fReindexing) { |
62 | 0 | return Write(DB_REINDEX_FLAG, uint8_t{'1'}); |
63 | 0 | } else { |
64 | 0 | return Erase(DB_REINDEX_FLAG); |
65 | 0 | } |
66 | 0 | } |
67 | | |
68 | | void BlockTreeDB::ReadReindexing(bool& fReindexing) |
69 | 49.9k | { |
70 | 49.9k | fReindexing = Exists(DB_REINDEX_FLAG); |
71 | 49.9k | } |
72 | | |
73 | | bool BlockTreeDB::ReadLastBlockFile(int& nFile) |
74 | 49.9k | { |
75 | 49.9k | return Read(DB_LAST_BLOCK, nFile); |
76 | 49.9k | } |
77 | | |
78 | | bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo) |
79 | 517 | { |
80 | 517 | CDBBatch batch(*this); |
81 | 517 | for (const auto& [file, info] : fileInfo) { |
82 | 517 | batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info); |
83 | 517 | } |
84 | 517 | batch.Write(DB_LAST_BLOCK, nLastFile); |
85 | 104k | for (const CBlockIndex* bi : blockinfo) { |
86 | 104k | batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi}); |
87 | 104k | } |
88 | 517 | return WriteBatch(batch, true); |
89 | 517 | } |
90 | | |
91 | | bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue) |
92 | 0 | { |
93 | 0 | return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'}); |
94 | 0 | } |
95 | | |
96 | | bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue) |
97 | 49.9k | { |
98 | 49.9k | uint8_t ch; |
99 | 49.9k | if (!Read(std::make_pair(DB_FLAG, name), ch)) { |
100 | 49.9k | return false; |
101 | 49.9k | } |
102 | 0 | fValue = ch == uint8_t{'1'}; |
103 | 0 | return true; |
104 | 49.9k | } |
105 | | |
106 | | bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt) |
107 | 49.9k | { |
108 | 49.9k | AssertLockHeld(::cs_main);
109 | 49.9k | std::unique_ptr<CDBIterator> pcursor(NewIterator()); |
110 | 49.9k | pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); |
111 | | |
112 | | // Load m_block_index |
113 | 49.9k | while (pcursor->Valid()) { |
114 | 0 | if (interrupt) return false; |
115 | 0 | std::pair<uint8_t, uint256> key; |
116 | 0 | if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) { |
117 | 0 | CDiskBlockIndex diskindex; |
118 | 0 | if (pcursor->GetValue(diskindex)) { |
119 | | // Construct block index object |
120 | 0 | CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash()); |
121 | 0 | pindexNew->pprev = insertBlockIndex(diskindex.hashPrev); |
122 | 0 | pindexNew->nHeight = diskindex.nHeight; |
123 | 0 | pindexNew->nFile = diskindex.nFile; |
124 | 0 | pindexNew->nDataPos = diskindex.nDataPos; |
125 | 0 | pindexNew->nUndoPos = diskindex.nUndoPos; |
126 | 0 | pindexNew->nVersion = diskindex.nVersion; |
127 | 0 | pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot; |
128 | 0 | pindexNew->nTime = diskindex.nTime; |
129 | 0 | pindexNew->nBits = diskindex.nBits; |
130 | 0 | pindexNew->nNonce = diskindex.nNonce; |
131 | 0 | pindexNew->nStatus = diskindex.nStatus; |
132 | 0 | pindexNew->nTx = diskindex.nTx; |
133 | |
134 | 0 | if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) { |
135 | 0 | LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
136 | 0 | return false; |
137 | 0 | } |
138 | | |
139 | 0 | pcursor->Next(); |
140 | 0 | } else { |
141 | 0 | LogError("%s: failed to read value\n", __func__);
142 | 0 | return false; |
143 | 0 | } |
144 | 0 | } else { |
145 | 0 | break; |
146 | 0 | } |
147 | 0 | } |
148 | | |
149 | 49.9k | return true; |
150 | 49.9k | } |
151 | | } // namespace kernel |
152 | | |
153 | | namespace node { |
154 | | |
155 | | bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const |
156 | 6.27G | { |
157 | | // First sort by most total work, ... |
158 | 6.27G | if (pa->nChainWork > pb->nChainWork) return false;
159 | 6.20G | if (pa->nChainWork < pb->nChainWork) return true;
160 | |
161 | | // ... then by earliest time received, ...
162 | 120M | if (pa->nSequenceId < pb->nSequenceId) return false;
163 | 120M | if (pa->nSequenceId > pb->nSequenceId) return true;
164 | |
165 | | // Use pointer address as tie breaker (should only happen with blocks
166 | | // loaded from disk, as those all have id 0).
167 | 120M | if (pa < pb) return false;
168 | 120M | if (pa > pb) return true;
169 | | |
170 | | // Identical blocks. |
171 | 120M | return false; |
172 | 120M | } |
173 | | |
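CBlockIndexWorkComparator orders candidates by total chain work, then by the sequence id assigned when the full block data arrived, then by pointer address, so the entry with the most work compares greatest and sits at the back of an ordered set. The following self-contained sketch reproduces that three-level ordering with plain integers standing in for arith_uint256 chain work; the type and field names are invented for illustration.

    #include <cassert>
    #include <cstdint>
    #include <set>

    struct Candidate {
        uint64_t chain_work;  // stand-in for nChainWork
        int32_t sequence_id;  // stand-in for nSequenceId
    };

    struct WorkComparator {
        bool operator()(const Candidate* a, const Candidate* b) const
        {
            if (a->chain_work > b->chain_work) return false; // more work sorts later
            if (a->chain_work < b->chain_work) return true;
            if (a->sequence_id < b->sequence_id) return false; // earlier arrival sorts later
            if (a->sequence_id > b->sequence_id) return true;
            if (a < b) return false; // pointer address as final tie breaker
            if (a > b) return true;
            return false; // identical entries
        }
    };

    int main()
    {
        Candidate low{10, 1}, high{20, 2};
        std::set<const Candidate*, WorkComparator> candidates{&low, &high};
        assert(*candidates.rbegin() == &high); // the most-work candidate ends up at rbegin()
    }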
174 | | bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const |
175 | 0 | { |
176 | 0 | return pa->nHeight < pb->nHeight; |
177 | 0 | } |
178 | | |
179 | | std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices() |
180 | 99.9k | { |
181 | 99.9k | AssertLockHeld(cs_main);
182 | 99.9k | std::vector<CBlockIndex*> rv; |
183 | 99.9k | rv.reserve(m_block_index.size()); |
184 | 99.9k | for (auto& [_, block_index] : m_block_index) { |
185 | 0 | rv.push_back(&block_index); |
186 | 0 | } |
187 | 99.9k | return rv; |
188 | 99.9k | } |
189 | | |
190 | | CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) |
191 | 351k | { |
192 | 351k | AssertLockHeld(cs_main);
193 | 351k | BlockMap::iterator it = m_block_index.find(hash); |
194 | 351k | return it == m_block_index.end() ? nullptr : &it->second;
195 | 351k | } |
196 | | |
197 | | const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const |
198 | 0 | { |
199 | 0 | AssertLockHeld(cs_main);
200 | 0 | BlockMap::const_iterator it = m_block_index.find(hash); |
201 | 0 | return it == m_block_index.end() ? nullptr : &it->second; |
202 | 0 | } |
203 | | |
204 | | CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header) |
205 | 10.0M | { |
206 | 10.0M | AssertLockHeld(cs_main);
207 | | |
208 | 10.0M | auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block); |
209 | 10.0M | if (!inserted) { |
210 | 0 | return &mi->second; |
211 | 0 | } |
212 | 10.0M | CBlockIndex* pindexNew = &(*mi).second; |
213 | | |
214 | | // We assign the sequence id to blocks only when the full data is available, |
215 | | // to avoid miners withholding blocks but broadcasting headers, to get a |
216 | | // competitive advantage. |
217 | 10.0M | pindexNew->nSequenceId = 0; |
218 | | |
219 | 10.0M | pindexNew->phashBlock = &((*mi).first); |
220 | 10.0M | BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock); |
221 | 10.0M | if (miPrev != m_block_index.end()) { |
222 | 9.99M | pindexNew->pprev = &(*miPrev).second; |
223 | 9.99M | pindexNew->nHeight = pindexNew->pprev->nHeight + 1; |
224 | 9.99M | pindexNew->BuildSkip(); |
225 | 9.99M | } |
226 | 10.0M | pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
227 | 10.0M | pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
228 | 10.0M | pindexNew->RaiseValidity(BLOCK_VALID_TREE);
229 | 10.0M | if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
230 | 10.0M | best_header = pindexNew; |
231 | 10.0M | } |
232 | | |
233 | 10.0M | m_dirty_blockindex.insert(pindexNew); |
234 | | |
235 | 10.0M | return pindexNew; |
236 | 10.0M | } |
237 | | |
238 | | void BlockManager::PruneOneBlockFile(const int fileNumber) |
239 | 0 | { |
240 | 0 | AssertLockHeld(cs_main);
241 | 0 | LOCK(cs_LastBlockFile);
242 | |
243 | 0 | for (auto& entry : m_block_index) { |
244 | 0 | CBlockIndex* pindex = &entry.second; |
245 | 0 | if (pindex->nFile == fileNumber) { |
246 | 0 | pindex->nStatus &= ~BLOCK_HAVE_DATA; |
247 | 0 | pindex->nStatus &= ~BLOCK_HAVE_UNDO; |
248 | 0 | pindex->nFile = 0; |
249 | 0 | pindex->nDataPos = 0; |
250 | 0 | pindex->nUndoPos = 0; |
251 | 0 | m_dirty_blockindex.insert(pindex); |
252 | | |
253 | | // Prune from m_blocks_unlinked -- any block we prune would have |
254 | | // to be downloaded again in order to consider its chain, at which |
255 | | // point it would be considered as a candidate for |
256 | | // m_blocks_unlinked or setBlockIndexCandidates. |
257 | 0 | auto range = m_blocks_unlinked.equal_range(pindex->pprev); |
258 | 0 | while (range.first != range.second) { |
259 | 0 | std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first; |
260 | 0 | range.first++; |
261 | 0 | if (_it->second == pindex) { |
262 | 0 | m_blocks_unlinked.erase(_it); |
263 | 0 | } |
264 | 0 | } |
265 | 0 | } |
266 | 0 | } |
267 | |
268 | 0 | m_blockfile_info.at(fileNumber) = CBlockFileInfo{}; |
269 | 0 | m_dirty_fileinfo.insert(fileNumber); |
270 | 0 | } |
271 | | |
272 | | void BlockManager::FindFilesToPruneManual( |
273 | | std::set<int>& setFilesToPrune, |
274 | | int nManualPruneHeight, |
275 | | const Chainstate& chain, |
276 | | ChainstateManager& chainman) |
277 | 0 | { |
278 | 0 | assert(IsPruneMode() && nManualPruneHeight > 0); |
279 | | |
280 | 0 | LOCK2(cs_main, cs_LastBlockFile);
281 | 0 | if (chain.m_chain.Height() < 0) { |
282 | 0 | return; |
283 | 0 | } |
284 | | |
285 | 0 | const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight); |
286 | |
287 | 0 | int count = 0; |
288 | 0 | for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) { |
289 | 0 | const auto& fileinfo = m_blockfile_info[fileNumber]; |
290 | 0 | if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { |
291 | 0 | continue; |
292 | 0 | } |
293 | | |
294 | 0 | PruneOneBlockFile(fileNumber); |
295 | 0 | setFilesToPrune.insert(fileNumber); |
296 | 0 | count++; |
297 | 0 | } |
298 | 0 | LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
299 | 0 | chain.GetRole(), last_block_can_prune, count); |
300 | 0 | } |
301 | | |
302 | | void BlockManager::FindFilesToPrune( |
303 | | std::set<int>& setFilesToPrune, |
304 | | int last_prune, |
305 | | const Chainstate& chain, |
306 | | ChainstateManager& chainman) |
307 | 0 | { |
308 | 0 | LOCK2(cs_main, cs_LastBlockFile);
309 | | // Distribute our -prune budget over all chainstates. |
310 | 0 | const auto target = std::max( |
311 | 0 | MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size()); |
312 | 0 | const uint64_t target_sync_height = chainman.m_best_header->nHeight; |
313 | |
314 | 0 | if (chain.m_chain.Height() < 0 || target == 0) { |
315 | 0 | return; |
316 | 0 | } |
317 | 0 | if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) { |
318 | 0 | return; |
319 | 0 | } |
320 | | |
321 | 0 | const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune); |
322 | |
323 | 0 | uint64_t nCurrentUsage = CalculateCurrentUsage(); |
324 | | // We don't check to prune until after we've allocated new space for files |
325 | | // So we should leave a buffer under our target to account for another allocation |
326 | | // before the next pruning. |
327 | 0 | uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; |
328 | 0 | uint64_t nBytesToPrune; |
329 | 0 | int count = 0; |
330 | |
331 | 0 | if (nCurrentUsage + nBuffer >= target) { |
332 | | // On a prune event, the chainstate DB is flushed. |
333 | | // To avoid excessive prune events negating the benefit of high dbcache |
334 | | // values, we should not prune too rapidly. |
335 | | // So when pruning in IBD, increase the buffer to avoid a re-prune too soon. |
336 | 0 | const auto chain_tip_height = chain.m_chain.Height(); |
337 | 0 | if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) { |
338 | | // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average |
339 | 0 | static constexpr uint64_t average_block_size = 1000000; /* 1 MB */ |
340 | 0 | const uint64_t remaining_blocks = target_sync_height - chain_tip_height; |
341 | 0 | nBuffer += average_block_size * remaining_blocks; |
342 | 0 | } |
343 | |
344 | 0 | for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) { |
345 | 0 | const auto& fileinfo = m_blockfile_info[fileNumber]; |
346 | 0 | nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize; |
347 | |
348 | 0 | if (fileinfo.nSize == 0) { |
349 | 0 | continue; |
350 | 0 | } |
351 | | |
352 | 0 | if (nCurrentUsage + nBuffer < target) { // are we below our target? |
353 | 0 | break; |
354 | 0 | } |
355 | | |
356 | | // don't prune files that could have a block that's not within the allowable |
357 | | // prune range for the chain being pruned. |
358 | 0 | if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { |
359 | 0 | continue; |
360 | 0 | } |
361 | | |
362 | 0 | PruneOneBlockFile(fileNumber); |
363 | | // Queue up the files for removal |
364 | 0 | setFilesToPrune.insert(fileNumber); |
365 | 0 | nCurrentUsage -= nBytesToPrune; |
366 | 0 | count++; |
367 | 0 | } |
368 | 0 | } |
369 | |
370 | 0 | LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
371 | 0 | chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024, |
372 | 0 | (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024, |
373 | 0 | min_block_to_prune, last_block_can_prune, count); |
374 | 0 | } |
375 | | |
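FindFilesToPrune only starts deleting once current usage plus a safety buffer reaches the per-chainstate share of the -prune target, and while still in initial block download it pads that buffer by an assumed ~1 MB per block left to sync so a prune does not immediately retrigger. A small self-contained model of that trigger follows; the chunk sizes and prune target here are illustrative numbers, not the real constants.

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Illustrative stand-ins for BLOCKFILE_CHUNK_SIZE / UNDOFILE_CHUNK_SIZE and the prune target.
        const uint64_t blockfile_chunk = 16ull * 1024 * 1024;
        const uint64_t undofile_chunk = 1ull * 1024 * 1024;
        const uint64_t target = 2048ull * 1024 * 1024; // e.g. one chainstate's share of -prune

        uint64_t usage = 1900ull * 1024 * 1024;   // current blk+rev bytes on disk
        uint64_t buffer = blockfile_chunk + undofile_chunk;

        // During IBD, pad the buffer by ~1 MB per block still to be downloaded.
        const uint64_t remaining_blocks = 500;
        buffer += 1000000ull * remaining_blocks;

        const bool should_prune = usage + buffer >= target;
        std::printf("usage=%llu buffer=%llu prune=%d\n",
                    (unsigned long long)usage, (unsigned long long)buffer, should_prune);
    }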
376 | 0 | void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) { |
377 | 0 | AssertLockHeld(::cs_main);
378 | 0 | m_prune_locks[name] = lock_info; |
379 | 0 | } |
380 | | |
381 | | CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash) |
382 | 0 | { |
383 | 0 | AssertLockHeld(cs_main);
384 | |
385 | 0 | if (hash.IsNull()) { |
386 | 0 | return nullptr; |
387 | 0 | } |
388 | | |
389 | 0 | const auto [mi, inserted]{m_block_index.try_emplace(hash)}; |
390 | 0 | CBlockIndex* pindex = &(*mi).second; |
391 | 0 | if (inserted) { |
392 | 0 | pindex->phashBlock = &((*mi).first); |
393 | 0 | } |
394 | 0 | return pindex; |
395 | 0 | } |
396 | | |
397 | | bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash) |
398 | 49.9k | { |
399 | 49.9k | if (!m_block_tree_db->LoadBlockIndexGuts( |
400 | 49.9k | GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
401 | 0 | return false; |
402 | 0 | } |
403 | | |
404 | 49.9k | if (snapshot_blockhash) { |
405 | 0 | const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash); |
406 | 0 | if (!maybe_au_data) { |
407 | 0 | m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
408 | 0 | return false; |
409 | 0 | } |
410 | 0 | const AssumeutxoData& au_data = *Assert(maybe_au_data);
411 | 0 | m_snapshot_height = au_data.height; |
412 | 0 | CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)}; |
413 | | |
414 | | // Since m_chain_tx_count (responsible for estimated progress) isn't persisted |
415 | | // to disk, we must bootstrap the value for assumedvalid chainstates |
416 | | // from the hardcoded assumeutxo chainparams. |
417 | 0 | base->m_chain_tx_count = au_data.m_chain_tx_count; |
418 | 0 | LogPrintf("[snapshot] set m_chain_tx_count=%d for %s\n", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
419 | 49.9k | } else { |
420 | | // If this isn't called with a snapshot blockhash, make sure the cached snapshot height |
421 | | // is null. This is relevant during snapshot completion, when the blockman may be loaded |
422 | | // with a height that then needs to be cleared after the snapshot is fully validated. |
423 | 49.9k | m_snapshot_height.reset(); |
424 | 49.9k | } |
425 | | |
426 | 49.9k | Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
427 | | |
428 | | // Calculate nChainWork |
429 | 49.9k | std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()}; |
430 | 49.9k | std::sort(vSortedByHeight.begin(), vSortedByHeight.end(), |
431 | 49.9k | CBlockIndexHeightOnlyComparator()); |
432 | | |
433 | 49.9k | CBlockIndex* previous_index{nullptr}; |
434 | 49.9k | for (CBlockIndex* pindex : vSortedByHeight) { |
435 | 0 | if (m_interrupt) return false; |
436 | 0 | if (previous_index && pindex->nHeight > previous_index->nHeight + 1) { |
437 | 0 | LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
438 | 0 | return false; |
439 | 0 | } |
440 | 0 | previous_index = pindex; |
441 | 0 | pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); |
442 | 0 | pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime); |
443 | | |
444 | | // We can link the chain of blocks for which we've received transactions at some point, or |
445 | | // blocks that are assumed-valid on the basis of snapshot load (see |
446 | | // PopulateAndValidateSnapshot()). |
447 | | // Pruned nodes may have deleted the block. |
448 | 0 | if (pindex->nTx > 0) { |
449 | 0 | if (pindex->pprev) { |
450 | 0 | if (m_snapshot_height && pindex->nHeight == *m_snapshot_height && |
451 | 0 | pindex->GetBlockHash() == *snapshot_blockhash) { |
452 | | // Should have been set above; don't disturb it with code below. |
453 | 0 | Assert(pindex->m_chain_tx_count > 0);
454 | 0 | } else if (pindex->pprev->m_chain_tx_count > 0) { |
455 | 0 | pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx; |
456 | 0 | } else { |
457 | 0 | pindex->m_chain_tx_count = 0; |
458 | 0 | m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex)); |
459 | 0 | } |
460 | 0 | } else { |
461 | 0 | pindex->m_chain_tx_count = pindex->nTx; |
462 | 0 | } |
463 | 0 | } |
464 | 0 | if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) { |
465 | 0 | pindex->nStatus |= BLOCK_FAILED_CHILD; |
466 | 0 | m_dirty_blockindex.insert(pindex); |
467 | 0 | } |
468 | 0 | if (pindex->pprev) { |
469 | 0 | pindex->BuildSkip(); |
470 | 0 | } |
471 | 0 | } |
472 | | |
473 | 49.9k | return true; |
474 | 49.9k | } |
475 | | |
476 | | bool BlockManager::WriteBlockIndexDB() |
477 | 517 | { |
478 | 517 | AssertLockHeld(::cs_main);
479 | 517 | std::vector<std::pair<int, const CBlockFileInfo*>> vFiles; |
480 | 517 | vFiles.reserve(m_dirty_fileinfo.size()); |
481 | 1.03k | for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) { |
482 | 517 | vFiles.emplace_back(*it, &m_blockfile_info[*it]); |
483 | 517 | m_dirty_fileinfo.erase(it++); |
484 | 517 | } |
485 | 517 | std::vector<const CBlockIndex*> vBlocks; |
486 | 517 | vBlocks.reserve(m_dirty_blockindex.size()); |
487 | 105k | for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) { |
488 | 104k | vBlocks.push_back(*it); |
489 | 104k | m_dirty_blockindex.erase(it++); |
490 | 104k | } |
491 | 517 | int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
492 | 517 | if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) { |
493 | 0 | return false; |
494 | 0 | } |
495 | 517 | return true; |
496 | 517 | } |
497 | | |
498 | | bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash) |
499 | 49.9k | { |
500 | 49.9k | if (!LoadBlockIndex(snapshot_blockhash)) { |
501 | 0 | return false; |
502 | 0 | } |
503 | 49.9k | int max_blockfile_num{0}; |
504 | | |
505 | | // Load block file info |
506 | 49.9k | m_block_tree_db->ReadLastBlockFile(max_blockfile_num); |
507 | 49.9k | m_blockfile_info.resize(max_blockfile_num + 1); |
508 | 49.9k | LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
509 | 99.9k | for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
510 | 49.9k | m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]); |
511 | 49.9k | } |
512 | 49.9k | LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
513 | 49.9k | for (int nFile = max_blockfile_num + 1; true; nFile++) {
514 | 49.9k | CBlockFileInfo info; |
515 | 49.9k | if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) { |
516 | 0 | m_blockfile_info.push_back(info); |
517 | 49.9k | } else { |
518 | 49.9k | break; |
519 | 49.9k | } |
520 | 49.9k | } |
521 | | |
522 | | // Check presence of blk files |
523 | 49.9k | LogPrintf("Checking all blk files are present...\n");
524 | 49.9k | std::set<int> setBlkDataFiles; |
525 | 49.9k | for (const auto& [_, block_index] : m_block_index) { |
526 | 0 | if (block_index.nStatus & BLOCK_HAVE_DATA) { |
527 | 0 | setBlkDataFiles.insert(block_index.nFile); |
528 | 0 | } |
529 | 0 | } |
530 | 49.9k | for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
531 | 0 | FlatFilePos pos(*it, 0); |
532 | 0 | if (OpenBlockFile(pos, /*fReadOnly=*/true).IsNull()) { |
533 | 0 | return false; |
534 | 0 | } |
535 | 0 | } |
536 | | |
537 | 49.9k | { |
538 | | // Initialize the blockfile cursors. |
539 | 49.9k | LOCK(cs_LastBlockFile);
540 | 99.9k | for (size_t i = 0; i < m_blockfile_info.size(); ++i49.9k ) { |
541 | 49.9k | const auto last_height_in_file = m_blockfile_info[i].nHeightLast; |
542 | 49.9k | m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0}; |
543 | 49.9k | } |
544 | 49.9k | } |
545 | | |
546 | | // Check whether we have ever pruned block & undo files |
547 | 49.9k | m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned); |
548 | 49.9k | if (m_have_pruned) { |
549 | 0 | LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
550 | 0 | } |
551 | | |
552 | | // Check whether we need to continue reindexing |
553 | 49.9k | bool fReindexing = false; |
554 | 49.9k | m_block_tree_db->ReadReindexing(fReindexing); |
555 | 49.9k | if (fReindexing) m_blockfiles_indexed = false;
556 | | |
557 | 49.9k | return true; |
558 | 49.9k | } |
559 | | |
560 | | void BlockManager::ScanAndUnlinkAlreadyPrunedFiles() |
561 | 49.9k | { |
562 | 49.9k | AssertLockHeld(::cs_main);
563 | 49.9k | int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
564 | 49.9k | if (!m_have_pruned) { |
565 | 49.9k | return; |
566 | 49.9k | } |
567 | | |
568 | 0 | std::set<int> block_files_to_prune; |
569 | 0 | for (int file_number = 0; file_number < max_blockfile; file_number++) { |
570 | 0 | if (m_blockfile_info[file_number].nSize == 0) { |
571 | 0 | block_files_to_prune.insert(file_number); |
572 | 0 | } |
573 | 0 | } |
574 | |
575 | 0 | UnlinkPrunedFiles(block_files_to_prune); |
576 | 0 | } |
577 | | |
578 | | bool BlockManager::IsBlockPruned(const CBlockIndex& block) const |
579 | 0 | { |
580 | 0 | AssertLockHeld(::cs_main);
581 | 0 | return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0); |
582 | 0 | } |
583 | | |
584 | | const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const |
585 | 0 | { |
586 | 0 | AssertLockHeld(::cs_main);
587 | 0 | const CBlockIndex* last_block = &upper_block; |
588 | 0 | assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask |
589 | 0 | while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) { |
590 | 0 | if (lower_block) { |
591 | | // Return if we reached the lower_block |
592 | 0 | if (last_block == lower_block) return lower_block; |
593 | | // If the range was surpassed, it means 'lower_block' is not part of the 'upper_block' chain,
594 | | // and currently this is not allowed.
595 | 0 | assert(last_block->nHeight >= lower_block->nHeight); |
596 | 0 | } |
597 | 0 | last_block = last_block->pprev; |
598 | 0 | } |
599 | 0 | assert(last_block != nullptr); |
600 | 0 | return last_block; |
601 | 0 | } |
602 | | |
603 | | bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block) |
604 | 0 | { |
605 | 0 | if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false; |
606 | 0 | return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block; |
607 | 0 | } |
608 | | |
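GetFirstBlock walks pprev pointers backwards for as long as every ancestor still satisfies the requested status mask, which is how CheckBlockDataAvailability confirms an unbroken run of BLOCK_HAVE_DATA between two indexes. Below is a stripped-down, self-contained version of the same walk over a hand-built chain; the Node type and flag value are invented for the example.

    #include <cassert>
    #include <cstdint>

    struct Node {
        Node* pprev{nullptr};
        uint32_t status{0};
    };

    // Earliest ancestor reachable from `upper` such that every block in between
    // (and `upper` itself, by the caller's precondition) satisfies `mask`.
    const Node* FirstWithStatus(const Node& upper, uint32_t mask)
    {
        const Node* last = &upper;
        while (last->pprev && (last->pprev->status & mask) == mask) {
            last = last->pprev;
        }
        return last;
    }

    int main()
    {
        constexpr uint32_t HAVE_DATA = 1u << 0;
        Node a{nullptr, 0};
        Node b{&a, HAVE_DATA};
        Node c{&b, HAVE_DATA};
        Node d{&c, HAVE_DATA};
        // `a` lacks data, so the contiguous HAVE_DATA run ending at `d` starts at `b`.
        assert(FirstWithStatus(d, HAVE_DATA) == &b);
    }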
609 | | // If we're using -prune with -reindex, then delete block files that will be ignored by the |
610 | | // reindex. Since reindexing works by starting at block file 0 and looping until a blockfile |
611 | | // is missing, do the same here to delete any later block files after a gap. Also delete all |
612 | | // rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info |
613 | | // is in sync with what's actually on disk by the time we start downloading, so that pruning |
614 | | // works correctly. |
615 | | void BlockManager::CleanupBlockRevFiles() const |
616 | 0 | { |
617 | 0 | std::map<std::string, fs::path> mapBlockFiles; |
618 | | |
619 | | // Glob all blk?????.dat and rev?????.dat files from the blocks directory. |
620 | | // Remove the rev files immediately and insert the blk file paths into an |
621 | | // ordered map keyed by block file index. |
622 | 0 | LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
623 | 0 | for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) { |
624 | 0 | const std::string path = fs::PathToString(it->path().filename()); |
625 | 0 | if (fs::is_regular_file(*it) && |
626 | 0 | path.length() == 12 && |
627 | 0 | path.ends_with(".dat")) |
628 | 0 | { |
629 | 0 | if (path.starts_with("blk")) { |
630 | 0 | mapBlockFiles[path.substr(3, 5)] = it->path(); |
631 | 0 | } else if (path.starts_with("rev")) { |
632 | 0 | remove(it->path()); |
633 | 0 | } |
634 | 0 | } |
635 | 0 | } |
636 | | |
637 | | // Remove all block files that aren't part of a contiguous set starting at |
638 | | // zero by walking the ordered map (keys are block file indices) by |
639 | | // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist) |
640 | | // start removing block files. |
641 | 0 | int nContigCounter = 0; |
642 | 0 | for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) { |
643 | 0 | if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) { |
644 | 0 | nContigCounter++; |
645 | 0 | continue; |
646 | 0 | } |
647 | 0 | remove(item.second); |
648 | 0 | } |
649 | 0 | } |
650 | | |
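The cleanup above keeps blk files only while their indices form an unbroken run starting at 00000; everything after the first gap is removed so that m_blockfile_info matches what the reindex will actually read. A self-contained sketch of the same gap detection over plain indices, with no filesystem access:

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>

    // Return the blk file indices that survive: the longest contiguous prefix 0, 1, 2, ...
    std::set<int> KeepContiguous(const std::map<int, std::string>& blk_files)
    {
        std::set<int> keep;
        int expected = 0;
        for (const auto& [index, path] : blk_files) {
            if (index != expected) break; // first gap; the remaining files would be deleted
            keep.insert(index);
            ++expected;
        }
        return keep;
    }

    int main()
    {
        const std::map<int, std::string> files{
            {0, "blk00000.dat"}, {1, "blk00001.dat"}, {3, "blk00003.dat"}};
        assert(KeepContiguous(files) == (std::set<int>{0, 1})); // index 3 follows a gap
    }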
651 | | CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n) |
652 | 0 | { |
653 | 0 | LOCK(cs_LastBlockFile);
654 | |
655 | 0 | return &m_blockfile_info.at(n); |
656 | 0 | } |
657 | | |
658 | | bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const |
659 | 55 | { |
660 | 55 | const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
661 | | |
662 | | // Open history file to read |
663 | 55 | AutoFile file{OpenUndoFile(pos, true)}; |
664 | 55 | if (file.IsNull()) { |
665 | 0 | LogError("OpenUndoFile failed for %s while reading block undo", pos.ToString());
666 | 0 | return false; |
667 | 0 | } |
668 | 55 | BufferedReader filein{std::move(file)}; |
669 | | |
670 | 55 | try { |
671 | | // Read block |
672 | 55 | HashVerifier verifier{filein}; // Use HashVerifier, as reserializing may lose data, c.f. commit d3424243 |
673 | | |
674 | 55 | verifier << index.pprev->GetBlockHash(); |
675 | 55 | verifier >> blockundo; |
676 | | |
677 | 55 | uint256 hashChecksum; |
678 | 55 | filein >> hashChecksum; |
679 | | |
680 | | // Verify checksum |
681 | 55 | if (hashChecksum != verifier.GetHash()) { |
682 | 0 | LogError("Checksum mismatch at %s while reading block undo", pos.ToString());
683 | 0 | return false; |
684 | 0 | } |
685 | 55 | } catch (const std::exception& e) { |
686 | 0 | LogError("Deserialize or I/O error - %s at %s while reading block undo", e.what(), pos.ToString());
687 | 0 | return false; |
688 | 0 | } |
689 | | |
690 | 55 | return true; |
691 | 55 | } |
692 | | |
693 | | bool BlockManager::FlushUndoFile(int block_file, bool finalize) |
694 | 517 | { |
695 | 517 | FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize); |
696 | 517 | if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) { |
697 | 0 | m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error.")); |
698 | 0 | return false; |
699 | 0 | } |
700 | 517 | return true; |
701 | 517 | } |
702 | | |
703 | | bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo) |
704 | 517 | { |
705 | 517 | bool success = true; |
706 | 517 | LOCK(cs_LastBlockFile);
707 | | |
708 | 517 | if (m_blockfile_info.size() < 1) { |
709 | | // Return if we haven't loaded any blockfiles yet. This happens during |
710 | | // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which |
711 | | // then calls FlushStateToDisk()), resulting in a call to this function before we |
712 | | // have populated `m_blockfile_info` via LoadBlockIndexDB(). |
713 | 0 | return true; |
714 | 0 | } |
715 | 517 | assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num); |
716 | | |
717 | 517 | FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize); |
718 | 517 | if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) { |
719 | 0 | m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error.")); |
720 | 0 | success = false; |
721 | 0 | } |
722 | | // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks, |
723 | | // e.g. during IBD or a sync after a node going offline |
724 | 517 | if (!fFinalize || finalize_undo) {
725 | 517 | if (!FlushUndoFile(blockfile_num, finalize_undo)) { |
726 | 0 | success = false; |
727 | 0 | } |
728 | 517 | } |
729 | 517 | return success; |
730 | 517 | } |
731 | | |
732 | | BlockfileType BlockManager::BlockfileTypeForHeight(int height) |
733 | 20.0M | { |
734 | 20.0M | if (!m_snapshot_height) { |
735 | 20.0M | return BlockfileType::NORMAL; |
736 | 20.0M | } |
737 | 0 | return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL; |
738 | 20.0M | } |
739 | | |
740 | | bool BlockManager::FlushChainstateBlockFile(int tip_height) |
741 | 517 | { |
742 | 517 | LOCK(cs_LastBlockFile);
743 | 517 | auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)]; |
744 | | // If the cursor does not exist, it means an assumeutxo snapshot is loaded, |
745 | | // but no blocks past the snapshot height have been written yet, so there |
746 | | // is no data associated with the chainstate, and it is safe not to flush. |
747 | 517 | if (cursor) { |
748 | 517 | return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false); |
749 | 517 | } |
750 | | // No need to log warnings in this case. |
751 | 0 | return true; |
752 | 517 | } |
753 | | |
754 | | uint64_t BlockManager::CalculateCurrentUsage() |
755 | 0 | { |
756 | 0 | LOCK(cs_LastBlockFile);
757 | |
758 | 0 | uint64_t retval = 0; |
759 | 0 | for (const CBlockFileInfo& file : m_blockfile_info) { |
760 | 0 | retval += file.nSize + file.nUndoSize; |
761 | 0 | } |
762 | 0 | return retval; |
763 | 0 | } |
764 | | |
765 | | void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const |
766 | 0 | { |
767 | 0 | std::error_code ec; |
768 | 0 | for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) { |
769 | 0 | FlatFilePos pos(*it, 0); |
770 | 0 | const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)}; |
771 | 0 | const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)}; |
772 | 0 | if (removed_blockfile || removed_undofile) { |
773 | 0 | LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
774 | 0 | } |
775 | 0 | } |
776 | 0 | } |
777 | | |
778 | | AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const |
779 | 10.0M | { |
780 | 10.0M | return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_xor_key}; |
781 | 10.0M | } |
782 | | |
783 | | /** Open an undo file (rev?????.dat) */ |
784 | | AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const |
785 | 9.99M | { |
786 | 9.99M | return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_xor_key}; |
787 | 9.99M | } |
788 | | |
789 | | fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const |
790 | 0 | { |
791 | 0 | return m_block_file_seq.FileName(pos); |
792 | 0 | } |
793 | | |
794 | | FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime) |
795 | 10.0M | { |
796 | 10.0M | LOCK(cs_LastBlockFile);
797 | | |
798 | 10.0M | const BlockfileType chain_type = BlockfileTypeForHeight(nHeight); |
799 | | |
800 | 10.0M | if (!m_blockfile_cursors[chain_type]) { |
801 | | // If a snapshot is loaded during runtime, we may not have initialized this cursor yet. |
802 | 0 | assert(chain_type == BlockfileType::ASSUMED); |
803 | 0 | const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1}; |
804 | 0 | m_blockfile_cursors[chain_type] = new_cursor; |
805 | 0 | LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
806 | 0 | } |
807 | 10.0M | const int last_blockfile = m_blockfile_cursors[chain_type]->file_num; |
808 | | |
809 | 10.0M | int nFile = last_blockfile; |
810 | 10.0M | if (static_cast<int>(m_blockfile_info.size()) <= nFile) { |
811 | 0 | m_blockfile_info.resize(nFile + 1); |
812 | 0 | } |
813 | | |
814 | 10.0M | bool finalize_undo = false; |
815 | 10.0M | unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE}; |
816 | | // Use smaller blockfiles in test-only -fastprune mode - but avoid |
817 | | // the possibility of having a block not fit into the block file. |
818 | 10.0M | if (m_opts.fast_prune) { |
819 | 0 | max_blockfile_size = 0x10000; // 64kiB |
820 | 0 | if (nAddSize >= max_blockfile_size) { |
821 | | // dynamically adjust the blockfile size to be larger than the added size |
822 | 0 | max_blockfile_size = nAddSize + 1; |
823 | 0 | } |
824 | 0 | } |
825 | 10.0M | assert(nAddSize < max_blockfile_size); |
826 | | |
827 | 10.0M | while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) { |
828 | | // when the undo file is keeping up with the block file, we want to flush it explicitly |
829 | | // when it is lagging behind (more blocks arrive than are being connected), we let the |
830 | | // undo block write case handle it |
831 | 0 | finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) == |
832 | 0 | Assert(m_blockfile_cursors[chain_type])->undo_height);
833 | | |
834 | | // Try the next unclaimed blockfile number |
835 | 0 | nFile = this->MaxBlockfileNum() + 1; |
836 | | // Set to increment MaxBlockfileNum() for next iteration |
837 | 0 | m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; |
838 | |
839 | 0 | if (static_cast<int>(m_blockfile_info.size()) <= nFile) { |
840 | 0 | m_blockfile_info.resize(nFile + 1); |
841 | 0 | } |
842 | 0 | } |
843 | 10.0M | FlatFilePos pos; |
844 | 10.0M | pos.nFile = nFile; |
845 | 10.0M | pos.nPos = m_blockfile_info[nFile].nSize; |
846 | | |
847 | 10.0M | if (nFile != last_blockfile) { |
848 | 0 | LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
849 | 0 | last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight); |
850 | | |
851 | | // Do not propagate the return code. The flush concerns a previous block |
852 | | // and undo file that has already been written to. If a flush fails |
853 | | // here, and we crash, there is no expected additional block data |
854 | | // inconsistency arising from the flush failure here. However, the undo |
855 | | // data may be inconsistent after a crash if the flush is called during |
856 | | // a reindex. A flush error might also leave some of the data files |
857 | | // untrimmed. |
858 | 0 | if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) { |
859 | 0 | LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
860 | 0 | "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n", |
861 | 0 | last_blockfile, finalize_undo, nFile); |
862 | 0 | } |
863 | | // No undo data yet in the new file, so reset our undo-height tracking. |
864 | 0 | m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; |
865 | 0 | } |
866 | | |
867 | 10.0M | m_blockfile_info[nFile].AddBlock(nHeight, nTime); |
868 | 10.0M | m_blockfile_info[nFile].nSize += nAddSize; |
869 | | |
870 | 10.0M | bool out_of_space; |
871 | 10.0M | size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space); |
872 | 10.0M | if (out_of_space) { |
873 | 0 | m_opts.notifications.fatalError(_("Disk space is too low!")); |
874 | 0 | return {}; |
875 | 0 | } |
876 | 10.0M | if (bytes_allocated != 0 && IsPruneMode()) {
877 | 0 | m_check_for_pruning = true; |
878 | 0 | } |
879 | | |
880 | 10.0M | m_dirty_fileinfo.insert(nFile); |
881 | 10.0M | return pos; |
882 | 10.0M | } |
883 | | |
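FindNextBlockPos keeps appending to the current blockfile until the incoming block would push it past the maximum file size (a much smaller cap under -fastprune), then finalizes that file and moves the cursor to the next unclaimed number. A simplified self-contained model of just that rollover decision follows; the function, struct, and cap value are illustrative, and the real code also flushes and finalizes the file being left behind.

    #include <cassert>
    #include <cstdint>

    struct FilePos {
        int file_num;
        uint64_t offset;
    };

    // Choose where `add_size` new bytes go: the current file at its current size,
    // or offset 0 of the next file when the current one would overflow the cap.
    FilePos NextBlockPos(int file_num, uint64_t file_size, uint64_t add_size, uint64_t max_file_size)
    {
        if (file_size + add_size >= max_file_size) {
            return {file_num + 1, 0};
        }
        return {file_num, file_size};
    }

    int main()
    {
        const uint64_t cap = 128ull * 1024 * 1024; // illustrative stand-in for MAX_BLOCKFILE_SIZE
        const FilePos pos = NextBlockPos(/*file_num=*/7, /*file_size=*/cap - 100, /*add_size=*/1000, cap);
        assert(pos.file_num == 8 && pos.offset == 0); // block did not fit, so a new file is started
    }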
884 | | void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos) |
885 | 0 | { |
886 | 0 | LOCK(cs_LastBlockFile);
887 | | |
888 | | // Update the cursor so it points to the last file. |
889 | 0 | const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)}; |
890 | 0 | auto& cursor{m_blockfile_cursors[chain_type]}; |
891 | 0 | if (!cursor || cursor->file_num < pos.nFile) { |
892 | 0 | m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile}; |
893 | 0 | } |
894 | | |
895 | | // Update the file information with the current block. |
896 | 0 | const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block)); |
897 | 0 | const int nFile = pos.nFile; |
898 | 0 | if (static_cast<int>(m_blockfile_info.size()) <= nFile) { |
899 | 0 | m_blockfile_info.resize(nFile + 1); |
900 | 0 | } |
901 | 0 | m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime()); |
902 | 0 | m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize); |
903 | 0 | m_dirty_fileinfo.insert(nFile); |
904 | 0 | } |
905 | | |
906 | | bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize) |
907 | 9.99M | { |
908 | 9.99M | pos.nFile = nFile; |
909 | | |
910 | 9.99M | LOCK(cs_LastBlockFile);
911 | | |
912 | 9.99M | pos.nPos = m_blockfile_info[nFile].nUndoSize; |
913 | 9.99M | m_blockfile_info[nFile].nUndoSize += nAddSize; |
914 | 9.99M | m_dirty_fileinfo.insert(nFile); |
915 | | |
916 | 9.99M | bool out_of_space; |
917 | 9.99M | size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space); |
918 | 9.99M | if (out_of_space) { |
919 | 0 | return FatalError(m_opts.notifications, state, _("Disk space is too low!")); |
920 | 0 | } |
921 | 9.99M | if (bytes_allocated != 0 && IsPruneMode()) {
922 | 0 | m_check_for_pruning = true; |
923 | 0 | } |
924 | | |
925 | 9.99M | return true; |
926 | 9.99M | } |
927 | | |
928 | | bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) |
929 | 9.99M | { |
930 | 9.99M | AssertLockHeld(::cs_main);
931 | 9.99M | const BlockfileType type = BlockfileTypeForHeight(block.nHeight); |
932 | 9.99M | auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
933 | | |
934 | | // Write undo information to disk |
935 | 9.99M | if (block.GetUndoPos().IsNull()) { |
936 | 9.99M | FlatFilePos pos; |
937 | 9.99M | const auto blockundo_size{static_cast<uint32_t>(GetSerializeSize(blockundo))}; |
938 | 9.99M | if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) { |
939 | 0 | LogError("FindUndoPos failed for %s while writing block undo", pos.ToString());
940 | 0 | return false; |
941 | 0 | } |
942 | | |
943 | 9.99M | { |
944 | | // Open history file to append |
945 | 9.99M | AutoFile file{OpenUndoFile(pos)}; |
946 | 9.99M | if (file.IsNull()) { |
947 | 0 | LogError("OpenUndoFile failed for %s while writing block undo", pos.ToString());
948 | 0 | return FatalError(m_opts.notifications, state, _("Failed to write undo data.")); |
949 | 0 | } |
950 | 9.99M | BufferedWriter fileout{file}; |
951 | | |
952 | | // Write index header |
953 | 9.99M | fileout << GetParams().MessageStart() << blockundo_size; |
954 | 9.99M | pos.nPos += STORAGE_HEADER_BYTES; |
955 | 9.99M | { |
956 | | // Calculate checksum |
957 | 9.99M | HashWriter hasher{}; |
958 | 9.99M | hasher << block.pprev->GetBlockHash() << blockundo; |
959 | | // Write undo data & checksum |
960 | 9.99M | fileout << blockundo << hasher.GetHash(); |
961 | 9.99M | } |
962 | | |
963 | 9.99M | fileout.flush(); // Make sure `AutoFile`/`BufferedWriter` go out of scope before we call `FlushUndoFile` |
964 | 9.99M | } |
965 | | |
966 | | // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order) |
967 | | // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height |
968 | | // in the block file info as below; note that this does not catch the case where the undo writes are keeping up |
969 | | // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in |
970 | | // the FindNextBlockPos function |
971 | 9.99M | if (pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast0 ) { |
972 | | // Do not propagate the return code, a failed flush here should not |
973 | | // be an indication for a failed write. If it were propagated here, |
974 | | // the caller would assume the undo data not to be written, when in |
975 | | // fact it is. Note though, that a failed flush might leave the data |
976 | | // file untrimmed. |
977 | 0 | if (!FlushUndoFile(pos.nFile, true)) { |
978 | 0 | LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile);
979 | 0 | } |
980 | 9.99M | } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) { |
981 | 9.99M | cursor.undo_height = block.nHeight; |
982 | 9.99M | } |
983 | | // update nUndoPos in block index |
984 | 9.99M | block.nUndoPos = pos.nPos; |
985 | 9.99M | block.nStatus |= BLOCK_HAVE_UNDO; |
986 | 9.99M | m_dirty_blockindex.insert(&block); |
987 | 9.99M | } |
988 | | |
989 | 9.99M | return true; |
990 | 9.99M | } |
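For orientation, here is the on-disk framing of one undo record as it follows from the writes above; field widths are inferred from the types used (MessageStartChars, uint32_t, uint256) and this is a sketch rather than a normative spec:

// Undo ("rev" file) record layout implied by WriteBlockUndo:
//
//   +0                    4-byte network magic, GetParams().MessageStart()
//   +4                    4-byte blockundo_size (uint32_t serialized size of CBlockUndo)
//   +8                    blockundo_size bytes of serialized CBlockUndo
//   +8 + blockundo_size   32-byte checksum: hash of (pprev block hash || CBlockUndo)
//
// STORAGE_HEADER_BYTES spans the magic and size fields, so block.nUndoPos
// (pos.nPos after the advance above) points at the CBlockUndo payload, which
// is consistent with UNDO_DATA_DISK_OVERHEAD covering the header plus the
// trailing checksum in the FindUndoPos allocation.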
991 | | |
992 | | bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos) const |
993 | 50.8k | { |
994 | 50.8k | block.SetNull(); |
995 | | |
996 | | // Open history file to read |
997 | 50.8k | std::vector<uint8_t> block_data; |
998 | 50.8k | if (!ReadRawBlock(block_data, pos)) { |
999 | 0 | return false; |
1000 | 0 | } |
1001 | | |
1002 | 50.8k | try { |
1003 | | // Read block |
1004 | 50.8k | SpanReader{block_data} >> TX_WITH_WITNESS(block); |
1005 | 50.8k | } catch (const std::exception& e) { |
1006 | 0 | LogError("Deserialize or I/O error - %s at %s while reading block", e.what(), pos.ToString());
1007 | 0 | return false; |
1008 | 0 | } |
1009 | | |
1010 | | // Check the header |
1011 | 50.8k | if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) { |
1012 | 0 | LogError("Errors in block header at %s while reading block", pos.ToString());
1013 | 0 | return false; |
1014 | 0 | } |
1015 | | |
1016 | | // Signet only: check block solution |
1017 | 50.8k | if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())0 ) { |
1018 | 0 | LogError("Errors in block solution at %s while reading block", pos.ToString());
1019 | 0 | return false; |
1020 | 0 | } |
1021 | | |
1022 | 50.8k | return true; |
1023 | 50.8k | } |
1024 | | |
1025 | | bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const |
1026 | 50.8k | { |
1027 | 50.8k | const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1028 | | |
1029 | 50.8k | if (!ReadBlock(block, block_pos)) { |
1030 | 0 | return false; |
1031 | 0 | } |
1032 | 50.8k | if (block.GetHash() != index.GetBlockHash()) { |
1033 | 0 | LogError("GetHash() doesn't match index for %s at %s while reading block", index.ToString(), block_pos.ToString());
1034 | 0 | return false; |
1035 | 0 | } |
1036 | 50.8k | return true; |
1037 | 50.8k | } |
1038 | | |
1039 | | bool BlockManager::ReadRawBlock(std::vector<uint8_t>& block, const FlatFilePos& pos) const |
1040 | 50.8k | { |
1041 | 50.8k | if (pos.nPos < STORAGE_HEADER_BYTES) { |
1042 | | // If nPos is less than STORAGE_HEADER_BYTES, we can't read the header that precedes the block data |
1043 | | // This would cause an unsigned integer underflow when trying to position the file cursor |
1044 | | // This can happen after pruning or default constructed positions |
1045 | 0 | LogError("Failed for %s while reading raw block storage header", pos.ToString());
1046 | 0 | return false; |
1047 | 0 | } |
1048 | 50.8k | AutoFile filein{OpenBlockFile({pos.nFile, pos.nPos - STORAGE_HEADER_BYTES}, /*fReadOnly=*/true)}; |
1049 | 50.8k | if (filein.IsNull()) { |
1050 | 0 | LogError("OpenBlockFile failed for %s while reading raw block", pos.ToString());
1051 | 0 | return false; |
1052 | 0 | } |
1053 | | |
1054 | 50.8k | try { |
1055 | 50.8k | MessageStartChars blk_start; |
1056 | 50.8k | unsigned int blk_size; |
1057 | | |
1058 | 50.8k | filein >> blk_start >> blk_size; |
1059 | | |
1060 | 50.8k | if (blk_start != GetParams().MessageStart()) { |
1061 | 0 | LogError("Block magic mismatch for %s: %s versus expected %s while reading raw block",
1062 | 0 | pos.ToString(), HexStr(blk_start), HexStr(GetParams().MessageStart())); |
1063 | 0 | return false; |
1064 | 0 | } |
1065 | | |
1066 | 50.8k | if (blk_size > MAX_SIZE) { |
1067 | 0 | LogError("Block data is larger than maximum deserialization size for %s: %s versus %s while reading raw block",
1068 | 0 | pos.ToString(), blk_size, MAX_SIZE); |
1069 | 0 | return false; |
1070 | 0 | } |
1071 | | |
1072 | 50.8k | block.resize(blk_size); // Zeroing of memory is intentional here |
1073 | 50.8k | filein.read(MakeWritableByteSpan(block)); |
1074 | 50.8k | } catch (const std::exception& e) { |
1075 | 0 | LogError("Read from block file failed: %s for %s while reading raw block", e.what(), pos.ToString());
1076 | 0 | return false; |
1077 | 0 | } |
1078 | | |
1079 | 50.8k | return true; |
1080 | 50.8k | } |
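ReadRawBlock above rewinds by STORAGE_HEADER_BYTES so it can re-check the magic and size that WriteBlock stored in front of the payload before reading exactly blk_size bytes. A standalone sketch of reading one such framed record with std::ifstream, under stated assumptions (in particular it ignores the blocksdir XOR obfuscation that the real AutoFile applies, and assumes a little-endian host and an 8-byte header of 4-byte magic plus 4-byte size):

// Sketch only: read one "magic + size + payload" record whose payload starts
// at data_offset, mirroring the framing used by WriteBlock/ReadRawBlock above.
#include <array>
#include <cstdint>
#include <fstream>
#include <optional>
#include <string>
#include <vector>

std::optional<std::vector<uint8_t>> ReadFramedRecord(const std::string& path,
                                                     std::streamoff data_offset,
                                                     const std::array<uint8_t, 4>& expected_magic,
                                                     uint32_t max_size)
{
    std::ifstream in(path, std::ios::binary);
    if (!in) return std::nullopt;
    in.seekg(data_offset - 8); // rewind to the record header preceding the payload
    std::array<uint8_t, 4> magic{};
    uint32_t size{0};
    in.read(reinterpret_cast<char*>(magic.data()), magic.size());
    in.read(reinterpret_cast<char*>(&size), sizeof(size));
    if (!in || magic != expected_magic || size > max_size) return std::nullopt; // reject wrong network or oversized record
    std::vector<uint8_t> payload(size);
    in.read(reinterpret_cast<char*>(payload.data()), static_cast<std::streamsize>(payload.size()));
    return in ? std::optional{payload} : std::nullopt;
}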
1081 | | |
1082 | | FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight) |
1083 | 10.0M | { |
1084 | 10.0M | const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))}; |
1085 | 10.0M | FlatFilePos pos{FindNextBlockPos(block_size + STORAGE_HEADER_BYTES, nHeight, block.GetBlockTime())}; |
1086 | 10.0M | if (pos.IsNull()) { |
1087 | 0 | LogError("FindNextBlockPos failed for %s while writing block", pos.ToString());
1088 | 0 | return FlatFilePos(); |
1089 | 0 | } |
1090 | 10.0M | AutoFile file{OpenBlockFile(pos, /*fReadOnly=*/false)}; |
1091 | 10.0M | if (file.IsNull()) { |
1092 | 0 | LogError("OpenBlockFile failed for %s while writing block", pos.ToString());
1093 | 0 | m_opts.notifications.fatalError(_("Failed to write block.")); |
1094 | 0 | return FlatFilePos(); |
1095 | 0 | } |
1096 | 10.0M | BufferedWriter fileout{file}; |
1097 | | |
1098 | | // Write index header |
1099 | 10.0M | fileout << GetParams().MessageStart() << block_size; |
1100 | 10.0M | pos.nPos += STORAGE_HEADER_BYTES; |
1101 | | // Write block |
1102 | 10.0M | fileout << TX_WITH_WITNESS(block); |
1103 | 10.0M | return pos; |
1104 | 10.0M | } |
1105 | | |
1106 | | static auto InitBlocksdirXorKey(const BlockManager::Options& opts) |
1107 | 49.9k | { |
1108 | | // Bytes are serialized without length indicator, so this is also the exact |
1109 | | // size of the XOR-key file. |
1110 | 49.9k | std::array<std::byte, 8> xor_key{}; |
1111 | | |
1112 | | // Consider this to be the first run if the blocksdir contains only hidden |
1113 | | // files (those which start with a .). Checking for a fully-empty dir would |
1114 | | // be too aggressive as a .lock file may have already been written. |
1115 | 49.9k | bool first_run = true; |
1116 | 49.9k | for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) { |
1117 | 0 | const std::string path = fs::PathToString(entry.path().filename()); |
1118 | 0 | if (!entry.is_regular_file() || !path.starts_with('.')) { |
1119 | 0 | first_run = false; |
1120 | 0 | break; |
1121 | 0 | } |
1122 | 0 | } |
1123 | | |
1124 | 49.9k | if (opts.use_xor && first_run) { |
1125 | | // Only use random fresh key when the boolean option is set and on the |
1126 | | // very first start of the program. |
1127 | 49.9k | FastRandomContext{}.fillrand(xor_key); |
1128 | 49.9k | } |
1129 | | |
1130 | 49.9k | const fs::path xor_key_path{opts.blocks_dir / "xor.dat"}; |
1131 | 49.9k | if (fs::exists(xor_key_path)) { |
1132 | | // A pre-existing xor key file has priority. |
1133 | 0 | AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")}; |
1134 | 0 | xor_key_file >> xor_key; |
1135 | 49.9k | } else { |
1136 | | // Create initial or missing xor key file |
1137 | 49.9k | AutoFile xor_key_file{fsbridge::fopen(xor_key_path, |
1138 | | #ifdef __MINGW64__ |
1139 | | "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210 |
1140 | | #else |
1141 | 49.9k | "wbx" |
1142 | 49.9k | #endif |
1143 | 49.9k | )}; |
1144 | 49.9k | xor_key_file << xor_key; |
1145 | 49.9k | } |
1146 | | // If the user disabled the key, it must be zero. |
1147 | 49.9k | if (!opts.use_xor && xor_key != decltype(xor_key){}0 ) { |
1148 | 0 | throw std::runtime_error{ |
1149 | 0 | strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1150 | 0 | "Stored key: '%s', stored path: '%s'.", |
1151 | 0 | HexStr(xor_key), fs::PathToString(xor_key_path)), |
1152 | 0 | }; |
1153 | 0 | } |
1154 | 49.9k | LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key));
1155 | 49.9k | return std::vector<std::byte>{xor_key.begin(), xor_key.end()}; |
1156 | 49.9k | } |
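The 8-byte key persisted in xor.dat is the obfuscation key for the blocksdir *.dat files. Assuming it is applied as a repeating byte-wise XOR indexed by absolute file offset (which is what makes the all-zero key written when use_xor is disabled a no-op), a minimal sketch of the de/obfuscation step looks like this; the helper is illustrative and not the code path AutoFile actually uses:

// Sketch only: repeating-key XOR over a buffer that begins at file_offset.
#include <cstddef>
#include <span>

void XorWithKey(std::span<std::byte> data, std::span<const std::byte> key, std::size_t file_offset)
{
    if (key.empty()) return; // no key configured: nothing to do
    for (std::size_t i = 0; i < data.size(); ++i) {
        data[i] ^= key[(file_offset + i) % key.size()]; // an all-zero key leaves data unchanged
    }
}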
1157 | | |
1158 | | BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts) |
1159 | 49.9k | : m_prune_mode{opts.prune_target > 0}, |
1160 | 49.9k | m_xor_key{InitBlocksdirXorKey(opts)}, |
1161 | 49.9k | m_opts{std::move(opts)}, |
1162 | 49.9k | m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1163 | 49.9k | m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}}, |
1164 | 49.9k | m_interrupt{interrupt} |
1165 | 49.9k | { |
1166 | 49.9k | m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params); |
1167 | | |
1168 | 49.9k | if (m_opts.block_tree_db_params.wipe_data) { |
1169 | 0 | m_block_tree_db->WriteReindexing(true); |
1170 | 0 | m_blockfiles_indexed = false; |
1171 | | // If we're reindexing in prune mode, wipe away unusable block files and all undo data files |
1172 | 0 | if (m_prune_mode) { |
1173 | 0 | CleanupBlockRevFiles(); |
1174 | 0 | } |
1175 | 0 | } |
1176 | 49.9k | } |
1177 | | |
1178 | | class ImportingNow |
1179 | | { |
1180 | | std::atomic<bool>& m_importing; |
1181 | | |
1182 | | public: |
1183 | 0 | ImportingNow(std::atomic<bool>& importing) : m_importing{importing} |
1184 | 0 | { |
1185 | 0 | assert(m_importing == false); |
1186 | 0 | m_importing = true; |
1187 | 0 | } |
1188 | | ~ImportingNow() |
1189 | 0 | { |
1190 | 0 | assert(m_importing == true); |
1191 | 0 | m_importing = false; |
1192 | 0 | } |
1193 | | }; |
1194 | | |
1195 | | void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths) |
1196 | 0 | { |
1197 | 0 | ImportingNow imp{chainman.m_blockman.m_importing}; |
1198 | | |
1199 | | // -reindex |
1200 | 0 | if (!chainman.m_blockman.m_blockfiles_indexed) { |
1201 | 0 | int nFile = 0; |
1202 | | // Map of disk positions for blocks with unknown parent (only used for reindex); |
1203 | | // parent hash -> child disk position, multiple children can have the same parent. |
1204 | 0 | std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent; |
1205 | 0 | while (true) { |
1206 | 0 | FlatFilePos pos(nFile, 0); |
1207 | 0 | if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) { |
1208 | 0 | break; // No block files left to reindex |
1209 | 0 | } |
1210 | 0 | AutoFile file{chainman.m_blockman.OpenBlockFile(pos, /*fReadOnly=*/true)}; |
1211 | 0 | if (file.IsNull()) { |
1212 | 0 | break; // This error is logged in OpenBlockFile |
1213 | 0 | } |
1214 | 0 | LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
1215 | 0 | chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent); |
1216 | 0 | if (chainman.m_interrupt) { |
1217 | 0 | LogPrintf("Interrupt requested. Exit %s\n", __func__);
1218 | 0 | return; |
1219 | 0 | } |
1220 | 0 | nFile++; |
1221 | 0 | } |
1222 | 0 | WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1223 | 0 | chainman.m_blockman.m_blockfiles_indexed = true; |
1224 | 0 | LogPrintf("Reindexing finished\n");
1225 | | // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked): |
1226 | 0 | chainman.ActiveChainstate().LoadGenesisBlock(); |
1227 | 0 | } |
1228 | | |
1229 | | // -loadblock= |
1230 | 0 | for (const fs::path& path : import_paths) { |
1231 | 0 | AutoFile file{fsbridge::fopen(path, "rb")}; |
1232 | 0 | if (!file.IsNull()) { |
1233 | 0 | LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
1234 | 0 | chainman.LoadExternalBlockFile(file); |
1235 | 0 | if (chainman.m_interrupt) { |
1236 | 0 | LogPrintf("Interrupt requested. Exit %s\n", __func__);
1237 | 0 | return; |
1238 | 0 | } |
1239 | 0 | } else { |
1240 | 0 | LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1241 | 0 | } |
1242 | 0 | } |
1243 | | |
1244 | | // scan for better chains in the block chain database, that are not yet connected in the active best chain |
1245 | | |
1246 | | // We can't hold cs_main during ActivateBestChain even though we're accessing |
1247 | | // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve |
1248 | | // the relevant pointers before the ABC call. |
1249 | 0 | for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1250 | 0 | BlockValidationState state; |
1251 | 0 | if (!chainstate->ActivateBestChain(state, nullptr)) { |
1252 | 0 | chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1253 | 0 | return; |
1254 | 0 | } |
1255 | 0 | } |
1256 | | // End scope of ImportingNow |
1257 | 0 | } |
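The reindex loop above probes blk00000.dat, blk00001.dat, ... and stops at the first file that does not exist. A small illustrative helper that enumerates files in the same order (the naming pattern is taken from the log format string; the helper itself is hypothetical and not part of BlockManager):

// Sketch only: list block files in probing order, stopping at the first gap.
#include <cstdio>
#include <filesystem>
#include <vector>

std::vector<std::filesystem::path> ListBlockFiles(const std::filesystem::path& blocks_dir)
{
    std::vector<std::filesystem::path> files;
    for (unsigned n = 0;; ++n) {
        char name[16];
        std::snprintf(name, sizeof(name), "blk%05u.dat", n); // e.g. blk00000.dat
        const std::filesystem::path path = blocks_dir / name;
        if (!std::filesystem::exists(path)) break; // no block files left
        files.push_back(path);
    }
    return files;
}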
1258 | | |
1259 | 0 | std::ostream& operator<<(std::ostream& os, const BlockfileType& type) { |
1260 | 0 | switch(type) { |
1261 | 0 | case BlockfileType::NORMAL: os << "normal"; break; |
1262 | 0 | case BlockfileType::ASSUMED: os << "assumed"; break; |
1263 | 0 | default: os.setstate(std::ios_base::failbit); |
1264 | 0 | } |
1265 | 0 | return os; |
1266 | 0 | } |
1267 | | |
1268 | 0 | std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) { |
1269 | 0 | os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1270 | 0 | return os; |
1271 | 0 | } |
1272 | | } // namespace node |