/Users/eugenesiegel/btc/bitcoin/src/txorphanage.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (c) 2021-2022 The Bitcoin Core developers |
2 | | // Distributed under the MIT software license, see the accompanying |
3 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
4 | | |
5 | | #include <txorphanage.h> |
6 | | |
7 | | #include <consensus/validation.h> |
8 | | #include <logging.h> |
9 | | #include <policy/policy.h> |
10 | | #include <primitives/transaction.h> |
11 | | #include <util/time.h> |
12 | | |
13 | | #include <cassert> |
14 | | |
// Add a new orphan transaction announced by `peer`.
// Returns true iff a new orphan entry was created; returns false when the
// orphan was already present (in which case `peer` is merely recorded as an
// additional announcer) or when the transaction is too large to store.
bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer)
{
    const Txid& hash = tx->GetHash();
    const Wtxid& wtxid = tx->GetWitnessHash();
    if (auto it{m_orphans.find(wtxid)}; it != m_orphans.end()) {
        AddAnnouncer(wtxid, peer);
        // No new orphan entry was created. An announcer may have been added.
        return false;
    }

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // Note: `sz` is the transaction *weight* and is compared against
    // MAX_STANDARD_TX_WEIGHT, bounding per-orphan memory plus the
    // by-prev-outpoint index overhead (in the worst case).
    unsigned int sz = GetTransactionWeight(*tx);
    if (sz > MAX_STANDARD_TX_WEIGHT)
    {
        LogDebug(BCLog::TXPACKAGES, "ignoring large orphan tx (size: %u, txid: %s, wtxid: %s)\n", sz, hash.ToString(), wtxid.ToString());
        return false;
    }

    // Create the entry; list_pos is its index in m_orphan_list (appended below).
    auto ret = m_orphans.emplace(wtxid, OrphanTx{{tx, {peer}, Now<NodeSeconds>() + ORPHAN_TX_EXPIRE_TIME}, m_orphan_list.size()});
    assert(ret.second);
    m_orphan_list.push_back(ret.first);
    // Index this orphan by each of its inputs' prevouts so it can be found
    // when a parent arrives (AddChildrenToWorkSet) or is confirmed (EraseForBlock).
    for (const CTxIn& txin : tx->vin) {
        m_outpoint_to_orphan_it[txin.prevout].insert(ret.first);
    }
    // Update cached totals and this peer's accounting (entry created on demand).
    m_total_orphan_usage += sz;
    m_total_announcements += 1;
    auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
    peer_info.m_total_usage += sz;

    LogDebug(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n", hash.ToString(), wtxid.ToString(), sz,
             m_orphans.size(), m_outpoint_to_orphan_it.size());
    return true;
}
54 | | |
55 | | bool TxOrphanage::AddAnnouncer(const Wtxid& wtxid, NodeId peer) |
56 | 0 | { |
57 | 0 | const auto it = m_orphans.find(wtxid); |
58 | 0 | if (it != m_orphans.end()) { |
59 | 0 | Assume(!it->second.announcers.empty()); Line | Count | Source | 118 | 0 | #define Assume(val) inline_assertion_check<false>(val, __FILE__, __LINE__, __func__, #val) |
|
60 | 0 | const auto ret = it->second.announcers.insert(peer); |
61 | 0 | if (ret.second) { |
62 | 0 | auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second; |
63 | 0 | peer_info.m_total_usage += it->second.GetUsage(); |
64 | 0 | m_total_announcements += 1; |
65 | 0 | LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s\n", peer, wtxid.ToString()); Line | Count | Source | 280 | 0 | #define LogDebug(category, ...) LogPrintLevel(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 273 | 0 | do { \ | 274 | 0 | if (LogAcceptCategory((category), (level))) { \ | 275 | 0 | LogPrintLevel_(category, level, __VA_ARGS__); \ Line | Count | Source | 255 | 0 | #define LogPrintLevel_(category, level, ...) LogPrintFormatInternal(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__) |
| 276 | 0 | } \ | 277 | 0 | } while (0) |
|
|
66 | 0 | return true; |
67 | 0 | } |
68 | 0 | } |
69 | 0 | return false; |
70 | 0 | } |
71 | | |
// Erase the orphan identified by `wtxid`, if present.
// Returns the number of entries erased (0 or 1). Keeps the by-outpoint index,
// the compact m_orphan_list vector, and all cached usage/announcement totals
// consistent.
int TxOrphanage::EraseTx(const Wtxid& wtxid)
{
    std::map<Wtxid, OrphanTx>::iterator it = m_orphans.find(wtxid);
    if (it == m_orphans.end())
        return 0;
    // Remove this orphan from the per-prevout index, pruning outpoint buckets
    // that become empty.
    for (const CTxIn& txin : it->second.tx->vin)
    {
        auto itPrev = m_outpoint_to_orphan_it.find(txin.prevout);
        if (itPrev == m_outpoint_to_orphan_it.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            m_outpoint_to_orphan_it.erase(itPrev);
    }

    // Roll back the cached global totals for this entry.
    const auto tx_size{it->second.GetUsage()};
    m_total_orphan_usage -= tx_size;
    m_total_announcements -= it->second.announcers.size();
    // Decrement each announcer's m_total_usage
    for (const auto& peer : it->second.announcers) {
        auto peer_it = m_peer_orphanage_info.find(peer);
        if (Assume(peer_it != m_peer_orphanage_info.end())) {
            peer_it->second.m_total_usage -= tx_size;
        }
    }

    // m_orphan_list is kept compact via swap-and-pop: move the last element
    // into the erased slot (updating its stored list_pos), then pop_back.
    size_t old_pos = it->second.list_pos;
    assert(m_orphan_list[old_pos] == it);
    if (old_pos + 1 != m_orphan_list.size()) {
        // Unless we're deleting the last entry in m_orphan_list, move the last
        // entry to the position we're deleting.
        auto it_last = m_orphan_list.back();
        m_orphan_list[old_pos] = it_last;
        it_last->second.list_pos = old_pos;
    }
    const auto& txid = it->second.tx->GetHash();
    // Time spent in orphanage = difference between current and entry time.
    // Entry time is equal to ORPHAN_TX_EXPIRE_TIME earlier than entry's expiry.
    LogDebug(BCLog::TXPACKAGES, "   removed orphan tx %s (wtxid=%s) after %ds\n", txid.ToString(), wtxid.ToString(),
             Ticks<std::chrono::seconds>(NodeClock::now() + ORPHAN_TX_EXPIRE_TIME - it->second.nTimeExpire));
    m_orphan_list.pop_back();

    m_orphans.erase(it);
    return 1;
}
117 | | |
// Remove `peer` as an announcer from every orphan, erasing orphans that are
// left with no announcers at all, and drop the peer's accounting entry.
void TxOrphanage::EraseForPeer(NodeId peer)
{
    // Zeroes out this peer's m_total_usage (and discards its work set).
    m_peer_orphanage_info.erase(peer);

    int nErased = 0;
    std::map<Wtxid, OrphanTx>::iterator iter = m_orphans.begin();
    while (iter != m_orphans.end())
    {
        // increment to avoid iterator becoming invalid after erasure
        auto& [wtxid, orphan] = *iter++;
        auto orphan_it = orphan.announcers.find(peer);
        if (orphan_it != orphan.announcers.end()) {
            orphan.announcers.erase(peer);
            m_total_announcements -= 1;

            // No remaining announcers: clean up entry
            if (orphan.announcers.empty()) {
                nErased += EraseTx(orphan.tx->GetWitnessHash());
            }
        }
    }
    if (nErased > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", nErased, peer);
}
142 | | |
// Enforce the orphanage size limit. First opportunistically sweeps out
// expired entries (batched: at most once per sweep interval), then evicts
// uniformly-random orphans until at most `max_orphans` remain.
void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng)
{
    unsigned int nEvicted = 0;
    auto nNow{Now<NodeSeconds>()};
    if (m_next_sweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        // Track the earliest remaining expiry so the next sweep can be scheduled
        // just after it; initialized to the earliest time a not-yet-seen entry
        // could possibly expire.
        auto nMinExpTime{nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL};
        std::map<Wtxid, OrphanTx>::iterator iter = m_orphans.begin();
        while (iter != m_orphans.end())
        {
            // Advance before a potential erase so the iterator stays valid.
            std::map<Wtxid, OrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseTx(maybeErase->first);
            } else {
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        m_next_sweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan tx due to expiration\n", nErased);
    }
    while (m_orphans.size() > max_orphans)
    {
        // Evict a random orphan:
        size_t randompos = rng.randrange(m_orphan_list.size());
        EraseTx(m_orphan_list[randompos]->first);
        ++nEvicted;
    }
    if (nEvicted > 0) LogDebug(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx\n", nEvicted);
}
174 | | |
// For each orphan that spends an output of `tx`, add it to the work set of
// one randomly-chosen announcer so it gets reconsidered now that a parent
// has arrived.
void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng)
{
    for (unsigned int i = 0; i < tx.vout.size(); i++) {
        // Look up orphans spending output i of tx via the by-prevout index.
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& elem : it_by_prev->second) {
                // Belt and suspenders, each orphan should always have at least 1 announcer.
                if (!Assume(!elem->second.announcers.empty())) continue;

                // Select a random peer to assign orphan processing, reducing wasted work if the orphan is still missing
                // inputs. However, we don't want to create an issue in which the assigned peer can purposefully stop us
                // from processing the orphan by disconnecting.
                auto announcer_iter = std::begin(elem->second.announcers);
                std::advance(announcer_iter, rng.randrange(elem->second.announcers.size()));
                auto announcer = *(announcer_iter);

                // Get this source peer's work set, emplacing an empty set if it didn't exist
                // (note: if this peer wasn't still connected, we would have removed the orphan tx already)
                std::set<Wtxid>& orphan_work_set = m_peer_orphanage_info.try_emplace(announcer).first->second.m_work_set;
                // Add this tx to the work set
                orphan_work_set.insert(elem->first);
                LogDebug(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n",
                         tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), announcer);
            }
        }
    }
}
202 | | |
203 | | bool TxOrphanage::HaveTx(const Wtxid& wtxid) const |
204 | 0 | { |
205 | 0 | return m_orphans.count(wtxid); |
206 | 0 | } |
207 | | |
208 | | CTransactionRef TxOrphanage::GetTx(const Wtxid& wtxid) const |
209 | 0 | { |
210 | 0 | auto it = m_orphans.find(wtxid); |
211 | 0 | return it != m_orphans.end() ? it->second.tx : nullptr; |
212 | 0 | } |
213 | | |
214 | | bool TxOrphanage::HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const |
215 | 0 | { |
216 | 0 | auto it = m_orphans.find(wtxid); |
217 | 0 | return (it != m_orphans.end() && it->second.announcers.contains(peer)); |
218 | 0 | } |
219 | | |
220 | | CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer) |
221 | 711k | { |
222 | 711k | auto peer_it = m_peer_orphanage_info.find(peer); |
223 | 711k | if (peer_it == m_peer_orphanage_info.end()) return nullptr; |
224 | | |
225 | 0 | auto& work_set = peer_it->second.m_work_set; |
226 | 0 | while (!work_set.empty()) { |
227 | 0 | Wtxid wtxid = *work_set.begin(); |
228 | 0 | work_set.erase(work_set.begin()); |
229 | |
|
230 | 0 | const auto orphan_it = m_orphans.find(wtxid); |
231 | 0 | if (orphan_it != m_orphans.end()) { |
232 | 0 | return orphan_it->second.tx; |
233 | 0 | } |
234 | 0 | } |
235 | 0 | return nullptr; |
236 | 0 | } |
237 | | |
238 | | bool TxOrphanage::HaveTxToReconsider(NodeId peer) |
239 | 438k | { |
240 | 438k | auto peer_it = m_peer_orphanage_info.find(peer); |
241 | 438k | if (peer_it == m_peer_orphanage_info.end()) return false; |
242 | | |
243 | 0 | auto& work_set = peer_it->second.m_work_set; |
244 | 0 | return !work_set.empty(); |
245 | 438k | } |
246 | | |
247 | | void TxOrphanage::EraseForBlock(const CBlock& block) |
248 | 2.79k | { |
249 | 2.79k | std::vector<Wtxid> vOrphanErase; |
250 | | |
251 | 2.79k | for (const CTransactionRef& ptx : block.vtx) { |
252 | 2.79k | const CTransaction& tx = *ptx; |
253 | | |
254 | | // Which orphan pool entries must we evict? |
255 | 2.79k | for (const auto& txin : tx.vin) { |
256 | 2.79k | auto itByPrev = m_outpoint_to_orphan_it.find(txin.prevout); |
257 | 2.79k | if (itByPrev == m_outpoint_to_orphan_it.end()) continue; |
258 | 0 | for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { |
259 | 0 | const CTransaction& orphanTx = *(*mi)->second.tx; |
260 | 0 | vOrphanErase.push_back(orphanTx.GetWitnessHash()); |
261 | 0 | } |
262 | 0 | } |
263 | 2.79k | } |
264 | | |
265 | | // Erase orphan transactions included or precluded by this block |
266 | 2.79k | if (vOrphanErase.size()) { |
267 | 0 | int nErased = 0; |
268 | 0 | for (const auto& orphanHash : vOrphanErase) { |
269 | 0 | nErased += EraseTx(orphanHash); |
270 | 0 | } |
271 | 0 | LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", nErased); Line | Count | Source | 280 | 0 | #define LogDebug(category, ...) LogPrintLevel(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 273 | 0 | do { \ | 274 | 0 | if (LogAcceptCategory((category), (level))) { \ | 275 | 0 | LogPrintLevel_(category, level, __VA_ARGS__); \ Line | Count | Source | 255 | 0 | #define LogPrintLevel_(category, level, ...) LogPrintFormatInternal(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__) |
| 276 | 0 | } \ | 277 | 0 | } while (0) |
|
|
272 | 0 | } |
273 | 2.79k | } |
274 | | |
// Return all stored orphans that spend an output of `parent` and were
// announced by `nodeid`, deduplicated, with more-recent (later-expiring)
// orphans first.
std::vector<CTransactionRef> TxOrphanage::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const
{
    // First construct a vector of iterators to ensure we do not return duplicates of the same tx
    // and so we can sort by nTimeExpire.
    std::vector<OrphanMap::iterator> iters;

    // For each output, get all entries spending this prevout, filtering for ones from the specified peer.
    for (unsigned int i = 0; i < parent->vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(parent->GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& elem : it_by_prev->second) {
                if (elem->second.announcers.contains(nodeid)) {
                    iters.emplace_back(elem);
                }
            }
        }
    }

    // Sort by address so that duplicates can be deleted. At the same time, sort so that more recent
    // orphans (which expire later) come first. Break ties based on address, as nTimeExpire is
    // quantified in seconds and it is possible for orphans to have the same expiry.
    std::sort(iters.begin(), iters.end(), [](const auto& lhs, const auto& rhs) {
        if (lhs->second.nTimeExpire == rhs->second.nTimeExpire) {
            return &(*lhs) < &(*rhs);
        } else {
            return lhs->second.nTimeExpire > rhs->second.nTimeExpire;
        }
    });
    // Erase duplicates
    iters.erase(std::unique(iters.begin(), iters.end()), iters.end());

    // Convert to a vector of CTransactionRef
    std::vector<CTransactionRef> children_found;
    children_found.reserve(iters.size());
    for (const auto& child_iter : iters) {
        children_found.emplace_back(child_iter->second.tx);
    }
    return children_found;
}
314 | | |
315 | | std::vector<TxOrphanage::OrphanTxBase> TxOrphanage::GetOrphanTransactions() const |
316 | 0 | { |
317 | 0 | std::vector<OrphanTxBase> ret; |
318 | 0 | ret.reserve(m_orphans.size()); |
319 | 0 | for (auto const& o : m_orphans) { |
320 | 0 | ret.push_back({o.second.tx, o.second.announcers, o.second.nTimeExpire}); |
321 | 0 | } |
322 | 0 | return ret; |
323 | 0 | } |
324 | | |
// Recompute every cached aggregate from scratch and Assume() it matches the
// incrementally-maintained values. Debug-only consistency check; has no side
// effects on the orphanage state.
void TxOrphanage::SanityCheck() const
{
    // Check that cached m_total_announcements is correct
    unsigned int counted_total_announcements{0};
    // Check that m_total_orphan_usage is correct
    unsigned int counted_total_usage{0};

    // Check that cached PeerOrphanInfo::m_total_size is correct
    std::map<NodeId, unsigned int> counted_size_per_peer;

    for (const auto& [wtxid, orphan] : m_orphans) {
        counted_total_announcements += orphan.announcers.size();
        counted_total_usage += orphan.GetUsage();

        // Every orphan must have at least one announcer.
        Assume(!orphan.announcers.empty());
        for (const auto& peer : orphan.announcers) {
            auto& count_peer_entry = counted_size_per_peer.try_emplace(peer).first->second;
            count_peer_entry += orphan.GetUsage();
        }
    }

    Assume(m_total_announcements >= m_orphans.size());
    Assume(counted_total_announcements == m_total_announcements);
    Assume(counted_total_usage == m_total_orphan_usage);

    // There must be an entry in m_peer_orphanage_info for each peer
    // However, there may be m_peer_orphanage_info entries corresponding to peers for whom we
    // previously had orphans but no longer do.
    Assume(counted_size_per_peer.size() <= m_peer_orphanage_info.size());

    for (const auto& [peerid, info] : m_peer_orphanage_info) {
        auto it_counted = counted_size_per_peer.find(peerid);
        if (it_counted == counted_size_per_peer.end()) {
            // Peer currently announces no orphans: its cached usage must be zero.
            Assume(info.m_total_usage == 0);
        } else {
            Assume(it_counted->second == info.m_total_usage);
        }
    }
}