/root/bitcoin/src/net_processing.cpp
Line | Count | Source |
1 | | // Copyright (c) 2009-2010 Satoshi Nakamoto |
2 | | // Copyright (c) 2009-present The Bitcoin Core developers |
3 | | // Distributed under the MIT software license, see the accompanying |
4 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
5 | | |
6 | | #include <net_processing.h> |
7 | | |
8 | | #include <addrman.h> |
9 | | #include <arith_uint256.h> |
10 | | #include <banman.h> |
11 | | #include <blockencodings.h> |
12 | | #include <blockfilter.h> |
13 | | #include <chain.h> |
14 | | #include <chainparams.h> |
15 | | #include <common/bloom.h> |
16 | | #include <consensus/amount.h> |
17 | | #include <consensus/params.h> |
18 | | #include <consensus/validation.h> |
19 | | #include <core_memusage.h> |
20 | | #include <crypto/siphash.h> |
21 | | #include <deploymentstatus.h> |
22 | | #include <flatfile.h> |
23 | | #include <headerssync.h> |
24 | | #include <index/blockfilterindex.h> |
25 | | #include <kernel/types.h> |
26 | | #include <logging.h> |
27 | | #include <merkleblock.h> |
28 | | #include <net.h> |
29 | | #include <net_permissions.h> |
30 | | #include <netaddress.h> |
31 | | #include <netbase.h> |
32 | | #include <netmessagemaker.h> |
33 | | #include <node/blockstorage.h> |
34 | | #include <node/connection_types.h> |
35 | | #include <node/protocol_version.h> |
36 | | #include <node/timeoffsets.h> |
37 | | #include <node/txdownloadman.h> |
38 | | #include <node/txorphanage.h> |
39 | | #include <node/txreconciliation.h> |
40 | | #include <node/warnings.h> |
41 | | #include <policy/feerate.h> |
42 | | #include <policy/fees/block_policy_estimator.h> |
43 | | #include <policy/packages.h> |
44 | | #include <policy/policy.h> |
45 | | #include <primitives/block.h> |
46 | | #include <primitives/transaction.h> |
47 | | #include <private_broadcast.h> |
48 | | #include <protocol.h> |
49 | | #include <random.h> |
50 | | #include <scheduler.h> |
51 | | #include <script/script.h> |
52 | | #include <serialize.h> |
53 | | #include <span.h> |
54 | | #include <streams.h> |
55 | | #include <sync.h> |
56 | | #include <tinyformat.h> |
57 | | #include <txmempool.h> |
58 | | #include <uint256.h> |
59 | | #include <util/check.h> |
60 | | #include <util/strencodings.h> |
61 | | #include <util/time.h> |
62 | | #include <util/trace.h> |
63 | | #include <validation.h> |
64 | | |
65 | | #include <algorithm> |
66 | | #include <array> |
67 | | #include <atomic> |
68 | | #include <compare> |
69 | | #include <cstddef> |
70 | | #include <deque> |
71 | | #include <exception> |
72 | | #include <functional> |
73 | | #include <future> |
74 | | #include <initializer_list> |
75 | | #include <iterator> |
76 | | #include <limits> |
77 | | #include <list> |
78 | | #include <map> |
79 | | #include <memory> |
80 | | #include <optional> |
81 | | #include <queue> |
82 | | #include <ranges> |
83 | | #include <ratio> |
84 | | #include <set> |
85 | | #include <span> |
86 | | #include <typeinfo> |
87 | | #include <utility> |
88 | | |
89 | | using kernel::ChainstateRole; |
90 | | using namespace util::hex_literals; |
91 | | |
92 | | TRACEPOINT_SEMAPHORE(net, inbound_message); |
93 | | TRACEPOINT_SEMAPHORE(net, misbehaving_connection); |
94 | | |
95 | | /** Headers download timeout. |
96 | | * Timeout = base + per_header * (expected number of headers) */ |
97 | | static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min; |
98 | | static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms; |
99 | | /** How long to wait for a peer to respond to a getheaders request */ |
100 | | static constexpr auto HEADERS_RESPONSE_TIME{2min}; |
101 | | /** Protect at least this many outbound peers from disconnection due to slow/ |
102 | | * behind headers chain. |
103 | | */ |
104 | | static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4; |
105 | | /** Timeout for (unprotected) outbound peers to sync to our chainwork */ |
106 | | static constexpr auto CHAIN_SYNC_TIMEOUT{20min}; |
107 | | /** How frequently to check for stale tips */ |
108 | | static constexpr auto STALE_CHECK_INTERVAL{10min}; |
109 | | /** How frequently to check for extra outbound peers and disconnect */ |
110 | | static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s}; |
111 | | /** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict */ |
112 | | static constexpr auto MINIMUM_CONNECT_TIME{30s}; |
113 | | /** SHA256("main address relay")[0:8] */ |
114 | | static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; |
115 | | /// Age after which a stale block will no longer be served if requested as |
116 | | /// protection against fingerprinting. Set to one month, denominated in seconds. |
117 | | static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60; |
118 | | /// Age after which a block is considered historical for purposes of rate |
119 | | /// limiting block relay. Set to one week, denominated in seconds. |
120 | | static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60; |
121 | | /** Time between pings automatically sent out for latency probing and keepalive */ |
122 | | static constexpr auto PING_INTERVAL{2min}; |
123 | | /** The maximum number of entries in a locator */ |
124 | | static const unsigned int MAX_LOCATOR_SZ = 101; |
125 | | /** The maximum number of entries in an 'inv' protocol message */ |
126 | | static const unsigned int MAX_INV_SZ = 50000; |
127 | | /** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */ |
128 | | static const unsigned int MAX_GETDATA_SZ = 1000; |
129 | | /** Number of blocks that can be requested at any given time from a single peer. */ |
130 | | static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16; |
131 | | /** Default time during which a peer must stall block download progress before being disconnected. |
132 | | * the actual timeout is increased temporarily if peers are disconnected for hitting the timeout */ |
133 | | static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s}; |
134 | | /** Maximum timeout for stalling block download. */ |
135 | | static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s}; |
136 | | /** Maximum depth of blocks we're willing to serve as compact blocks to peers |
137 | | * when requested. For older blocks, a regular BLOCK response will be sent. */ |
138 | | static const int MAX_CMPCTBLOCK_DEPTH = 5; |
139 | | /** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */ |
140 | | static const int MAX_BLOCKTXN_DEPTH = 10; |
141 | | static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high"); |
142 | | /** Size of the "block download window": how far ahead of our current height do we fetch? |
143 | | * Larger windows tolerate larger download speed differences between peer, but increase the potential |
144 | | * degree of disordering of blocks on disk (which make reindexing and pruning harder). We'll probably |
145 | | * want to make this a per-peer adaptive value at some point. */ |
146 | | static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024; |
147 | | /** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */ |
148 | | static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1; |
149 | | /** Additional block download timeout per parallel downloading peer (i.e. 5 min) */ |
150 | | static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5; |
151 | | /** Maximum number of headers to announce when relaying blocks with headers message.*/ |
152 | | static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8; |
153 | | /** Minimum blocks required to signal NODE_NETWORK_LIMITED */ |
154 | | static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288; |
155 | | /** Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers */ |
156 | | static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144; |
157 | | /** Average delay between local address broadcasts */ |
158 | | static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h}; |
159 | | /** Average delay between peer address broadcasts */ |
160 | | static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s}; |
161 | | /** Delay between rotating the peers we relay a particular address to */ |
162 | | static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h}; |
163 | | /** Average delay between trickled inventory transmissions for inbound peers. |
164 | | * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ |
165 | | static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s}; |
166 | | /** Average delay between trickled inventory transmissions for outbound peers. |
167 | | * Use a smaller delay as there is less privacy concern for them. |
168 | | * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ |
169 | | static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s}; |
170 | | /** Maximum rate of inventory items to send per second. |
171 | | * Limits the impact of low-fee transaction floods. */ |
172 | | static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND{14}; |
173 | | /** Target number of tx inventory items to send per transmission. */ |
174 | | static constexpr unsigned int INVENTORY_BROADCAST_TARGET = INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL); |
175 | | /** Maximum number of inventory items to send per transmission. */ |
176 | | static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000; |
177 | | static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low"); |
178 | | static_assert(INVENTORY_BROADCAST_MAX <= node::MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high"); |
179 | | /** Average delay between feefilter broadcasts in seconds. */ |
180 | | static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min}; |
181 | | /** Maximum feefilter broadcast delay after significant change. */ |
182 | | static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min}; |
183 | | /** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */ |
184 | | static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000; |
185 | | /** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */ |
186 | | static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000; |
187 | | /** the maximum percentage of addresses from our addrman to return in response to a getaddr message. */ |
188 | | static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23; |
189 | | /** The maximum number of address records permitted in an ADDR message. */ |
190 | | static constexpr size_t MAX_ADDR_TO_SEND{1000}; |
191 | | /** The maximum rate of address records we're willing to process on average. Can be bypassed using |
192 | | * the NetPermissionFlags::Addr permission. */ |
193 | | static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1}; |
194 | | /** The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND |
195 | | * based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR |
196 | | * is exempt from this limit). */ |
197 | | static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND}; |
198 | | /** For private broadcast, send a transaction to this many peers. */ |
199 | | static constexpr size_t NUM_PRIVATE_BROADCAST_PER_TX{3}; |
200 | | /** Private broadcast connections must complete within this time. Disconnect the peer if it takes longer. */ |
201 | | static constexpr auto PRIVATE_BROADCAST_MAX_CONNECTION_LIFETIME{3min}; |
202 | | |
203 | | // Internal stuff |
204 | | namespace { |
205 | | /** Blocks that are in flight, and that are in the queue to be downloaded. */ |
206 | | struct QueuedBlock { |
207 | | /** BlockIndex. We must have this since we only request blocks when we've already validated the header. */ |
208 | | const CBlockIndex* pindex; |
209 | | /** Optional, used for CMPCTBLOCK downloads */ |
210 | | std::unique_ptr<PartiallyDownloadedBlock> partialBlock; |
211 | | }; |
212 | | |
213 | | /** |
214 | | * Data structure for an individual peer. This struct is not protected by |
215 | | * cs_main since it does not contain validation-critical data. |
216 | | * |
217 | | * Memory is owned by shared pointers and this object is destructed when |
218 | | * the refcount drops to zero. |
219 | | * |
220 | | * Mutexes inside this struct must not be held when locking m_peer_mutex. |
221 | | * |
222 | | * TODO: move most members from CNodeState to this structure. |
223 | | * TODO: move remaining application-layer data members from CNode to this structure. |
224 | | */ |
225 | | struct Peer { |
226 | | /** Same id as the CNode object for this peer */ |
227 | | const NodeId m_id{0}; |
228 | | |
229 | | /** Services we offered to this peer. |
230 | | * |
231 | | * This is supplied by CConnman during peer initialization. It's const |
232 | | * because there is no protocol defined for renegotiating services |
233 | | * initially offered to a peer. The set of local services we offer should |
234 | | * not change after initialization. |
235 | | * |
236 | | * An interesting example of this is NODE_NETWORK and initial block |
237 | | * download: a node which starts up from scratch doesn't have any blocks |
238 | | * to serve, but still advertises NODE_NETWORK because it will eventually |
239 | | * fulfill this role after IBD completes. P2P code is written in such a |
240 | | * way that it can gracefully handle peers who don't make good on their |
241 | | * service advertisements. */ |
242 | | const ServiceFlags m_our_services; |
243 | | /** Services this peer offered to us. */ |
244 | | std::atomic<ServiceFlags> m_their_services{NODE_NONE}; |
245 | | |
246 | | //! Whether this peer is an inbound connection |
247 | | const bool m_is_inbound; |
248 | | |
249 | | /** Protects misbehavior data members */ |
250 | | Mutex m_misbehavior_mutex; |
251 | | /** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */ |
252 | | bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false}; |
253 | | |
254 | | /** Protects block inventory data members */ |
255 | | Mutex m_block_inv_mutex; |
256 | | /** List of blocks that we'll announce via an `inv` message. |
257 | | * There is no final sorting before sending, as they are always sent |
258 | | * immediately and in the order requested. */ |
259 | | std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex); |
260 | | /** Unfiltered list of blocks that we'd like to announce via a `headers` |
261 | | * message. If we can't announce via a `headers` message, we'll fall back to |
262 | | * announcing via `inv`. */ |
263 | | std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex); |
264 | | /** The final block hash that we sent in an `inv` message to this peer. |
265 | | * When the peer requests this block, we send an `inv` message to trigger |
266 | | * the peer to request the next sequence of block hashes. |
267 | | * Most peers use headers-first syncing, which doesn't use this mechanism */ |
268 | | uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {}; |
269 | | |
270 | | /** Set to true once initial VERSION message was sent (only relevant for outbound peers). */ |
271 | | bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
272 | | |
273 | | /** The pong reply we're expecting, or 0 if no pong expected. */ |
274 | | std::atomic<uint64_t> m_ping_nonce_sent{0}; |
275 | | /** When the last ping was sent, or 0 if no ping was ever sent */ |
276 | | std::atomic<std::chrono::microseconds> m_ping_start{0us}; |
277 | | /** Whether a ping has been requested by the user */ |
278 | | std::atomic<bool> m_ping_queued{false}; |
279 | | |
280 | | /** Whether this peer relays txs via wtxid */ |
281 | | std::atomic<bool> m_wtxid_relay{false}; |
282 | | /** The feerate in the most recent BIP133 `feefilter` message sent to the peer. |
283 | | * It is *not* a p2p protocol violation for the peer to send us |
284 | | * transactions with a lower fee rate than this. See BIP133. */ |
285 | | CAmount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; |
286 | | /** Timestamp after which we will send the next BIP133 `feefilter` message |
287 | | * to the peer. */ |
288 | | std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; |
289 | | |
290 | | struct TxRelay { |
291 | | mutable RecursiveMutex m_bloom_filter_mutex; |
292 | | /** Whether we relay transactions to this peer. */ |
293 | | bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false}; |
294 | | /** A bloom filter for which transactions to announce to the peer. See BIP37. */ |
295 | | std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr}; |
296 | | |
297 | | mutable RecursiveMutex m_tx_inventory_mutex; |
298 | | /** A filter of all the (w)txids that the peer has announced to |
299 | | * us or we have announced to the peer. We use this to avoid announcing |
300 | | * the same (w)txid to a peer that already has the transaction. */ |
301 | | CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001}; |
302 | | /** Set of wtxids we still have to announce. For non-wtxid-relay peers, |
303 | | * we retrieve the txid from the corresponding mempool transaction when |
304 | | * constructing the `inv` message. We use the mempool to sort transactions |
305 | | * in dependency order before relay, so this does not have to be sorted. */ |
306 | | std::set<Wtxid> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex); |
307 | | /** Whether the peer has requested us to send our complete mempool. Only |
308 | | * permitted if the peer has NetPermissionFlags::Mempool or we advertise |
309 | | * NODE_BLOOM. See BIP35. */ |
310 | | bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false}; |
311 | | /** The next time after which we will send an `inv` message containing |
312 | | * transaction announcements to this peer. */ |
313 | | std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0}; |
314 | | /** The mempool sequence num at which we sent the last `inv` message to this peer. |
315 | | * Can relay txs with lower sequence numbers than this (see CTxMempool::info_for_relay). */ |
316 | | uint64_t m_last_inv_sequence GUARDED_BY(m_tx_inventory_mutex){1}; |
317 | | |
318 | | /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */ |
319 | | std::atomic<CAmount> m_fee_filter_received{0}; |
320 | | }; |
321 | | |
322 | | /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */ |
323 | | TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) |
324 | 542k | { |
325 | 542k | LOCK(m_tx_relay_mutex); Line | Count | Source | 268 | 542k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 542k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 542k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 542k | #define PASTE(x, y) x ## y |
|
|
|
|
326 | 542k | Assume(!m_tx_relay); Line | Count | Source | 125 | 542k | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
327 | 542k | m_tx_relay = std::make_unique<Peer::TxRelay>(); |
328 | 542k | return m_tx_relay.get(); |
329 | 542k | }; |
330 | | |
331 | | TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) |
332 | 66.5M | { |
333 | 66.5M | return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get()); Line | Count | Source | 299 | 66.5M | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
334 | 66.5M | }; |
335 | | |
336 | | /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */ |
337 | | std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
338 | | /** Probabilistic filter to track recent addr messages relayed with this |
339 | | * peer. Used to avoid relaying redundant addresses to this peer. |
340 | | * |
341 | | * We initialize this filter for outbound peers (other than |
342 | | * block-relay-only connections) or when an inbound peer sends us an |
343 | | * address related message (ADDR, ADDRV2, GETADDR). |
344 | | * |
345 | | * Presence of this filter must correlate with m_addr_relay_enabled. |
346 | | **/ |
347 | | std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
348 | | /** Whether we are participating in address relay with this connection. |
349 | | * |
350 | | * We set this bool to true for outbound peers (other than |
351 | | * block-relay-only connections), or when an inbound peer sends us an |
352 | | * address related message (ADDR, ADDRV2, GETADDR). |
353 | | * |
354 | | * We use this bool to decide whether a peer is eligible for gossiping |
355 | | * addr messages. This avoids relaying to peers that are unlikely to |
356 | | * forward them, effectively blackholing self announcements. Reasons |
357 | | * peers might support addr relay on the link include that they connected |
358 | | * to us as a block-relay-only peer or they are a light client. |
359 | | * |
360 | | * This field must correlate with whether m_addr_known has been |
361 | | * initialized.*/ |
362 | | std::atomic_bool m_addr_relay_enabled{false}; |
363 | | /** Whether a getaddr request to this peer is outstanding. */ |
364 | | bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
365 | | /** Guards address sending timers. */ |
366 | | mutable Mutex m_addr_send_times_mutex; |
367 | | /** Time point to send the next ADDR message to this peer. */ |
368 | | std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0}; |
369 | | /** Time point to possibly re-announce our local address to this peer. */ |
370 | | std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0}; |
371 | | /** Whether the peer has signaled support for receiving ADDRv2 (BIP155) |
372 | | * messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */ |
373 | | std::atomic_bool m_wants_addrv2{false}; |
374 | | /** Whether this peer has already sent us a getaddr message. */ |
375 | | bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
376 | | /** Number of addresses that can be processed from this peer. Start at 1 to |
377 | | * permit self-announcement. */ |
378 | | double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0}; |
379 | | /** When m_addr_token_bucket was last updated */ |
380 | | NodeClock::time_point m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){NodeClock::now()}; |
381 | | /** Total number of addresses that were dropped due to rate limiting. */ |
382 | | std::atomic<uint64_t> m_addr_rate_limited{0}; |
383 | | /** Total number of addresses that were processed (excludes rate-limited ones). */ |
384 | | std::atomic<uint64_t> m_addr_processed{0}; |
385 | | |
386 | | /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */ |
387 | | bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
388 | | |
389 | | /** Protects m_getdata_requests **/ |
390 | | Mutex m_getdata_requests_mutex; |
391 | | /** Work queue of items requested by this peer **/ |
392 | | std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex); |
393 | | |
394 | | /** Time of the last getheaders message to this peer */ |
395 | | NodeClock::time_point m_last_getheaders_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){}; |
396 | | |
397 | | /** Protects m_headers_sync **/ |
398 | | Mutex m_headers_sync_mutex; |
399 | | /** Headers-sync state for this peer (eg for initial sync, or syncing large |
400 | | * reorgs) **/ |
401 | | std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {}; |
402 | | |
403 | | /** Whether we've sent our peer a sendheaders message. **/ |
404 | | std::atomic<bool> m_sent_sendheaders{false}; |
405 | | |
406 | | /** When to potentially disconnect peer for stalling headers download */ |
407 | | std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us}; |
408 | | |
409 | | /** Whether this peer wants invs or headers (when possible) for block announcements */ |
410 | | bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
411 | | |
412 | | /** Time offset computed during the version handshake based on the |
413 | | * timestamp the peer sent in the version message. */ |
414 | | std::atomic<std::chrono::seconds> m_time_offset{0s}; |
415 | | |
416 | | explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound) |
417 | 762k | : m_id{id} |
418 | 762k | , m_our_services{our_services} |
419 | 762k | , m_is_inbound{is_inbound} |
420 | 762k | {} |
421 | | |
422 | | private: |
423 | | mutable Mutex m_tx_relay_mutex; |
424 | | |
425 | | /** Transaction relay data. May be a nullptr. */ |
426 | | std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex); |
427 | | }; |
428 | | |
429 | | using PeerRef = std::shared_ptr<Peer>; |
430 | | |
431 | | /** |
432 | | * Maintain validation-specific state about nodes, protected by cs_main, instead |
433 | | * by CNode's own locks. This simplifies asynchronous operation, where |
434 | | * processing of incoming data is done after the ProcessMessage call returns, |
435 | | * and we're no longer holding the node's locks. |
436 | | */ |
437 | | struct CNodeState { |
438 | | //! The best known block we know this peer has announced. |
439 | | const CBlockIndex* pindexBestKnownBlock{nullptr}; |
440 | | //! The hash of the last unknown block this peer has announced. |
441 | | uint256 hashLastUnknownBlock{}; |
442 | | //! The last full block we both have. |
443 | | const CBlockIndex* pindexLastCommonBlock{nullptr}; |
444 | | //! The best header we have sent our peer. |
445 | | const CBlockIndex* pindexBestHeaderSent{nullptr}; |
446 | | //! Whether we've started headers synchronization with this peer. |
447 | | bool fSyncStarted{false}; |
448 | | //! Since when we're stalling block download progress (in microseconds), or 0. |
449 | | std::chrono::microseconds m_stalling_since{0us}; |
450 | | std::list<QueuedBlock> vBlocksInFlight; |
451 | | //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. |
452 | | std::chrono::microseconds m_downloading_since{0us}; |
453 | | //! Whether we consider this a preferred download peer. |
454 | | bool fPreferredDownload{false}; |
455 | | /** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */ |
456 | | bool m_requested_hb_cmpctblocks{false}; |
457 | | /** Whether this peer will send us cmpctblocks if we request them. */ |
458 | | bool m_provides_cmpctblocks{false}; |
459 | | |
460 | | /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic. |
461 | | * |
462 | | * Both are only in effect for outbound, non-manual, non-protected connections. |
463 | | * Any peer protected (m_protect = true) is not chosen for eviction. A peer is |
464 | | * marked as protected if all of these are true: |
465 | | * - its connection type is IsBlockOnlyConn() == false |
466 | | * - it gave us a valid connecting header |
467 | | * - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet |
468 | | * - its chain tip has at least as much work as ours |
469 | | * |
470 | | * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip, |
471 | | * set a timeout CHAIN_SYNC_TIMEOUT in the future: |
472 | | * - If at timeout their best known block now has more work than our tip |
473 | | * when the timeout was set, then either reset the timeout or clear it |
474 | | * (after comparing against our current tip's work) |
475 | | * - If at timeout their best known block still has less work than our |
476 | | * tip did when the timeout was set, then send a getheaders message, |
477 | | * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future. |
478 | | * If their best known block is still behind when that new timeout is |
479 | | * reached, disconnect. |
480 | | * |
481 | | * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers, |
482 | | * drop the outbound one that least recently announced us a new block. |
483 | | */ |
484 | | struct ChainSyncTimeoutState { |
485 | | //! A timeout used for checking whether our peer has sufficiently synced |
486 | | std::chrono::seconds m_timeout{0s}; |
487 | | //! A header with the work we require on our peer's chain |
488 | | const CBlockIndex* m_work_header{nullptr}; |
489 | | //! After timeout is reached, set to true after sending getheaders |
490 | | bool m_sent_getheaders{false}; |
491 | | //! Whether this peer is protected from disconnection due to a bad/slow chain |
492 | | bool m_protect{false}; |
493 | | }; |
494 | | |
495 | | ChainSyncTimeoutState m_chain_sync; |
496 | | |
497 | | //! Time of last new block announcement |
498 | | int64_t m_last_block_announcement{0}; |
499 | | }; |
500 | | |
501 | | class PeerManagerImpl final : public PeerManager |
502 | | { |
503 | | public: |
504 | | PeerManagerImpl(CConnman& connman, AddrMan& addrman, |
505 | | BanMan* banman, ChainstateManager& chainman, |
506 | | CTxMemPool& pool, node::Warnings& warnings, Options opts); |
507 | | |
508 | | /** Overridden from CValidationInterface. */ |
509 | | void ActiveTipChange(const CBlockIndex& new_tip, bool) override |
510 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
511 | | void BlockConnected(const ChainstateRole& role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override |
512 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
513 | | void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override |
514 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
515 | | void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override |
516 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
517 | | void BlockChecked(const std::shared_ptr<const CBlock>& block, const BlockValidationState& state) override |
518 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
519 | | void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override |
520 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex); |
521 | | |
522 | | /** Implement NetEventsInterface */ |
523 | | void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex); |
524 | | void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex); |
525 | | bool HasAllDesirableServiceFlags(ServiceFlags services) const override; |
526 | | bool ProcessMessages(CNode& node, std::atomic<bool>& interrupt) override |
527 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
528 | | bool SendMessages(CNode& node) override |
529 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
530 | | |
531 | | /** Implement PeerManager */ |
532 | | void StartScheduledTasks(CScheduler& scheduler) override; |
533 | | void CheckForStaleTipAndEvictPeers() override; |
534 | | util::Expected<void, std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override |
535 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
536 | | bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
537 | | std::vector<node::TxOrphanage::OrphanInfo> GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
538 | | PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
539 | | std::vector<PrivateBroadcast::TxBroadcastInfo> GetPrivateBroadcastInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
540 | | std::vector<CTransactionRef> AbortPrivateBroadcast(const uint256& id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
541 | | void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
542 | | void InitiateTxBroadcastToAll(const Txid& txid, const Wtxid& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
543 | | void InitiateTxBroadcastPrivate(const CTransactionRef& tx) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
544 | | void SetBestBlock(int height, std::chrono::seconds time) override |
545 | 403k | { |
546 | 403k | m_best_height = height; |
547 | 403k | m_best_block_time = time; |
548 | 403k | }; |
549 | 0 | void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); };Line | Count | Source | 113 | 0 | #define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val) |
|
550 | | void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override; |
551 | | ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override; |
552 | | |
553 | | private: |
554 | | void ProcessMessage(Peer& peer, CNode& pfrom, const std::string& msg_type, DataStream& vRecv, std::chrono::microseconds time_received, |
555 | | const std::atomic<bool>& interruptMsgProc) |
556 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
557 | | |
558 | | /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */ |
559 | | void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex); |
560 | | |
561 | | /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */ |
562 | | void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
563 | | |
564 | | /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */ |
565 | | void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
566 | | |
567 | | /** Rebroadcast stale private transactions (already broadcast but not received back from the network). */ |
568 | | void ReattemptPrivateBroadcast(CScheduler& scheduler); |
569 | | |
570 | | /** Get a shared pointer to the Peer object. |
571 | | * May return an empty shared_ptr if the Peer object can't be found. */ |
572 | | PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
573 | | |
574 | | /** Get a shared pointer to the Peer object and remove it from m_peer_map. |
575 | | * May return an empty shared_ptr if the Peer object can't be found. */ |
576 | | PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
577 | | |
578 | | /** Mark a peer as misbehaving, which will cause it to be disconnected and its |
579 | | * address discouraged. */ |
580 | | void Misbehaving(Peer& peer, const std::string& message); |
581 | | |
582 | | /** |
583 | | * Potentially mark a node discouraged based on the contents of a BlockValidationState object |
584 | | * |
585 | | * @param[in] via_compact_block this bool is passed in because net_processing should |
586 | | * punish peers differently depending on whether the data was provided in a compact |
587 | | * block message or not. If the compact block had a valid header, but contained invalid |
588 | | * txs, the peer should not be punished. See BIP 152. |
589 | | */ |
590 | | void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, |
591 | | bool via_compact_block, const std::string& message = "") |
592 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
593 | | |
594 | | /** Maybe disconnect a peer and discourage future connections from its address. |
595 | | * |
596 | | * @param[in] pnode The node to check. |
597 | | * @param[in] peer The peer object to check. |
598 | | * @return True if the peer was marked for disconnection in this function |
599 | | */ |
600 | | bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); |
601 | | |
602 | | /** Handle a transaction whose result was not MempoolAcceptResult::ResultType::VALID. |
603 | | * @param[in] first_time_failure Whether we should consider inserting into vExtraTxnForCompact, adding |
604 | | * a new orphan to resolve, or looking for a package to submit. |
605 | | * Set to true for transactions just received over p2p. |
606 | | * Set to false if the tx has already been rejected before, |
607 | | * e.g. is already in the orphanage, to avoid adding duplicate entries. |
608 | | * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. |
609 | | * |
610 | | * @returns a PackageToValidate if this transaction has a reconsiderable failure and an eligible package was found, |
611 | | * or std::nullopt otherwise. |
612 | | */ |
613 | | std::optional<node::PackageToValidate> ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result, |
614 | | bool first_time_failure) |
615 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
616 | | |
617 | | /** Handle a transaction whose result was MempoolAcceptResult::ResultType::VALID. |
618 | | * Updates m_txrequest, m_orphanage, and vExtraTxnForCompact. Also queues the tx for relay. */ |
619 | | void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions) |
620 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
621 | | |
622 | | /** Handle the results of package validation: calls ProcessValidTx and ProcessInvalidTx for |
623 | | * individual transactions, and caches rejection for the package as a group. |
624 | | */ |
625 | | void ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) |
626 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
627 | | |
628 | | /** |
629 | | * Reconsider orphan transactions after a parent has been accepted to the mempool. |
630 | | * |
631 | | * @peer[in] peer The peer whose orphan transactions we will reconsider. Generally only |
632 | | * one orphan will be reconsidered on each call of this function. If an |
633 | | * accepted orphan has orphaned children, those will need to be |
634 | | * reconsidered, creating more work, possibly for other peers. |
635 | | * @return True if meaningful work was done (an orphan was accepted/rejected). |
636 | | * If no meaningful work was done, then the work set for this peer |
637 | | * will be empty. |
638 | | */ |
639 | | bool ProcessOrphanTx(Peer& peer) |
640 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
641 | | |
642 | | /** Process a single headers message from a peer. |
643 | | * |
644 | | * @param[in] pfrom CNode of the peer |
645 | | * @param[in] peer The peer sending us the headers |
646 | | * @param[in] headers The headers received. Note that this may be modified within ProcessHeadersMessage. |
647 | | * @param[in] via_compact_block Whether this header came in via compact block handling. |
648 | | */ |
649 | | void ProcessHeadersMessage(CNode& pfrom, Peer& peer, |
650 | | std::vector<CBlockHeader>&& headers, |
651 | | bool via_compact_block) |
652 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
653 | | /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */ |
654 | | /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */ |
655 | | bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, Peer& peer); |
656 | | /** Calculate an anti-DoS work threshold for headers chains */ |
657 | | arith_uint256 GetAntiDoSWorkThreshold(); |
658 | | /** Deal with state tracking and headers sync for peers that send |
659 | | * non-connecting headers (this can happen due to BIP 130 headers |
660 | | * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */ |
661 | | void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
662 | | /** Return true if the headers connect to each other, false otherwise */ |
663 | | bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const; |
664 | | /** Try to continue a low-work headers sync that has already begun. |
665 | | * Assumes the caller has already verified the headers connect, and has |
666 | | * checked that each header satisfies the proof-of-work target included in |
667 | | * the header. |
668 | | * @param[in] peer The peer we're syncing with. |
669 | | * @param[in] pfrom CNode of the peer |
670 | | * @param[in,out] headers The headers to be processed. |
671 | | * @return True if the passed in headers were successfully processed |
672 | | * as the continuation of a low-work headers sync in progress; |
673 | | * false otherwise. |
674 | | * If false, the passed in headers will be returned back to |
675 | | * the caller. |
676 | | * If true, the returned headers may be empty, indicating |
677 | | * there is no more work for the caller to do; or the headers |
678 | | * may be populated with entries that have passed anti-DoS |
679 | | * checks (and therefore may be validated for block index |
680 | | * acceptance by the caller). |
681 | | */ |
682 | | bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, |
683 | | std::vector<CBlockHeader>& headers) |
684 | | EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
685 | | /** Check work on a headers chain to be processed, and if insufficient, |
686 | | * initiate our anti-DoS headers sync mechanism. |
687 | | * |
688 | | * @param[in] peer The peer whose headers we're processing. |
689 | | * @param[in] pfrom CNode of the peer |
690 | | * @param[in] chain_start_header Where these headers connect in our index. |
691 | | * @param[in,out] headers The headers to be processed. |
692 | | * |
693 | | * @return True if chain was low work (headers will be empty after |
694 | | * calling); false otherwise. |
695 | | */ |
696 | | bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, |
697 | | const CBlockIndex& chain_start_header, |
698 | | std::vector<CBlockHeader>& headers) |
699 | | EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
700 | | |
701 | | /** Return true if the given header is an ancestor of |
702 | | * m_chainman.m_best_header or our current tip */ |
703 | | bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
704 | | |
705 | | /** Request further headers from this peer with a given locator. |
706 | | * We don't issue a getheaders message if we have a recent one outstanding. |
707 | | * This returns true if a getheaders is actually sent, and false otherwise. |
708 | | */ |
709 | | bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
710 | | /** Potentially fetch blocks from this peer upon receipt of a new headers tip */ |
711 | | void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header); |
712 | | /** Update peer state based on received headers message */ |
713 | | void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers) |
714 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
715 | | |
716 | | void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req); |
717 | | |
718 | | /** Send a message to a peer */ |
719 | 52.0k | void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); } |
720 | | template <typename... Args> |
721 | | void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const |
722 | 6.69M | { |
723 | 6.69M | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); |
724 | 6.69M | } net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<bool, unsigned long const&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool&&, unsigned long const&) const Line | Count | Source | 722 | 543k | { | 723 | 543k | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 543k | } |
net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<std::vector<CInv, std::allocator<CInv>>&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::vector<CInv, std::allocator<CInv>>&) const Line | Count | Source | 722 | 1.31M | { | 723 | 1.31M | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 1.31M | } |
Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<ParamsWrapper<TransactionSerParams, CTransaction const>>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, ParamsWrapper<TransactionSerParams, CTransaction const>&&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<std::span<std::byte const, 18446744073709551615ul>>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::span<std::byte const, 18446744073709551615ul>&&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<ParamsWrapper<TransactionSerParams, CBlock const>>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, ParamsWrapper<TransactionSerParams, CBlock const>&&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<CMerkleBlock&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, CMerkleBlock&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<CBlockHeaderAndShortTxIDs const&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, CBlockHeaderAndShortTxIDs const&) const net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<CBlockHeaderAndShortTxIDs&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, CBlockHeaderAndShortTxIDs&) const Line | Count | Source | 722 | 4.81k | { | 723 | 4.81k | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 4.81k | } |
net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<int const&, unsigned long&, long&, unsigned long&, ParamsWrapper<CNetAddr::SerParams, CService>, unsigned long&, ParamsWrapper<CNetAddr::SerParams, CService>, unsigned long, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>&, int&, bool&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, int const&, unsigned long&, long&, unsigned long&, ParamsWrapper<CNetAddr::SerParams, CService>&&, unsigned long&, ParamsWrapper<CNetAddr::SerParams, CService>&&, unsigned long&&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>&, int&, bool&) const Line | Count | Source | 722 | 756k | { | 723 | 756k | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 756k | } |
net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>) const Line | Count | Source | 722 | 2.51M | { | 723 | 2.51M | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 2.51M | } |
Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<unsigned int const&, unsigned long const&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, unsigned int const&, unsigned long const&) const net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<std::array<std::byte, 168ul> const&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::array<std::byte, 168ul> const&) const Line | Count | Source | 722 | 60.3k | { | 723 | 60.3k | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 60.3k | } |
Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<std::vector<CInv, std::allocator<CInv>>>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::vector<CInv, std::allocator<CInv>>&&) const net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<CBlockLocator const&, uint256>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, CBlockLocator const&, uint256&&) const Line | Count | Source | 722 | 207k | { | 723 | 207k | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 207k | } |
Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<BlockTransactions&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, BlockTransactions&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<std::vector<CBlockHeader, std::allocator<CBlockHeader>>>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::vector<CBlockHeader, std::allocator<CBlockHeader>>&&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<ParamsWrapper<TransactionSerParams, std::vector<CBlock, std::allocator<CBlock>>>>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, ParamsWrapper<TransactionSerParams, std::vector<CBlock, std::allocator<CBlock>>>&&) const net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<BlockTransactionsRequest&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, BlockTransactionsRequest&) const Line | Count | Source | 722 | 42.9k | { | 723 | 42.9k | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 42.9k | } |
net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<unsigned long&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, unsigned long&) const Line | Count | Source | 722 | 513k | { | 723 | 513k | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 513k | } |
Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<BlockFilter const&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, BlockFilter const&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<unsigned char&, uint256, uint256&, std::vector<uint256, std::allocator<uint256>>&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, unsigned char&, uint256&&, uint256&, std::vector<uint256, std::allocator<uint256>>&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<unsigned char&, uint256, std::vector<uint256, std::allocator<uint256>>&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, unsigned char&, uint256&&, std::vector<uint256, std::allocator<uint256>>&) const Unexecuted instantiation: net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<ParamsWrapper<CAddress::SerParams, std::vector<CAddress, std::allocator<CAddress>>>>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, ParamsWrapper<CAddress::SerParams, std::vector<CAddress, std::allocator<CAddress>>>&&) const net_processing.cpp:void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<long&>(CNode&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, long&) const Line | Count | Source | 722 | 731k | { | 723 | 731k | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 724 | 731k | } |
|
725 | | |
726 | | /** Send a version message to a peer */ |
727 | | void PushNodeVersion(CNode& pnode, const Peer& peer); |
728 | | |
729 | | /** Send a ping message every PING_INTERVAL or if requested via RPC (peer.m_ping_queued is true). |
730 | | * May mark the peer to be disconnected if a ping has timed out. |
731 | | * We use mockable time for ping timeouts, so setmocktime may cause pings |
732 | | * to time out. */ |
733 | | void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now); |
734 | | |
735 | | /** Send `addr` messages on a regular schedule. */ |
736 | | void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
737 | | |
738 | | /** Send a single `sendheaders` message, after we have completed headers sync with a peer. */ |
739 | | void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
740 | | |
741 | | /** Relay (gossip) an address to a few randomly chosen nodes. |
742 | | * |
743 | | * @param[in] originator The id of the peer that sent us the address. We don't want to relay it back. |
744 | | * @param[in] addr Address to relay. |
745 | | * @param[in] fReachable Whether the address' network is reachable. We relay unreachable |
746 | | * addresses less. |
747 | | */ |
748 | | void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); |
749 | | |
750 | | /** Send `feefilter` message. */ |
751 | | void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
752 | | |
753 | | FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
754 | | |
755 | | FeeFilterRounder m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
756 | | |
757 | | const CChainParams& m_chainparams; |
758 | | CConnman& m_connman; |
759 | | AddrMan& m_addrman; |
760 | | /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ |
761 | | BanMan* const m_banman; |
762 | | ChainstateManager& m_chainman; |
763 | | CTxMemPool& m_mempool; |
764 | | |
765 | | /** Synchronizes tx download including TxRequestTracker, rejection filters, and TxOrphanage. |
766 | | * Lock invariants: |
767 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_orphanage. |
768 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects. |
769 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects_reconsiderable. |
770 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_confirmed_transactions. |
771 | | * - Each data structure's limits hold (m_orphanage max size, m_txrequest per-peer limits, etc). |
772 | | */ |
773 | | Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs); |
774 | | node::TxDownloadManager m_txdownloadman GUARDED_BY(m_tx_download_mutex); |
775 | | |
776 | | std::unique_ptr<TxReconciliationTracker> m_txreconciliation; |
777 | | |
778 | | /** The height of the best chain */ |
779 | | std::atomic<int> m_best_height{-1}; |
780 | | /** The time of the best chain tip block */ |
781 | | std::atomic<std::chrono::seconds> m_best_block_time{0s}; |
782 | | |
783 | | /** Next time to check for stale tip */ |
784 | | std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s}; |
785 | | |
786 | | node::Warnings& m_warnings; |
787 | | TimeOffsets m_outbound_time_offsets{m_warnings}; |
788 | | |
789 | | const Options m_opts; |
790 | | |
791 | | bool RejectIncomingTxs(const CNode& peer) const; |
792 | | |
793 | | /** Whether we've completed initial sync yet, for determining when to turn |
794 | | * on extra block-relay-only peers. */ |
795 | | bool m_initial_sync_finished GUARDED_BY(cs_main){false}; |
796 | | |
797 | | /** Protects m_peer_map. This mutex must not be locked while holding a lock |
798 | | * on any of the mutexes inside a Peer object. */ |
799 | | mutable Mutex m_peer_mutex; |
800 | | /** |
801 | | * Map of all Peer objects, keyed by peer id. This map is protected |
802 | | * by the m_peer_mutex. Once a shared pointer reference is |
803 | | * taken, the lock may be released. Individual fields are protected by |
804 | | * their own locks. |
805 | | */ |
806 | | std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex); |
807 | | |
808 | | /** Map maintaining per-node state. */ |
809 | | std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main); |
810 | | |
811 | | /** Get a pointer to a const CNodeState, used when not mutating the CNodeState object. */ |
812 | | const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
813 | | /** Get a pointer to a mutable CNodeState. */ |
814 | | CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
815 | | |
816 | | uint32_t GetFetchFlags(const Peer& peer) const; |
817 | | |
818 | | std::map<uint64_t, std::chrono::microseconds> m_next_inv_to_inbounds_per_network_key GUARDED_BY(g_msgproc_mutex); |
819 | | |
820 | | /** Number of nodes with fSyncStarted. */ |
821 | | int nSyncStarted GUARDED_BY(cs_main) = 0; |
822 | | |
823 | | /** Hash of the last block we received via INV */ |
824 | | uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){}; |
825 | | |
826 | | /** |
827 | | * Sources of received blocks, saved to be able punish them when processing |
828 | | * happens afterwards. |
829 | | * Set mapBlockSource[hash].second to false if the node should not be |
830 | | * punished if the block is invalid. |
831 | | */ |
832 | | std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main); |
833 | | |
834 | | /** Number of peers with wtxid relay. */ |
835 | | std::atomic<int> m_wtxid_relay_peers{0}; |
836 | | |
837 | | /** Number of outbound peers with m_chain_sync.m_protect. */ |
838 | | int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; |
839 | | |
840 | | /** Number of preferable block download peers. */ |
841 | | int m_num_preferred_download_peers GUARDED_BY(cs_main){0}; |
842 | | |
843 | | /** Stalling timeout for blocks in IBD */ |
844 | | std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT}; |
845 | | |
846 | | /** |
847 | | * For sending `inv`s to inbound peers, we use a single (exponentially |
848 | | * distributed) timer for all peers with the same network key. If we used a separate timer for each |
849 | | * peer, a spy node could make multiple inbound connections to us to |
850 | | * accurately determine when we received a transaction (and potentially |
851 | | * determine the transaction's origin). Each network key has its own timer |
852 | | * to make fingerprinting harder. */ |
853 | | std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, |
854 | | std::chrono::seconds average_interval, |
855 | | uint64_t network_key) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
856 | | |
857 | | |
858 | | // All of the following cache a recent block, and are protected by m_most_recent_block_mutex |
859 | | Mutex m_most_recent_block_mutex; |
860 | | std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex); |
861 | | std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex); |
862 | | uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex); |
863 | | std::unique_ptr<const std::map<GenTxid, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex); |
864 | | |
865 | | // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates. |
866 | | /** Mutex guarding the other m_headers_presync_* variables. */ |
867 | | Mutex m_headers_presync_mutex; |
868 | | /** A type to represent statistics about a peer's low-work headers sync. |
869 | | * |
870 | | * - The first field is the total verified amount of work in that synchronization. |
871 | | * - The second is: |
872 | | * - nullopt: the sync is in REDOWNLOAD phase (phase 2). |
873 | | * - {height, timestamp}: the sync has the specified tip height and block timestamp (phase 1). |
874 | | */ |
875 | | using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>; |
876 | | /** Statistics for all peers in low-work headers sync. */ |
877 | | std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {}; |
878 | | /** The peer with the most-work entry in m_headers_presync_stats. */ |
879 | | NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1}; |
880 | | /** The m_headers_presync_stats improved, and needs signalling. */ |
881 | | std::atomic_bool m_headers_presync_should_signal{false}; |
882 | | |
883 | | /** Height of the highest block announced using BIP 152 high-bandwidth mode. */ |
884 | | int m_highest_fast_announce GUARDED_BY(::cs_main){0}; |
885 | | |
886 | | /** Have we requested this block from a peer */ |
887 | | bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
888 | | |
889 | | /** Have we requested this block from an outbound peer */ |
890 | | bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); |
891 | | |
892 | | /** Remove this block from our tracked requested blocks. Called if: |
893 | | * - the block has been received from a peer |
894 | | * - the request for the block has timed out |
895 | | * If "from_peer" is specified, then only remove the block if it is in |
896 | | * flight from that peer (to avoid one peer's network traffic from |
897 | | * affecting another's state). |
898 | | */ |
899 | | void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
900 | | |
901 | | /* Mark a block as in flight |
902 | | * Returns false, still setting pit, if the block was already in flight from the same peer |
903 | | * pit will only be valid as long as the same cs_main lock is being held |
904 | | */ |
905 | | bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
906 | | |
907 | | bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
908 | | |
909 | | /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has |
910 | | * at most count entries. |
911 | | */ |
912 | | void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
913 | | |
914 | | /** Request blocks for the background chainstate, if one is in use. */ |
915 | | void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
916 | | |
917 | | /** |
918 | | * \brief Find next blocks to download from a peer after a starting block. |
919 | | * |
920 | | * \param vBlocks Vector of blocks to download which will be appended to. |
921 | | * \param peer Peer which blocks will be downloaded from. |
922 | | * \param state Pointer to the state of the peer. |
923 | | * \param pindexWalk Pointer to the starting block to add to vBlocks. |
924 | | * \param count Maximum number of blocks to allow in vBlocks. No more |
925 | | * blocks will be added if it reaches this size. |
926 | | * \param nWindowEnd Maximum height of blocks to allow in vBlocks. No |
927 | | * blocks will be added above this height. |
928 | | * \param activeChain Optional pointer to a chain to compare against. If |
929 | | * provided, any next blocks which are already contained |
930 | | * in this chain will not be appended to vBlocks, but |
931 | | * instead will be used to update the |
932 | | * state->pindexLastCommonBlock pointer. |
933 | | * \param nodeStaller Optional pointer to a NodeId variable that will receive |
934 | | * the ID of another peer that might be causing this peer |
935 | | * to stall. This is set to the ID of the peer which |
936 | | * first requested the first in-flight block in the |
937 | | * download window. It is only set if vBlocks is empty at |
938 | | * the end of this function call and if increasing |
939 | | * nWindowEnd by 1 would cause it to be non-empty (which |
940 | | * indicates the download might be stalled because every |
941 | | * block in the window is in flight and no other peer is |
942 | | * trying to download the next block). |
943 | | */ |
944 | | void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
945 | | |
946 | | /* Multimap used to preserve insertion order */ |
947 | | typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap; |
948 | | BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main); |
949 | | |
950 | | /** When our tip was last updated. */ |
951 | | std::atomic<std::chrono::seconds> m_last_tip_update{0s}; |
952 | | |
953 | | /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */ |
954 | | CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) |
955 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, !tx_relay.m_tx_inventory_mutex); |
956 | | |
957 | | void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) |
958 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex) |
959 | | LOCKS_EXCLUDED(::cs_main); |
960 | | |
961 | | /** Process a new block. Perform any post-processing housekeeping */ |
962 | | void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked); |
963 | | |
964 | | /** Process compact block txns */ |
965 | | void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions) |
966 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); |
967 | | |
968 | | /** |
969 | | * Schedule an INV for a transaction to be sent to the given peer (via `PushMessage()`). |
970 | | * The transaction is picked from the list of transactions for private broadcast. |
971 | | * It is assumed that the connection to the peer is `ConnectionType::PRIVATE_BROADCAST`. |
972 | | * Avoid calling this for other peers since it will degrade privacy. |
973 | | */ |
974 | | void PushPrivateBroadcastTx(CNode& node) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); |
975 | | |
976 | | /** |
977 | | * When a peer sends us a valid block, instruct it to announce blocks to us |
978 | | * using CMPCTBLOCK if possible by adding its nodeid to the end of |
979 | | * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by |
980 | | * removing the first element if necessary. |
981 | | */ |
982 | | void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); |
983 | | |
984 | | /** Stack of nodes which we have set to announce using compact blocks */ |
985 | | std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); |
986 | | |
987 | | /** Number of peers from which we're downloading blocks. */ |
988 | | int m_peers_downloading_from GUARDED_BY(cs_main) = 0; |
989 | | |
990 | | void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
991 | | |
992 | | /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. |
993 | | * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of |
994 | | * these are kept in a ring buffer */ |
995 | | std::vector<std::pair<Wtxid, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex); |
996 | | /** Offset into vExtraTxnForCompact to insert the next tx */ |
997 | | size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0; |
998 | | |
999 | | /** Check whether the last unknown block a peer advertised is not yet known. */ |
1000 | | void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
1001 | | /** Update tracking information about which blocks a peer is assumed to have. */ |
1002 | | void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
1003 | | bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
1004 | | |
1005 | | /** |
1006 | | * Estimates the distance, in blocks, between the best-known block and the network chain tip. |
1007 | | * Utilizes the best-block time and the chainparams blocks spacing to approximate it. |
1008 | | */ |
1009 | | int64_t ApproximateBestBlockDepth() const; |
1010 | | |
1011 | | /** |
1012 | | * To prevent fingerprinting attacks, only send blocks/headers outside of |
1013 | | * the active chain if they are no more than a month older (both in time, |
1014 | | * and in best equivalent proof of work) than the best header chain we know |
1015 | | * about and we fully-validated them at some point. |
1016 | | */ |
1017 | | bool BlockRequestAllowed(const CBlockIndex& block_index) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
1018 | | bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
1019 | | void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) |
1020 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); |
1021 | | |
1022 | | /** |
1023 | | * Validation logic for compact filters request handling. |
1024 | | * |
1025 | | * May disconnect from the peer in the case of a bad request. |
1026 | | * |
1027 | | * @param[in] node The node that we received the request from |
1028 | | * @param[in] peer The peer that we received the request from |
1029 | | * @param[in] filter_type The filter type the request is for. Must be basic filters. |
1030 | | * @param[in] start_height The start height for the request |
1031 | | * @param[in] stop_hash The stop_hash for the request |
1032 | | * @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157 |
1033 | | * @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced. |
1034 | | * @param[out] filter_index The filter index, if the request can be serviced. |
1035 | | * @return True if the request can be serviced. |
1036 | | */ |
1037 | | bool PrepareBlockFilterRequest(CNode& node, Peer& peer, |
1038 | | BlockFilterType filter_type, uint32_t start_height, |
1039 | | const uint256& stop_hash, uint32_t max_height_diff, |
1040 | | const CBlockIndex*& stop_index, |
1041 | | BlockFilterIndex*& filter_index); |
1042 | | |
1043 | | /** |
1044 | | * Handle a cfilters request. |
1045 | | * |
1046 | | * May disconnect from the peer in the case of a bad request. |
1047 | | * |
1048 | | * @param[in] node The node that we received the request from |
1049 | | * @param[in] peer The peer that we received the request from |
1050 | | * @param[in] vRecv The raw message received |
1051 | | */ |
1052 | | void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv); |
1053 | | |
1054 | | /** |
1055 | | * Handle a cfheaders request. |
1056 | | * |
1057 | | * May disconnect from the peer in the case of a bad request. |
1058 | | * |
1059 | | * @param[in] node The node that we received the request from |
1060 | | * @param[in] peer The peer that we received the request from |
1061 | | * @param[in] vRecv The raw message received |
1062 | | */ |
1063 | | void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv); |
1064 | | |
1065 | | /** |
1066 | | * Handle a getcfcheckpt request. |
1067 | | * |
1068 | | * May disconnect from the peer in the case of a bad request. |
1069 | | * |
1070 | | * @param[in] node The node that we received the request from |
1071 | | * @param[in] peer The peer that we received the request from |
1072 | | * @param[in] vRecv The raw message received |
1073 | | */ |
1074 | | void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv); |
1075 | | |
1076 | | /** Checks if address relay is permitted with peer. If needed, initializes |
1077 | | * the m_addr_known bloom filter and sets m_addr_relay_enabled to true. |
1078 | | * |
1079 | | * @return True if address relay is enabled with peer |
1080 | | * False if address relay is disallowed |
1081 | | */ |
1082 | | bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1083 | | |
1084 | | void ProcessAddrs(std::string_view msg_type, CNode& pfrom, Peer& peer, std::vector<CAddress>&& vAddr, const std::atomic<bool>& interruptMsgProc) |
1085 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_peer_mutex); |
1086 | | |
1087 | | void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1088 | | void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1089 | | |
1090 | | void LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block); |
1091 | | |
1092 | | /// The transactions to be broadcast privately. |
1093 | | PrivateBroadcast m_tx_for_private_broadcast; |
1094 | | }; |
1095 | | |
1096 | | const CNodeState* PeerManagerImpl::State(NodeId pnode) const |
1097 | 178M | { |
1098 | 178M | std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode); |
1099 | 178M | if (it == m_node_states.end()) |
1100 | 0 | return nullptr; |
1101 | 178M | return &it->second; |
1102 | 178M | } |
1103 | | |
1104 | | CNodeState* PeerManagerImpl::State(NodeId pnode) |
1105 | 178M | { |
1106 | 178M | return const_cast<CNodeState*>(std::as_const(*this).State(pnode)); |
1107 | 178M | } |
1108 | | |
1109 | | /** |
1110 | | * Whether the peer supports the address. For example, a peer that does not |
1111 | | * implement BIP155 cannot receive Tor v3 addresses because it requires |
1112 | | * ADDRv2 (BIP155) encoding. |
1113 | | */ |
1114 | | static bool IsAddrCompatible(const Peer& peer, const CAddress& addr) |
1115 | 0 | { |
1116 | 0 | return peer.m_wants_addrv2 || addr.IsAddrV1Compatible(); |
1117 | 0 | } |
1118 | | |
1119 | | void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr) |
1120 | 0 | { |
1121 | 0 | assert(peer.m_addr_known); |
1122 | 0 | peer.m_addr_known->insert(addr.GetKey()); |
1123 | 0 | } |
1124 | | |
1125 | | void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr) |
1126 | 0 | { |
1127 | | // Known checking here is only to save space from duplicates. |
1128 | | // Before sending, we'll filter it again for known addresses that were |
1129 | | // added after addresses were pushed. |
1130 | 0 | assert(peer.m_addr_known); |
1131 | 0 | if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) { |
1132 | 0 | if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) { |
1133 | 0 | peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr; |
1134 | 0 | } else { |
1135 | 0 | peer.m_addrs_to_send.push_back(addr); |
1136 | 0 | } |
1137 | 0 | } |
1138 | 0 | } |
1139 | | |
1140 | | static void AddKnownTx(Peer& peer, const uint256& hash) |
1141 | 19.2M | { |
1142 | 19.2M | auto tx_relay = peer.GetTxRelay(); |
1143 | 19.2M | if (!tx_relay) return58.2k ; |
1144 | | |
1145 | 19.1M | LOCK(tx_relay->m_tx_inventory_mutex); Line | Count | Source | 268 | 19.1M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 19.1M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 19.1M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 19.1M | #define PASTE(x, y) x ## y |
|
|
|
|
1146 | 19.1M | tx_relay->m_tx_inventory_known_filter.insert(hash); |
1147 | 19.1M | } |
1148 | | |
1149 | | /** Whether this peer can serve us blocks. */ |
1150 | | static bool CanServeBlocks(const Peer& peer) |
1151 | 59.1M | { |
1152 | 59.1M | return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED); |
1153 | 59.1M | } |
1154 | | |
1155 | | /** Whether this peer can only serve limited recent blocks (e.g. because |
1156 | | * it prunes old blocks) */ |
1157 | | static bool IsLimitedPeer(const Peer& peer) |
1158 | 26.7M | { |
1159 | 26.7M | return (!(peer.m_their_services & NODE_NETWORK) && |
1160 | 26.7M | (peer.m_their_services & NODE_NETWORK_LIMITED)10.4M ); |
1161 | 26.7M | } |
1162 | | |
1163 | | /** Whether this peer can serve us witness data */ |
1164 | | static bool CanServeWitnesses(const Peer& peer) |
1165 | 8.46M | { |
1166 | 8.46M | return peer.m_their_services & NODE_WITNESS; |
1167 | 8.46M | } |
1168 | | |
1169 | | std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, |
1170 | | std::chrono::seconds average_interval, |
1171 | | uint64_t network_key) |
1172 | 94.2k | { |
1173 | 94.2k | auto [it, inserted] = m_next_inv_to_inbounds_per_network_key.try_emplace(network_key, 0us); |
1174 | 94.2k | auto& timer{it->second}; |
1175 | 94.2k | if (timer < now) { |
1176 | 88.5k | timer = now + m_rng.rand_exp_duration(average_interval); |
1177 | 88.5k | } |
1178 | 94.2k | return timer; |
1179 | 94.2k | } |
1180 | | |
1181 | | bool PeerManagerImpl::IsBlockRequested(const uint256& hash) |
1182 | 3.34M | { |
1183 | 3.34M | return mapBlocksInFlight.contains(hash); |
1184 | 3.34M | } |
1185 | | |
1186 | | bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash) |
1187 | 390 | { |
1188 | 432 | for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++42 ) { |
1189 | 411 | auto [nodeid, block_it] = range.first->second; |
1190 | 411 | PeerRef peer{GetPeerRef(nodeid)}; |
1191 | 411 | if (peer && !peer->m_is_inbound) return true369 ; |
1192 | 411 | } |
1193 | | |
1194 | 21 | return false; |
1195 | 390 | } |
1196 | | |
1197 | | void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) |
1198 | 2.00M | { |
1199 | 2.00M | auto range = mapBlocksInFlight.equal_range(hash); |
1200 | 2.00M | if (range.first == range.second) { |
1201 | | // Block was not requested from any peer |
1202 | 1.03M | return; |
1203 | 1.03M | } |
1204 | | |
1205 | | // We should not have requested too many of this block |
1206 | 964k | Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); Line | Count | Source | 125 | 964k | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
1207 | | |
1208 | 2.17M | while (range.first != range.second) { |
1209 | 1.21M | const auto& [node_id, list_it]{range.first->second}; |
1210 | | |
1211 | 1.21M | if (from_peer && *from_peer != node_id1.20M ) { |
1212 | 496k | range.first++; |
1213 | 496k | continue; |
1214 | 496k | } |
1215 | | |
1216 | 716k | CNodeState& state = *Assert(State(node_id)); Line | Count | Source | 113 | 716k | #define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val) |
|
1217 | | |
1218 | 716k | if (state.vBlocksInFlight.begin() == list_it) { |
1219 | | // First block on the queue was received, update the start download time for the next one |
1220 | 489k | state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>()); |
1221 | 489k | } |
1222 | 716k | state.vBlocksInFlight.erase(list_it); |
1223 | | |
1224 | 716k | if (state.vBlocksInFlight.empty()) { |
1225 | | // Last validated block on the queue for this peer was received. |
1226 | 487k | m_peers_downloading_from--; |
1227 | 487k | } |
1228 | 716k | state.m_stalling_since = 0us; |
1229 | | |
1230 | 716k | range.first = mapBlocksInFlight.erase(range.first); |
1231 | 716k | } |
1232 | 964k | } |
1233 | | |
1234 | | bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit) |
1235 | 2.22M | { |
1236 | 2.22M | const uint256& hash{block.GetBlockHash()}; |
1237 | | |
1238 | 2.22M | CNodeState *state = State(nodeid); |
1239 | 2.22M | assert(state != nullptr); |
1240 | | |
1241 | 2.22M | Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); Line | Count | Source | 125 | 2.22M | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
1242 | | |
1243 | | // Short-circuit most stuff in case it is from the same node |
1244 | 2.48M | for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++255k ) { |
1245 | 1.64M | if (range.first->second.first == nodeid) { |
1246 | 1.39M | if (pit) { |
1247 | 1.39M | *pit = &range.first->second.second; |
1248 | 1.39M | } |
1249 | 1.39M | return false; |
1250 | 1.39M | } |
1251 | 1.64M | } |
1252 | | |
1253 | | // Make sure it's not being fetched already from same peer. |
1254 | 835k | RemoveBlockRequest(hash, nodeid); |
1255 | | |
1256 | 835k | std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), |
1257 | 835k | {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool)802k : nullptr33.3k )}); |
1258 | 835k | if (state->vBlocksInFlight.size() == 1) { |
1259 | | // We're starting a block download (batch) from this peer. |
1260 | 526k | state->m_downloading_since = GetTime<std::chrono::microseconds>(); |
1261 | 526k | m_peers_downloading_from++; |
1262 | 526k | } |
1263 | 835k | auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))); |
1264 | 835k | if (pit) { |
1265 | 802k | *pit = &itInFlight->second.second; |
1266 | 802k | } |
1267 | 835k | return true; |
1268 | 2.22M | } |
1269 | | |
1270 | | void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) |
1271 | 207k | { |
1272 | 207k | AssertLockHeld(cs_main); Line | Count | Source | 144 | 207k | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
1273 | | |
1274 | | // When in -blocksonly mode, never request high-bandwidth mode from peers. Our |
1275 | | // mempool will not contain the transactions necessary to reconstruct the |
1276 | | // compact block. |
1277 | 207k | if (m_opts.ignore_incoming_txs) return0 ; |
1278 | | |
1279 | 207k | CNodeState* nodestate = State(nodeid); |
1280 | 207k | PeerRef peer{GetPeerRef(nodeid)}; |
1281 | 207k | if (!nodestate || !nodestate->m_provides_cmpctblocks) { |
1282 | | // Don't request compact blocks if the peer has not signalled support |
1283 | 20.6k | return; |
1284 | 20.6k | } |
1285 | | |
1286 | 187k | int num_outbound_hb_peers = 0; |
1287 | 352k | for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++164k ) { |
1288 | 322k | if (*it == nodeid) { |
1289 | 157k | lNodesAnnouncingHeaderAndIDs.erase(it); |
1290 | 157k | lNodesAnnouncingHeaderAndIDs.push_back(nodeid); |
1291 | 157k | return; |
1292 | 157k | } |
1293 | 164k | PeerRef peer_ref{GetPeerRef(*it)}; |
1294 | 164k | if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers163k ; |
1295 | 164k | } |
1296 | 29.9k | if (peer && peer->m_is_inbound) { |
1297 | | // If we're adding an inbound HB peer, make sure we're not removing |
1298 | | // our last outbound HB peer in the process. |
1299 | 1.15k | if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1101 ) { |
1300 | 0 | PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())}; |
1301 | 0 | if (remove_peer && !remove_peer->m_is_inbound) { |
1302 | | // Put the HB outbound peer in the second slot, so that it |
1303 | | // doesn't get removed. |
1304 | 0 | std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin())); |
1305 | 0 | } |
1306 | 0 | } |
1307 | 1.15k | } |
1308 | 29.9k | m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
1309 | 29.7k | AssertLockHeld(::cs_main); Line | Count | Source | 144 | 29.7k | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
1310 | 29.7k | if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { |
1311 | | // As per BIP152, we only get 3 of our peers to announce |
1312 | | // blocks using compact encodings. |
1313 | 143 | m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){ |
1314 | 143 | MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION); |
1315 | | // save BIP152 bandwidth state: we select peer to be low-bandwidth |
1316 | 143 | pnodeStop->m_bip152_highbandwidth_to = false; |
1317 | 143 | return true; |
1318 | 143 | }); |
1319 | 143 | lNodesAnnouncingHeaderAndIDs.pop_front(); |
1320 | 143 | } |
1321 | 29.7k | MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION); |
1322 | | // save BIP152 bandwidth state: we select peer to be high-bandwidth |
1323 | 29.7k | pfrom->m_bip152_highbandwidth_to = true; |
1324 | 29.7k | lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); |
1325 | 29.7k | return true; |
1326 | 29.7k | }); |
1327 | 29.9k | } |
1328 | | |
1329 | | bool PeerManagerImpl::TipMayBeStale() |
1330 | 0 | { |
1331 | 0 | AssertLockHeld(cs_main); Line | Count | Source | 144 | 0 | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
1332 | 0 | const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); |
1333 | 0 | if (m_last_tip_update.load() == 0s) { |
1334 | 0 | m_last_tip_update = GetTime<std::chrono::seconds>(); |
1335 | 0 | } |
1336 | 0 | return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty(); |
1337 | 0 | } |
1338 | | |
1339 | | int64_t PeerManagerImpl::ApproximateBestBlockDepth() const |
1340 | 129k | { |
1341 | 129k | return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing; |
1342 | 129k | } |
1343 | | |
1344 | | bool PeerManagerImpl::CanDirectFetch() |
1345 | 4.82M | { |
1346 | 4.82M | return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20; |
1347 | 4.82M | } |
1348 | | |
1349 | | static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main) |
1350 | 1.92M | { |
1351 | 1.92M | if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)1.19M ) |
1352 | 726k | return true; |
1353 | 1.20M | if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)334k ) |
1354 | 59.2k | return true; |
1355 | 1.14M | return false; |
1356 | 1.20M | } |
1357 | | |
1358 | 60.9M | void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) { |
1359 | 60.9M | CNodeState *state = State(nodeid); |
1360 | 60.9M | assert(state != nullptr); |
1361 | | |
1362 | 60.9M | if (!state->hashLastUnknownBlock.IsNull()) { |
1363 | 1.15M | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock); |
1364 | 1.15M | if (pindex && pindex->nChainWork > 0543 ) { |
1365 | 543 | if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork244 ) { |
1366 | 480 | state->pindexBestKnownBlock = pindex; |
1367 | 480 | } |
1368 | 543 | state->hashLastUnknownBlock.SetNull(); |
1369 | 543 | } |
1370 | 1.15M | } |
1371 | 60.9M | } |
1372 | | |
1373 | 7.13M | void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { |
1374 | 7.13M | CNodeState *state = State(nodeid); |
1375 | 7.13M | assert(state != nullptr); |
1376 | | |
1377 | 7.13M | ProcessBlockAvailability(nodeid); |
1378 | | |
1379 | 7.13M | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash); |
1380 | 7.13M | if (pindex && pindex->nChainWork > 06.87M ) { |
1381 | | // An actually better block was announced. |
1382 | 6.87M | if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork6.55M ) { |
1383 | 5.85M | state->pindexBestKnownBlock = pindex; |
1384 | 5.85M | } |
1385 | 6.87M | } else { |
1386 | | // An unknown block was announced; just assume that the latest one is the best one. |
1387 | 253k | state->hashLastUnknownBlock = hash; |
1388 | 253k | } |
1389 | 7.13M | } |
1390 | | |
1391 | | // Logic for calculating which blocks to download from a given peer, given our current tip. |
1392 | | void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) |
1393 | 19.7M | { |
1394 | 19.7M | if (count == 0) |
1395 | 0 | return; |
1396 | | |
1397 | 19.7M | vBlocks.reserve(vBlocks.size() + count); |
1398 | 19.7M | CNodeState *state = State(peer.m_id); |
1399 | 19.7M | assert(state != nullptr); |
1400 | | |
1401 | | // Make sure pindexBestKnownBlock is up to date, we'll need it. |
1402 | 19.7M | ProcessBlockAvailability(peer.m_id); |
1403 | | |
1404 | 19.7M | if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork15.3M || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()15.0M ) { |
1405 | | // This peer has nothing interesting. |
1406 | 4.67M | return; |
1407 | 4.67M | } |
1408 | | |
1409 | | // When syncing with AssumeUtxo and the snapshot has not yet been validated, |
1410 | | // abort downloading blocks from peers that don't have the snapshot block in their best chain. |
1411 | | // We can't reorg to this chain due to missing undo data until validation completes, |
1412 | | // so downloading blocks from it would be futile. |
1413 | 15.0M | const CBlockIndex* snap_base{m_chainman.CurrentChainstate().SnapshotBase()}; |
1414 | 15.0M | if (snap_base && m_chainman.CurrentChainstate().m_assumeutxo == Assumeutxo::UNVALIDATED0 && |
1415 | 15.0M | state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base0 ) { |
1416 | 0 | LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
1417 | 0 | return; |
1418 | 0 | } |
1419 | | |
1420 | | // Determine the forking point between the peer's chain and our chain: |
1421 | | // pindexLastCommonBlock is required to be an ancestor of pindexBestKnownBlock, and will be used as a starting point. |
1422 | | // It is being set to the fork point between the peer's best known block and the current tip, unless it is already set to |
1423 | | // an ancestor with more work than the fork point. |
1424 | 15.0M | auto fork_point = LastCommonAncestor(state->pindexBestKnownBlock, m_chainman.ActiveTip()); |
1425 | 15.0M | if (state->pindexLastCommonBlock == nullptr || |
1426 | 15.0M | fork_point->nChainWork > state->pindexLastCommonBlock->nChainWork14.9M || |
1427 | 15.0M | state->pindexBestKnownBlock->GetAncestor(state->pindexLastCommonBlock->nHeight) != state->pindexLastCommonBlock14.6M ) { |
1428 | 376k | state->pindexLastCommonBlock = fork_point; |
1429 | 376k | } |
1430 | 15.0M | if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) |
1431 | 8.66M | return; |
1432 | | |
1433 | 6.37M | const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; |
1434 | | // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last |
1435 | | // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to |
1436 | | // download that next block if the window were 1 larger. |
1437 | 6.37M | int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; |
1438 | | |
1439 | 6.37M | FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller); |
1440 | 6.37M | } |
1441 | | |
1442 | | void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block) |
1443 | 0 | { |
1444 | 0 | Assert(from_tip); Line | Count | Source | 113 | 0 | #define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val) |
|
1445 | 0 | Assert(target_block); Line | Count | Source | 113 | 0 | #define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val) |
|
1446 | |
|
1447 | 0 | if (vBlocks.size() >= count) { |
1448 | 0 | return; |
1449 | 0 | } |
1450 | | |
1451 | 0 | vBlocks.reserve(count); |
1452 | 0 | CNodeState *state = Assert(State(peer.m_id)); Line | Count | Source | 113 | 0 | #define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val) |
|
1453 | |
|
1454 | 0 | if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) { |
1455 | | // This peer can't provide us the complete series of blocks leading up to the |
1456 | | // assumeutxo snapshot base. |
1457 | | // |
1458 | | // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we |
1459 | | // will eventually crash when we try to reorg to it. Let other logic |
1460 | | // deal with whether we disconnect this peer. |
1461 | | // |
1462 | | // TODO at some point in the future, we might choose to request what blocks |
1463 | | // this peer does have from the historical chain, despite it not having a |
1464 | | // complete history beneath the snapshot base. |
1465 | 0 | return; |
1466 | 0 | } |
1467 | | |
1468 | 0 | FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight)); |
1469 | 0 | } |
1470 | | |
1471 | | void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller) |
1472 | 6.37M | { |
1473 | 6.37M | std::vector<const CBlockIndex*> vToFetch; |
1474 | 6.37M | int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); |
1475 | 6.37M | bool is_limited_peer = IsLimitedPeer(peer); |
1476 | 6.37M | NodeId waitingfor = -1; |
1477 | 7.96M | while (pindexWalk->nHeight < nMaxHeight) { |
1478 | | // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards |
1479 | | // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive |
1480 | | // as iterating over ~100 CBlockIndex* entries anyway. |
1481 | 6.37M | int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128)); |
1482 | 6.37M | vToFetch.resize(nToFetch); |
1483 | 6.37M | pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch); |
1484 | 6.37M | vToFetch[nToFetch - 1] = pindexWalk; |
1485 | 7.39M | for (unsigned int i = nToFetch - 1; i > 0; i--1.01M ) { |
1486 | 1.01M | vToFetch[i - 1] = vToFetch[i]->pprev; |
1487 | 1.01M | } |
1488 | | |
1489 | | // Iterate over those blocks in vToFetch (in forward direction), adding the ones that |
1490 | | // are not yet downloaded and not in flight to vBlocks. In the meantime, update |
1491 | | // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's |
1492 | | // already part of our chain (and therefore don't need it even if pruned). |
1493 | 6.82M | for (const CBlockIndex* pindex : vToFetch) { |
1494 | 6.82M | if (!pindex->IsValid(BLOCK_VALID_TREE)) { |
1495 | | // We consider the chain that this peer is on invalid. |
1496 | 255k | return; |
1497 | 255k | } |
1498 | | |
1499 | 6.56M | if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)4.53M ) { |
1500 | | // We wouldn't download this block or its descendants from this peer. |
1501 | 4.53M | return; |
1502 | 4.53M | } |
1503 | | |
1504 | 2.02M | if (pindex->nStatus & BLOCK_HAVE_DATA || (2.01M activeChain2.01M && activeChain->Contains(pindex)2.01M )) { |
1505 | 14.5k | if (activeChain && pindex->HaveNumChainTxs()) { |
1506 | 144 | state->pindexLastCommonBlock = pindex; |
1507 | 144 | } |
1508 | 14.5k | continue; |
1509 | 14.5k | } |
1510 | | |
1511 | | // Is block in-flight? |
1512 | 2.01M | if (IsBlockRequested(pindex->GetBlockHash())) { |
1513 | 1.98M | if (waitingfor == -1) { |
1514 | | // This is the first already-in-flight block. |
1515 | 1.56M | waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first; |
1516 | 1.56M | } |
1517 | 1.98M | continue; |
1518 | 1.98M | } |
1519 | | |
1520 | | // The block is not already downloaded, and not yet in flight. |
1521 | 26.5k | if (pindex->nHeight > nWindowEnd) { |
1522 | | // We reached the end of the window. |
1523 | 0 | if (vBlocks.size() == 0 && waitingfor != peer.m_id) { |
1524 | | // We aren't able to fetch anything, but we would be if the download window was one larger. |
1525 | 0 | if (nodeStaller) *nodeStaller = waitingfor; |
1526 | 0 | } |
1527 | 0 | return; |
1528 | 0 | } |
1529 | | |
1530 | | // Don't request blocks that go further than what limited peers can provide |
1531 | 26.5k | if (is_limited_peer && (state->pindexBestKnownBlock->nHeight - pindex->nHeight >= static_cast<int>(NODE_NETWORK_LIMITED_MIN_BLOCKS) - 2 /* two blocks buffer for possible races */)1.51k ) { |
1532 | 0 | continue; |
1533 | 0 | } |
1534 | | |
1535 | 26.5k | vBlocks.push_back(pindex); |
1536 | 26.5k | if (vBlocks.size() == count) { |
1537 | 312 | return; |
1538 | 312 | } |
1539 | 26.5k | } |
1540 | 6.37M | } |
1541 | 6.37M | } |
1542 | | |
1543 | | } // namespace |
1544 | | |
1545 | | void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) |
1546 | 756k | { |
1547 | 756k | uint64_t my_services; |
1548 | 756k | int64_t my_time; |
1549 | 756k | uint64_t your_services; |
1550 | 756k | CService your_addr; |
1551 | 756k | std::string my_user_agent; |
1552 | 756k | int my_height; |
1553 | 756k | bool my_tx_relay; |
1554 | 756k | if (pnode.IsPrivateBroadcastConn()) { |
1555 | 6.68k | my_services = NODE_NONE; |
1556 | 6.68k | my_time = 0; |
1557 | 6.68k | your_services = NODE_NONE; |
1558 | 6.68k | your_addr = CService{}; |
1559 | 6.68k | my_user_agent = "/pynode:0.0.1/"; // Use a constant other than the default (or user-configured). See https://github.com/bitcoin/bitcoin/pull/27509#discussion_r1214671917 |
1560 | 6.68k | my_height = 0; |
1561 | 6.68k | my_tx_relay = false; |
1562 | 749k | } else { |
1563 | 749k | const CAddress& addr{pnode.addr}; |
1564 | 749k | my_services = peer.m_our_services; |
1565 | 749k | my_time = count_seconds(GetTime<std::chrono::seconds>()); |
1566 | 749k | your_services = addr.nServices; |
1567 | 749k | your_addr = addr.IsRoutable() && !IsProxy(addr)392k && addr.IsAddrV1Compatible()392k ? CService{addr}304k : CService{}445k ; |
1568 | 749k | my_user_agent = strSubVersion; |
1569 | 749k | my_height = m_best_height; |
1570 | 749k | my_tx_relay = !RejectIncomingTxs(pnode); |
1571 | 749k | } |
1572 | | |
1573 | 756k | MakeAndPushMessage( |
1574 | 756k | pnode, |
1575 | 756k | NetMsgType::VERSION, |
1576 | 756k | PROTOCOL_VERSION, |
1577 | 756k | my_services, |
1578 | 756k | my_time, |
1579 | | // your_services + CNetAddr::V1(your_addr) is the pre-version-31402 serialization of your_addr (without nTime) |
1580 | 756k | your_services, CNetAddr::V1(your_addr), |
1581 | | // same, for a dummy address |
1582 | 756k | my_services, CNetAddr::V1(CService{}), |
1583 | 756k | pnode.GetLocalNonce(), |
1584 | 756k | my_user_agent, |
1585 | 756k | my_height, |
1586 | 756k | my_tx_relay); |
1587 | | |
1588 | 756k | LogDebug( Line | Count | Source | 117 | 756k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 756k | do { \ | 109 | 756k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 756k | } while (0) |
|
|
1589 | 756k | BCLog::NET, "send version message: version=%d, blocks=%d%s, txrelay=%d, peer=%d\n", |
1590 | 756k | PROTOCOL_VERSION, my_height, |
1591 | 756k | fLogIPs ? strprintf(", them=%s", your_addr.ToStringAddrPort()) : "", |
1592 | 756k | my_tx_relay, pnode.GetId()); |
1593 | 756k | } |
1594 | | |
1595 | | void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) |
1596 | 0 | { |
1597 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
1598 | 0 | CNodeState *state = State(node); |
1599 | 0 | if (state) state->m_last_block_announcement = time_in_seconds; |
1600 | 0 | } |
1601 | | |
1602 | | void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services) |
1603 | 762k | { |
1604 | 762k | NodeId nodeid = node.GetId(); |
1605 | 762k | { |
1606 | 762k | LOCK(cs_main); // For m_node_states Line | Count | Source | 268 | 762k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 762k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 762k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 762k | #define PASTE(x, y) x ## y |
|
|
|
|
1607 | 762k | m_node_states.try_emplace(m_node_states.end(), nodeid); |
1608 | 762k | } |
1609 | 762k | WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid)); Line | Count | Source | 299 | 762k | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
1610 | | |
1611 | 762k | if (NetPermissions::HasFlag(node.m_permission_flags, NetPermissionFlags::BloomFilter)) { |
1612 | 109k | our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM); |
1613 | 109k | } |
1614 | | |
1615 | 762k | PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn()); |
1616 | 762k | { |
1617 | 762k | LOCK(m_peer_mutex); Line | Count | Source | 268 | 762k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 762k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 762k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 762k | #define PASTE(x, y) x ## y |
|
|
|
|
1618 | 762k | m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer); |
1619 | 762k | } |
1620 | 762k | } |
1621 | | |
1622 | | void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler) |
1623 | 0 | { |
1624 | 0 | std::set<Txid> unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); |
1625 | |
|
1626 | 0 | for (const auto& txid : unbroadcast_txids) { |
1627 | 0 | CTransactionRef tx = m_mempool.get(txid); |
1628 | |
|
1629 | 0 | if (tx != nullptr) { |
1630 | 0 | InitiateTxBroadcastToAll(txid, tx->GetWitnessHash()); |
1631 | 0 | } else { |
1632 | 0 | m_mempool.RemoveUnbroadcastTx(txid, true); |
1633 | 0 | } |
1634 | 0 | } |
1635 | | |
1636 | | // Schedule next run for 10-15 minutes in the future. |
1637 | | // We add randomness on every cycle to avoid the possibility of P2P fingerprinting. |
1638 | 0 | const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); |
1639 | 0 | scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); |
1640 | 0 | } |
1641 | | |
1642 | | void PeerManagerImpl::ReattemptPrivateBroadcast(CScheduler& scheduler) |
1643 | 0 | { |
1644 | | // Remove stale transactions that are no longer relevant (e.g. already in |
1645 | | // the mempool or mined) and count the remaining ones. |
1646 | 0 | size_t num_for_rebroadcast{0}; |
1647 | 0 | const auto stale_txs = m_tx_for_private_broadcast.GetStale(); |
1648 | 0 | if (!stale_txs.empty()) { |
1649 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
1650 | 0 | for (const auto& stale_tx : stale_txs) { |
1651 | 0 | auto mempool_acceptable = m_chainman.ProcessTransaction(stale_tx, /*test_accept=*/true); |
1652 | 0 | if (mempool_acceptable.m_result_type == MempoolAcceptResult::ResultType::VALID) { |
1653 | 0 | LogDebug(BCLog::PRIVBROADCAST, Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
1654 | 0 | "Reattempting broadcast of stale txid=%s wtxid=%s", |
1655 | 0 | stale_tx->GetHash().ToString(), stale_tx->GetWitnessHash().ToString()); |
1656 | 0 | ++num_for_rebroadcast; |
1657 | 0 | } else { |
1658 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Giving up broadcast attempts for txid=%s wtxid=%s: %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
1659 | 0 | stale_tx->GetHash().ToString(), stale_tx->GetWitnessHash().ToString(), |
1660 | 0 | mempool_acceptable.m_state.ToString()); |
1661 | 0 | m_tx_for_private_broadcast.Remove(stale_tx); |
1662 | 0 | } |
1663 | 0 | } |
1664 | | |
1665 | | // This could overshoot, but that is ok - we will open some private connections in vain. |
1666 | 0 | m_connman.m_private_broadcast.NumToOpenAdd(num_for_rebroadcast); |
1667 | 0 | } |
1668 | |
|
1669 | 0 | const auto delta{2min + FastRandomContext().randrange<std::chrono::milliseconds>(1min)}; |
1670 | 0 | scheduler.scheduleFromNow([&] { ReattemptPrivateBroadcast(scheduler); }, delta); |
1671 | 0 | } |
1672 | | |
1673 | | void PeerManagerImpl::FinalizeNode(const CNode& node) |
1674 | 762k | { |
1675 | 762k | NodeId nodeid = node.GetId(); |
1676 | 762k | { |
1677 | 762k | LOCK(cs_main); Line | Count | Source | 268 | 762k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 762k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 762k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 762k | #define PASTE(x, y) x ## y |
|
|
|
|
1678 | 762k | { |
1679 | | // We remove the PeerRef from g_peer_map here, but we don't always |
1680 | | // destruct the Peer. Sometimes another thread is still holding a |
1681 | | // PeerRef, so the refcount is >= 1. Be careful not to do any |
1682 | | // processing here that assumes Peer won't be changed before it's |
1683 | | // destructed. |
1684 | 762k | PeerRef peer = RemovePeer(nodeid); |
1685 | 762k | assert(peer != nullptr); |
1686 | 762k | m_wtxid_relay_peers -= peer->m_wtxid_relay; |
1687 | 762k | assert(m_wtxid_relay_peers >= 0); |
1688 | 762k | } |
1689 | 762k | CNodeState *state = State(nodeid); |
1690 | 762k | assert(state != nullptr); |
1691 | | |
1692 | 762k | if (state->fSyncStarted) |
1693 | 165k | nSyncStarted--; |
1694 | | |
1695 | 762k | for (const QueuedBlock& entry : state->vBlocksInFlight) { |
1696 | 119k | auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash()); |
1697 | 239k | while (range.first != range.second) { |
1698 | 120k | auto [node_id, list_it] = range.first->second; |
1699 | 120k | if (node_id != nodeid) { |
1700 | 631 | range.first++; |
1701 | 119k | } else { |
1702 | 119k | range.first = mapBlocksInFlight.erase(range.first); |
1703 | 119k | } |
1704 | 120k | } |
1705 | 119k | } |
1706 | 762k | { |
1707 | 762k | LOCK(m_tx_download_mutex); Line | Count | Source | 268 | 762k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 762k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 762k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 762k | #define PASTE(x, y) x ## y |
|
|
|
|
1708 | 762k | m_txdownloadman.DisconnectedPeer(nodeid); |
1709 | 762k | } |
1710 | 762k | if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid)0 ; |
1711 | 762k | m_num_preferred_download_peers -= state->fPreferredDownload; |
1712 | 762k | m_peers_downloading_from -= (!state->vBlocksInFlight.empty()); |
1713 | 762k | assert(m_peers_downloading_from >= 0); |
1714 | 762k | m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; |
1715 | 762k | assert(m_outbound_peers_with_protect_from_disconnect >= 0); |
1716 | | |
1717 | 762k | m_node_states.erase(nodeid); |
1718 | | |
1719 | 762k | if (m_node_states.empty()) { |
1720 | | // Do a consistency check after the last peer is removed. |
1721 | 190k | assert(mapBlocksInFlight.empty()); |
1722 | 190k | assert(m_num_preferred_download_peers == 0); |
1723 | 190k | assert(m_peers_downloading_from == 0); |
1724 | 190k | assert(m_outbound_peers_with_protect_from_disconnect == 0); |
1725 | 190k | assert(m_wtxid_relay_peers == 0); |
1726 | 190k | WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty()); Line | Count | Source | 299 | 190k | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
1727 | 190k | } |
1728 | 762k | } // cs_main |
1729 | 762k | if (node.fSuccessfullyConnected && |
1730 | 762k | !node.IsBlockOnlyConn()547k && !node.IsPrivateBroadcastConn()544k && !node.IsInboundConn()543k ) { |
1731 | | // Only change visible addrman state for full outbound peers. We don't |
1732 | | // call Connected() for feeler connections since they don't have |
1733 | | // fSuccessfullyConnected set. Also don't call Connected() for private broadcast |
1734 | | // connections since they could leak information in addrman. |
1735 | 434k | m_addrman.Connected(node.addr); |
1736 | 434k | } |
1737 | 762k | { |
1738 | 762k | LOCK(m_headers_presync_mutex); Line | Count | Source | 268 | 762k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 762k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 762k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 762k | #define PASTE(x, y) x ## y |
|
|
|
|
1739 | 762k | m_headers_presync_stats.erase(nodeid); |
1740 | 762k | } |
1741 | 762k | if (node.IsPrivateBroadcastConn() && |
1742 | 762k | !m_tx_for_private_broadcast.DidNodeConfirmReception(nodeid)6.68k && |
1743 | 762k | m_tx_for_private_broadcast.HavePendingTransactions()6.68k ) { |
1744 | |
|
1745 | 0 | m_connman.m_private_broadcast.NumToOpenAdd(1); |
1746 | 0 | } |
1747 | 762k | LogDebug(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); Line | Count | Source | 117 | 762k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 762k | do { \ | 109 | 762k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 762k | } while (0) |
|
|
1748 | 762k | } |
1749 | | |
1750 | | bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const |
1751 | 771k | { |
1752 | | // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services) |
1753 | 771k | return !(GetDesirableServiceFlags(services) & (~services)); |
1754 | 771k | } |
1755 | | |
1756 | | ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const |
1757 | 771k | { |
1758 | 771k | if (services & NODE_NETWORK_LIMITED) { |
1759 | | // Limited peers are desirable when we are close to the tip. |
1760 | 129k | if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) { |
1761 | 0 | return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS); |
1762 | 0 | } |
1763 | 129k | } |
1764 | 771k | return ServiceFlags(NODE_NETWORK | NODE_WITNESS); |
1765 | 771k | } |
1766 | | |
1767 | | PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const |
1768 | 82.2M | { |
1769 | 82.2M | LOCK(m_peer_mutex); Line | Count | Source | 268 | 82.2M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 82.2M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 82.2M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 82.2M | #define PASTE(x, y) x ## y |
|
|
|
|
1770 | 82.2M | auto it = m_peer_map.find(id); |
1771 | 82.2M | return it != m_peer_map.end() ? it->second : nullptr0 ; |
1772 | 82.2M | } |
1773 | | |
1774 | | PeerRef PeerManagerImpl::RemovePeer(NodeId id) |
1775 | 762k | { |
1776 | 762k | PeerRef ret; |
1777 | 762k | LOCK(m_peer_mutex); Line | Count | Source | 268 | 762k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 762k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 762k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 762k | #define PASTE(x, y) x ## y |
|
|
|
|
1778 | 762k | auto it = m_peer_map.find(id); |
1779 | 762k | if (it != m_peer_map.end()) { |
1780 | 762k | ret = std::move(it->second); |
1781 | 762k | m_peer_map.erase(it); |
1782 | 762k | } |
1783 | 762k | return ret; |
1784 | 762k | } |
1785 | | |
1786 | | bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const |
1787 | 563k | { |
1788 | 563k | { |
1789 | 563k | LOCK(cs_main); Line | Count | Source | 268 | 563k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 563k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 563k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 563k | #define PASTE(x, y) x ## y |
|
|
|
|
1790 | 563k | const CNodeState* state = State(nodeid); |
1791 | 563k | if (state == nullptr) |
1792 | 0 | return false; |
1793 | 563k | stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight0 : -1; |
1794 | 563k | stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight0 : -1; |
1795 | 563k | for (const QueuedBlock& queue : state->vBlocksInFlight) { |
1796 | 0 | if (queue.pindex) |
1797 | 0 | stats.vHeightInFlight.push_back(queue.pindex->nHeight); |
1798 | 0 | } |
1799 | 563k | } |
1800 | | |
1801 | 0 | PeerRef peer = GetPeerRef(nodeid); |
1802 | 563k | if (peer == nullptr) return false0 ; |
1803 | 563k | stats.their_services = peer->m_their_services; |
1804 | | // It is common for nodes with good ping times to suddenly become lagged, |
1805 | | // due to a new block arriving or other large transfer. |
1806 | | // Merely reporting pingtime might fool the caller into thinking the node was still responsive, |
1807 | | // since pingtime does not update until the ping is complete, which might take a while. |
1808 | | // So, if a ping is taking an unusually long time in flight, |
1809 | | // the caller can immediately detect that this is happening. |
1810 | 563k | auto ping_wait{0us}; |
1811 | 563k | if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())0 ) { |
1812 | 0 | ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load(); |
1813 | 0 | } |
1814 | | |
1815 | 563k | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { |
1816 | 537k | stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs); Line | Count | Source | 299 | 537k | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
1817 | 537k | stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load(); |
1818 | 537k | LOCK(tx_relay->m_tx_inventory_mutex); Line | Count | Source | 268 | 537k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 537k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 537k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 537k | #define PASTE(x, y) x ## y |
|
|
|
|
1819 | 537k | stats.m_last_inv_seq = tx_relay->m_last_inv_sequence; |
1820 | 537k | stats.m_inv_to_send = tx_relay->m_tx_inventory_to_send.size(); |
1821 | 537k | } else { |
1822 | 25.4k | stats.m_relay_txs = false; |
1823 | 25.4k | stats.m_fee_filter_received = 0; |
1824 | 25.4k | stats.m_inv_to_send = 0; |
1825 | 25.4k | } |
1826 | | |
1827 | 563k | stats.m_ping_wait = ping_wait; |
1828 | 563k | stats.m_addr_processed = peer->m_addr_processed.load(); |
1829 | 563k | stats.m_addr_rate_limited = peer->m_addr_rate_limited.load(); |
1830 | 563k | stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load(); |
1831 | 563k | { |
1832 | 563k | LOCK(peer->m_headers_sync_mutex); Line | Count | Source | 268 | 563k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 563k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 563k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 563k | #define PASTE(x, y) x ## y |
|
|
|
|
1833 | 563k | if (peer->m_headers_sync) { |
1834 | 0 | stats.presync_height = peer->m_headers_sync->GetPresyncHeight(); |
1835 | 0 | } |
1836 | 563k | } |
1837 | 563k | stats.time_offset = peer->m_time_offset; |
1838 | | |
1839 | 563k | return true; |
1840 | 563k | } |
1841 | | |
1842 | | std::vector<node::TxOrphanage::OrphanInfo> PeerManagerImpl::GetOrphanTransactions() |
1843 | 0 | { |
1844 | 0 | LOCK(m_tx_download_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
1845 | 0 | return m_txdownloadman.GetOrphanTransactions(); |
1846 | 0 | } |
1847 | | |
1848 | | PeerManagerInfo PeerManagerImpl::GetInfo() const |
1849 | 0 | { |
1850 | 0 | return PeerManagerInfo{ |
1851 | 0 | .median_outbound_time_offset = m_outbound_time_offsets.Median(), |
1852 | 0 | .ignores_incoming_txs = m_opts.ignore_incoming_txs, |
1853 | 0 | }; |
1854 | 0 | } |
1855 | | |
1856 | | std::vector<PrivateBroadcast::TxBroadcastInfo> PeerManagerImpl::GetPrivateBroadcastInfo() const |
1857 | 0 | { |
1858 | 0 | return m_tx_for_private_broadcast.GetBroadcastInfo(); |
1859 | 0 | } |
1860 | | |
1861 | | std::vector<CTransactionRef> PeerManagerImpl::AbortPrivateBroadcast(const uint256& id) |
1862 | 0 | { |
1863 | 0 | const auto snapshot{m_tx_for_private_broadcast.GetBroadcastInfo()}; |
1864 | 0 | std::vector<CTransactionRef> removed_txs; |
1865 | |
|
1866 | 0 | size_t connections_cancelled{0}; |
1867 | 0 | for (const auto& [tx, _] : snapshot) { |
1868 | 0 | if (tx->GetHash().ToUint256() != id && tx->GetWitnessHash().ToUint256() != id) continue; |
1869 | 0 | if (const auto peer_acks{m_tx_for_private_broadcast.Remove(tx)}) { |
1870 | 0 | removed_txs.push_back(tx); |
1871 | 0 | if (NUM_PRIVATE_BROADCAST_PER_TX > *peer_acks) { |
1872 | 0 | connections_cancelled += (NUM_PRIVATE_BROADCAST_PER_TX - *peer_acks); |
1873 | 0 | } |
1874 | 0 | } |
1875 | 0 | } |
1876 | 0 | m_connman.m_private_broadcast.NumToOpenSub(connections_cancelled); |
1877 | |
|
1878 | 0 | return removed_txs; |
1879 | 0 | } |
1880 | | |
1881 | | void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx) |
1882 | 4.19M | { |
1883 | 4.19M | if (m_opts.max_extra_txs <= 0) |
1884 | 0 | return; |
1885 | 4.19M | if (!vExtraTxnForCompact.size()) |
1886 | 110k | vExtraTxnForCompact.resize(m_opts.max_extra_txs); |
1887 | 4.19M | vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx); |
1888 | 4.19M | vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs; |
1889 | 4.19M | } |
1890 | | |
1891 | | void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message) |
1892 | 901k | { |
1893 | 901k | LOCK(peer.m_misbehavior_mutex); Line | Count | Source | 268 | 901k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 901k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 901k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 901k | #define PASTE(x, y) x ## y |
|
|
|
|
1894 | | |
1895 | 901k | const std::string message_prefixed = message.empty() ? ""0 : (": " + message); |
1896 | 901k | peer.m_should_discourage = true; |
1897 | 901k | LogDebug(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed); Line | Count | Source | 117 | 901k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 901k | do { \ | 109 | 901k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 901k | } while (0) |
|
|
1898 | 901k | TRACEPOINT(net, misbehaving_connection, |
1899 | 901k | peer.m_id, |
1900 | 901k | message.c_str() |
1901 | 901k | ); |
1902 | 901k | } |
1903 | | |
1904 | | void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, |
1905 | | bool via_compact_block, const std::string& message) |
1906 | 4.99M | { |
1907 | 4.99M | PeerRef peer{GetPeerRef(nodeid)}; |
1908 | 4.99M | switch (state.GetResult()) { |
1909 | 0 | case BlockValidationResult::BLOCK_RESULT_UNSET: |
1910 | 0 | break; |
1911 | 0 | case BlockValidationResult::BLOCK_HEADER_LOW_WORK: |
1912 | | // We didn't try to process the block because the header chain may have |
1913 | | // too little work. |
1914 | 0 | break; |
1915 | | // The node is providing invalid data: |
1916 | 21.0k | case BlockValidationResult::BLOCK_CONSENSUS: |
1917 | 21.0k | case BlockValidationResult::BLOCK_MUTATED: |
1918 | 21.0k | if (!via_compact_block) { |
1919 | 0 | if (peer) Misbehaving(*peer, message); |
1920 | 0 | return; |
1921 | 0 | } |
1922 | 21.0k | break; |
1923 | 58.5k | case BlockValidationResult::BLOCK_CACHED_INVALID: |
1924 | 58.5k | { |
1925 | | // Discourage outbound (but not inbound) peers if on an invalid chain. |
1926 | | // Exempt HB compact block peers. Manual connections are always protected from discouragement. |
1927 | 58.5k | if (peer && !via_compact_block && !peer->m_is_inbound32.0k ) { |
1928 | 25.4k | if (peer) Misbehaving(*peer, message); |
1929 | 25.4k | return; |
1930 | 25.4k | } |
1931 | 33.1k | break; |
1932 | 58.5k | } |
1933 | 838k | case BlockValidationResult::BLOCK_INVALID_HEADER: |
1934 | 855k | case BlockValidationResult::BLOCK_INVALID_PREV: |
1935 | 855k | if (peer) Misbehaving(*peer, message); |
1936 | 855k | return; |
1937 | | // Conflicting (but not necessarily invalid) data or different policy: |
1938 | 0 | case BlockValidationResult::BLOCK_MISSING_PREV: |
1939 | 0 | if (peer) Misbehaving(*peer, message); |
1940 | 0 | return; |
1941 | 4.05M | case BlockValidationResult::BLOCK_TIME_FUTURE: |
1942 | 4.05M | break; |
1943 | 4.99M | } |
1944 | 4.10M | if (message != "") { |
1945 | 4.08M | LogDebug(BCLog::NET, "peer=%d: %s\n", nodeid, message); Line | Count | Source | 117 | 4.08M | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 4.08M | do { \ | 109 | 4.08M | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 4.08M | } while (0) |
|
|
1946 | 4.08M | } |
1947 | 4.10M | } |
1948 | | |
1949 | | bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex& block_index) |
1950 | 0 | { |
1951 | 0 | AssertLockHeld(cs_main); Line | Count | Source | 144 | 0 | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
1952 | 0 | if (m_chainman.ActiveChain().Contains(&block_index)) return true; |
1953 | 0 | return block_index.IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) && |
1954 | 0 | (m_chainman.m_best_header->GetBlockTime() - block_index.GetBlockTime() < STALE_RELAY_AGE_LIMIT) && |
1955 | 0 | (GetBlockProofEquivalentTime(*m_chainman.m_best_header, block_index, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT); |
1956 | 0 | } |
1957 | | |
1958 | | util::Expected<void, std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index) |
1959 | 0 | { |
1960 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) return util::Unexpected{"Loading blocks ..."}; |
1961 | | |
1962 | | // Ensure this peer exists and hasn't been disconnected |
1963 | 0 | PeerRef peer = GetPeerRef(peer_id); |
1964 | 0 | if (peer == nullptr) return util::Unexpected{"Peer does not exist"}; |
1965 | | |
1966 | | // Ignore pre-segwit peers |
1967 | 0 | if (!CanServeWitnesses(*peer)) return util::Unexpected{"Pre-SegWit peer"}; |
1968 | | |
1969 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
1970 | | |
1971 | | // Forget about all prior requests |
1972 | 0 | RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt); |
1973 | | |
1974 | | // Mark block as in-flight |
1975 | 0 | if (!BlockRequested(peer_id, block_index)) return util::Unexpected{"Already requested from this peer"}; |
1976 | | |
1977 | | // Construct message to request the block |
1978 | 0 | const uint256& hash{block_index.GetBlockHash()}; |
1979 | 0 | std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)}; |
1980 | | |
1981 | | // Send block request message to the peer |
1982 | 0 | bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) { |
1983 | 0 | this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs); |
1984 | 0 | return true; |
1985 | 0 | }); |
1986 | |
|
1987 | 0 | if (!success) return util::Unexpected{"Peer not fully connected"}; |
1988 | | |
1989 | 0 | LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
1990 | 0 | hash.ToString(), peer_id); |
1991 | 0 | return {}; |
1992 | 0 | } |
1993 | | |
1994 | | std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman, |
1995 | | BanMan* banman, ChainstateManager& chainman, |
1996 | | CTxMemPool& pool, node::Warnings& warnings, Options opts) |
1997 | 190k | { |
1998 | 190k | return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts); |
1999 | 190k | } |
2000 | | |
2001 | | PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, |
2002 | | BanMan* banman, ChainstateManager& chainman, |
2003 | | CTxMemPool& pool, node::Warnings& warnings, Options opts) |
2004 | 190k | : m_rng{opts.deterministic_rng}, |
2005 | 190k | m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng}, |
2006 | 190k | m_chainparams(chainman.GetParams()), |
2007 | 190k | m_connman(connman), |
2008 | 190k | m_addrman(addrman), |
2009 | 190k | m_banman(banman), |
2010 | 190k | m_chainman(chainman), |
2011 | 190k | m_mempool(pool), |
2012 | 190k | m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.deterministic_rng}), |
2013 | 190k | m_warnings{warnings}, |
2014 | 190k | m_opts{opts} |
2015 | 190k | { |
2016 | | // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation. |
2017 | | // This argument can go away after Erlay support is complete. |
2018 | 190k | if (opts.reconcile_txs) { |
2019 | 0 | m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION); |
2020 | 0 | } |
2021 | 190k | } |
2022 | | |
2023 | | void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) |
2024 | 0 | { |
2025 | | // Stale tip checking and peer eviction are on two different timers, but we |
2026 | | // don't want them to get out of sync due to drift in the scheduler, so we |
2027 | | // combine them in one function and schedule at the quicker (peer-eviction) |
2028 | | // timer. |
2029 | 0 | static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer"); |
2030 | 0 | scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL}); |
2031 | | |
2032 | | // schedule next run for 10-15 minutes in the future |
2033 | 0 | const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); |
2034 | 0 | scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); |
2035 | |
|
2036 | 0 | if (m_opts.private_broadcast) { |
2037 | 0 | scheduler.scheduleFromNow([&] { ReattemptPrivateBroadcast(scheduler); }, 0min); |
2038 | 0 | } |
2039 | 0 | } |
2040 | | |
2041 | | void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd) |
2042 | 422k | { |
2043 | | // Ensure mempool mutex was released, otherwise deadlock may occur if another thread holding |
2044 | | // m_tx_download_mutex waits on the mempool mutex. |
2045 | 422k | AssertLockNotHeld(m_mempool.cs); Line | Count | Source | 149 | 422k | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
2046 | 422k | AssertLockNotHeld(m_tx_download_mutex); Line | Count | Source | 149 | 422k | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
2047 | | |
2048 | 422k | if (!is_ibd) { |
2049 | 420k | LOCK(m_tx_download_mutex); Line | Count | Source | 268 | 420k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 420k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 420k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 420k | #define PASTE(x, y) x ## y |
|
|
|
|
2050 | | // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due |
2051 | | // to a timelock. Reset the rejection filters to give those transactions another chance if we |
2052 | | // see them again. |
2053 | 420k | m_txdownloadman.ActiveTipChange(); |
2054 | 420k | } |
2055 | 422k | } |
2056 | | |
2057 | | /** |
2058 | | * Evict orphan txn pool entries based on a newly connected |
2059 | | * block, remember the recently confirmed transactions, and delete tracked |
2060 | | * announcements for them. Also save the time of the last tip update and |
2061 | | * possibly reduce dynamic block stalling timeout. |
2062 | | */ |
2063 | | void PeerManagerImpl::BlockConnected( |
2064 | | const ChainstateRole& role, |
2065 | | const std::shared_ptr<const CBlock>& pblock, |
2066 | | const CBlockIndex* pindex) |
2067 | 430k | { |
2068 | | // Update this for all chainstate roles so that we don't mistakenly see peers |
2069 | | // helping us do background IBD as having a stale tip. |
2070 | 430k | m_last_tip_update = GetTime<std::chrono::seconds>(); |
2071 | | |
2072 | | // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value |
2073 | 430k | auto stalling_timeout = m_block_stalling_timeout.load(); |
2074 | 430k | Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT); Line | Count | Source | 125 | 430k | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
2075 | 430k | if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { |
2076 | 0 | const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); |
2077 | 0 | if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { |
2078 | 0 | LogDebug(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout)); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2079 | 0 | } |
2080 | 0 | } |
2081 | | |
2082 | | // The following task can be skipped since we don't maintain a mempool for |
2083 | | // the historical chainstate, or during ibd since we don't receive incoming |
2084 | | // transactions from peers into the mempool. |
2085 | 430k | if (!role.historical && !m_chainman.IsInitialBlockDownload()) { |
2086 | 429k | LOCK(m_tx_download_mutex); Line | Count | Source | 268 | 429k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 429k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 429k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 429k | #define PASTE(x, y) x ## y |
|
|
|
|
2087 | 429k | m_txdownloadman.BlockConnected(pblock); |
2088 | 429k | } |
2089 | 430k | } |
2090 | | |
2091 | | void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) |
2092 | 26.9k | { |
2093 | 26.9k | LOCK(m_tx_download_mutex); Line | Count | Source | 268 | 26.9k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 26.9k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 26.9k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 26.9k | #define PASTE(x, y) x ## y |
|
|
|
|
2094 | 26.9k | m_txdownloadman.BlockDisconnected(); |
2095 | 26.9k | } |
2096 | | |
2097 | | /** |
2098 | | * Maintain state about the best-seen block and fast-announce a compact block |
2099 | | * to compatible peers. |
2100 | | */ |
2101 | | void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) |
2102 | 282k | { |
2103 | 282k | auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64()); |
2104 | | |
2105 | 282k | LOCK(cs_main); Line | Count | Source | 268 | 282k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 282k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 282k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 282k | #define PASTE(x, y) x ## y |
|
|
|
|
2106 | | |
2107 | 282k | if (pindex->nHeight <= m_highest_fast_announce) |
2108 | 9.70k | return; |
2109 | 272k | m_highest_fast_announce = pindex->nHeight; |
2110 | | |
2111 | 272k | if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return0 ; |
2112 | | |
2113 | 272k | uint256 hashBlock(pblock->GetHash()); |
2114 | 272k | const std::shared_future<CSerializedNetMsg> lazy_ser{ |
2115 | 272k | std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); }47.8k )}; |
2116 | | |
2117 | 272k | { |
2118 | 272k | auto most_recent_block_txs = std::make_unique<std::map<GenTxid, CTransactionRef>>(); |
2119 | 426k | for (const auto& tx : pblock->vtx) { |
2120 | 426k | most_recent_block_txs->emplace(tx->GetHash(), tx); |
2121 | 426k | most_recent_block_txs->emplace(tx->GetWitnessHash(), tx); |
2122 | 426k | } |
2123 | | |
2124 | 272k | LOCK(m_most_recent_block_mutex); Line | Count | Source | 268 | 272k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 272k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 272k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 272k | #define PASTE(x, y) x ## y |
|
|
|
|
2125 | 272k | m_most_recent_block_hash = hashBlock; |
2126 | 272k | m_most_recent_block = pblock; |
2127 | 272k | m_most_recent_compact_block = pcmpctblock; |
2128 | 272k | m_most_recent_block_txs = std::move(most_recent_block_txs); |
2129 | 272k | } |
2130 | | |
2131 | 1.06M | m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
2132 | 1.06M | AssertLockHeld(::cs_main); Line | Count | Source | 144 | 1.06M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
2133 | | |
2134 | 1.06M | if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect1.05M ) |
2135 | 9.40k | return; |
2136 | 1.05M | ProcessBlockAvailability(pnode->GetId()); |
2137 | 1.05M | CNodeState &state = *State(pnode->GetId()); |
2138 | | // If the peer has, or we announced to them the previous block already, |
2139 | | // but we don't think they have this one, go ahead and announce it |
2140 | 1.05M | if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex)520k && PeerHasHeader(&state, pindex->pprev)291k ) { |
2141 | | |
2142 | 51.3k | LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock", Line | Count | Source | 117 | 51.3k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 51.3k | do { \ | 109 | 51.3k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 51.3k | } while (0) |
|
|
2143 | 51.3k | hashBlock.ToString(), pnode->GetId()); |
2144 | | |
2145 | 51.3k | const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()}; |
2146 | 51.3k | PushMessage(*pnode, ser_cmpctblock.Copy()); |
2147 | 51.3k | state.pindexBestHeaderSent = pindex; |
2148 | 51.3k | } |
2149 | 1.05M | }); |
2150 | 272k | } |
2151 | | |
2152 | | /** |
2153 | | * Update our best height and announce any block hashes which weren't previously |
2154 | | * in m_chainman.ActiveChain() to our peers. |
2155 | | */ |
2156 | | void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) |
2157 | 403k | { |
2158 | 403k | SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()}); |
2159 | | |
2160 | | // Don't relay inventory during initial block download. |
2161 | 403k | if (fInitialDownload) return1.03k ; |
2162 | | |
2163 | | // Find the hashes of all blocks that weren't previously in the best chain. |
2164 | 402k | std::vector<uint256> vHashes; |
2165 | 402k | const CBlockIndex *pindexToAnnounce = pindexNew; |
2166 | 811k | while (pindexToAnnounce != pindexFork) { |
2167 | 409k | vHashes.push_back(pindexToAnnounce->GetBlockHash()); |
2168 | 409k | pindexToAnnounce = pindexToAnnounce->pprev; |
2169 | 409k | if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { |
2170 | | // Limit announcements in case of a huge reorganization. |
2171 | | // Rely on the peer's synchronization mechanism in that case. |
2172 | 0 | break; |
2173 | 0 | } |
2174 | 409k | } |
2175 | | |
2176 | 402k | { |
2177 | 402k | LOCK(m_peer_mutex); Line | Count | Source | 268 | 402k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 402k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 402k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 402k | #define PASTE(x, y) x ## y |
|
|
|
|
2178 | 1.60M | for (auto& it : m_peer_map) { |
2179 | 1.60M | Peer& peer = *it.second; |
2180 | 1.60M | LOCK(peer.m_block_inv_mutex); Line | Count | Source | 268 | 1.60M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 1.60M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 1.60M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 1.60M | #define PASTE(x, y) x ## y |
|
|
|
|
2181 | 1.63M | for (const uint256& hash : vHashes | std::views::reverse) { |
2182 | 1.63M | peer.m_blocks_for_headers_relay.push_back(hash); |
2183 | 1.63M | } |
2184 | 1.60M | } |
2185 | 402k | } |
2186 | | |
2187 | 402k | m_connman.WakeMessageHandler(); |
2188 | 402k | } |
2189 | | |
2190 | | /** |
2191 | | * Handle invalid block rejection and consequent peer discouragement, maintain which |
2192 | | * peers announce compact blocks. |
2193 | | */ |
2194 | | void PeerManagerImpl::BlockChecked(const std::shared_ptr<const CBlock>& block, const BlockValidationState& state) |
2195 | 451k | { |
2196 | 451k | LOCK(cs_main); Line | Count | Source | 268 | 451k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 451k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 451k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 451k | #define PASTE(x, y) x ## y |
|
|
|
|
2197 | | |
2198 | 451k | const uint256 hash(block->GetHash()); |
2199 | 451k | std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash); |
2200 | | |
2201 | | // If the block failed validation, we know where it came from and we're still connected |
2202 | | // to that peer, maybe punish. |
2203 | 451k | if (state.IsInvalid() && |
2204 | 451k | it != mapBlockSource.end()21.2k && |
2205 | 451k | State(it->second.first)21.2k ) { |
2206 | 21.2k | MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second); |
2207 | 21.2k | } |
2208 | | // Check that: |
2209 | | // 1. The block is valid |
2210 | | // 2. We're not in initial block download |
2211 | | // 3. This is currently the best block we're aware of. We haven't updated |
2212 | | // the tip yet so we have no way to check this directly here. Instead we |
2213 | | // just check that there are currently no other blocks in flight. |
2214 | 430k | else if (state.IsValid() && |
2215 | 430k | !m_chainman.IsInitialBlockDownload() && |
2216 | 430k | mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()302k ) { |
2217 | 225k | if (it != mapBlockSource.end()) { |
2218 | 207k | MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first); |
2219 | 207k | } |
2220 | 225k | } |
2221 | 451k | if (it != mapBlockSource.end()) |
2222 | 434k | mapBlockSource.erase(it); |
2223 | 451k | } |
2224 | | |
2225 | | ////////////////////////////////////////////////////////////////////////////// |
2226 | | // |
2227 | | // Messages |
2228 | | // |
2229 | | |
2230 | | bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash) |
2231 | 0 | { |
2232 | 0 | return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr; |
2233 | 0 | } |
2234 | | |
2235 | | void PeerManagerImpl::SendPings() |
2236 | 0 | { |
2237 | 0 | LOCK(m_peer_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2238 | 0 | for(auto& it : m_peer_map) it.second->m_ping_queued = true; |
2239 | 0 | } |
2240 | | |
2241 | | void PeerManagerImpl::InitiateTxBroadcastToAll(const Txid& txid, const Wtxid& wtxid) |
2242 | 3.31M | { |
2243 | 3.31M | LOCK(m_peer_mutex); Line | Count | Source | 268 | 3.31M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 3.31M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 3.31M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 3.31M | #define PASTE(x, y) x ## y |
|
|
|
|
2244 | 13.2M | for(auto& it : m_peer_map) { |
2245 | 13.2M | Peer& peer = *it.second; |
2246 | 13.2M | auto tx_relay = peer.GetTxRelay(); |
2247 | 13.2M | if (!tx_relay) continue3.33M ; |
2248 | | |
2249 | 9.91M | LOCK(tx_relay->m_tx_inventory_mutex); Line | Count | Source | 268 | 9.91M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 9.91M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 9.91M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 9.91M | #define PASTE(x, y) x ## y |
|
|
|
|
2250 | | // Only queue transactions for announcement once the version handshake |
2251 | | // is completed. The time of arrival for these transactions is |
2252 | | // otherwise at risk of leaking to a spy, if the spy is able to |
2253 | | // distinguish transactions received during the handshake from the rest |
2254 | | // in the announcement. |
2255 | 9.91M | if (tx_relay->m_next_inv_send_time == 0s) continue19.9k ; |
2256 | | |
2257 | 9.89M | const uint256& hash{peer.m_wtxid_relay ? wtxid.ToUint256()0 : txid.ToUint256()}; |
2258 | 9.89M | if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) { |
2259 | 4.31M | tx_relay->m_tx_inventory_to_send.insert(wtxid); |
2260 | 4.31M | } |
2261 | 9.89M | } |
2262 | 3.31M | } |
2263 | | |
2264 | | void PeerManagerImpl::InitiateTxBroadcastPrivate(const CTransactionRef& tx) |
2265 | 0 | { |
2266 | 0 | const auto txstr{strprintf("txid=%s, wtxid=%s", tx->GetHash().ToString(), tx->GetWitnessHash().ToString())};Line | Count | Source | 1172 | 0 | #define strprintf tfm::format |
|
2267 | 0 | if (m_tx_for_private_broadcast.Add(tx)) { |
2268 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Requesting %d new connections due to %s", NUM_PRIVATE_BROADCAST_PER_TX, txstr); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2269 | 0 | m_connman.m_private_broadcast.NumToOpenAdd(NUM_PRIVATE_BROADCAST_PER_TX); |
2270 | 0 | } else { |
2271 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Ignoring unnecessary request to schedule an already scheduled transaction: %s", txstr); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2272 | 0 | } |
2273 | 0 | } |
2274 | | |
2275 | | void PeerManagerImpl::RelayAddress(NodeId originator, |
2276 | | const CAddress& addr, |
2277 | | bool fReachable) |
2278 | 0 | { |
2279 | | // We choose the same nodes within a given 24h window (if the list of connected |
2280 | | // nodes does not change) and we don't relay to nodes that already know an |
2281 | | // address. So within 24h we will likely relay a given address once. This is to |
2282 | | // prevent a peer from unjustly giving their address better propagation by sending |
2283 | | // it to us repeatedly. |
2284 | |
|
2285 | 0 | if (!fReachable && !addr.IsRelayable()) return; |
2286 | | |
2287 | | // Relay to a limited number of other nodes |
2288 | | // Use deterministic randomness to send to the same nodes for 24 hours |
2289 | | // at a time so the m_addr_knowns of the chosen nodes prevent repeats |
2290 | 0 | const uint64_t hash_addr{CServiceHash(0, 0)(addr)}; |
2291 | 0 | const auto current_time{GetTime<std::chrono::seconds>()}; |
2292 | | // Adding address hash makes exact rotation time different per address, while preserving periodicity. |
2293 | 0 | const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)}; |
2294 | 0 | const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) |
2295 | 0 | .Write(hash_addr) |
2296 | 0 | .Write(time_addr)}; |
2297 | | |
2298 | | // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers. |
2299 | 0 | unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1; |
2300 | |
|
2301 | 0 | std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}}; |
2302 | 0 | assert(nRelayNodes <= best.size()); |
2303 | | |
2304 | 0 | LOCK(m_peer_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2305 | |
|
2306 | 0 | for (auto& [id, peer] : m_peer_map) { |
2307 | 0 | if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) { |
2308 | 0 | uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize(); |
2309 | 0 | for (unsigned int i = 0; i < nRelayNodes; i++) { |
2310 | 0 | if (hashKey > best[i].first) { |
2311 | 0 | std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); |
2312 | 0 | best[i] = std::make_pair(hashKey, peer.get()); |
2313 | 0 | break; |
2314 | 0 | } |
2315 | 0 | } |
2316 | 0 | } |
2317 | 0 | }; |
2318 | |
|
2319 | 0 | for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { |
2320 | 0 | PushAddress(*best[i].second, addr); |
2321 | 0 | } |
2322 | 0 | } |
2323 | | |
2324 | | void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) |
2325 | 0 | { |
2326 | 0 | std::shared_ptr<const CBlock> a_recent_block; |
2327 | 0 | std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block; |
2328 | 0 | { |
2329 | 0 | LOCK(m_most_recent_block_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2330 | 0 | a_recent_block = m_most_recent_block; |
2331 | 0 | a_recent_compact_block = m_most_recent_compact_block; |
2332 | 0 | } |
2333 | |
|
2334 | 0 | bool need_activate_chain = false; |
2335 | 0 | { |
2336 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2337 | 0 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash); |
2338 | 0 | if (pindex) { |
2339 | 0 | if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) && |
2340 | 0 | pindex->IsValid(BLOCK_VALID_TREE)) { |
2341 | | // If we have the block and all of its parents, but have not yet validated it, |
2342 | | // we might be in the middle of connecting it (ie in the unlock of cs_main |
2343 | | // before ActivateBestChain but after AcceptBlock). |
2344 | | // In this case, we need to run ActivateBestChain prior to checking the relay |
2345 | | // conditions below. |
2346 | 0 | need_activate_chain = true; |
2347 | 0 | } |
2348 | 0 | } |
2349 | 0 | } // release cs_main before calling ActivateBestChain |
2350 | 0 | if (need_activate_chain) { |
2351 | 0 | BlockValidationState state; |
2352 | 0 | if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { |
2353 | 0 | LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2354 | 0 | } |
2355 | 0 | } |
2356 | |
|
2357 | 0 | const CBlockIndex* pindex{nullptr}; |
2358 | 0 | const CBlockIndex* tip{nullptr}; |
2359 | 0 | bool can_direct_fetch{false}; |
2360 | 0 | FlatFilePos block_pos{}; |
2361 | 0 | { |
2362 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2363 | 0 | pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash); |
2364 | 0 | if (!pindex) { |
2365 | 0 | return; |
2366 | 0 | } |
2367 | 0 | if (!BlockRequestAllowed(*pindex)) { |
2368 | 0 | LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2369 | 0 | return; |
2370 | 0 | } |
2371 | | // disconnect node in case we have reached the outbound limit for serving historical blocks |
2372 | 0 | if (m_connman.OutboundTargetReached(true) && |
2373 | 0 | (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) && |
2374 | 0 | !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target |
2375 | 0 | ) { |
2376 | 0 | LogDebug(BCLog::NET, "historical block serving limit reached, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2377 | 0 | pfrom.fDisconnect = true; |
2378 | 0 | return; |
2379 | 0 | } |
2380 | 0 | tip = m_chainman.ActiveChain().Tip(); |
2381 | | // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold |
2382 | 0 | if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && ( |
2383 | 0 | (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) ) |
2384 | 0 | )) { |
2385 | 0 | LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2386 | | //disconnect node and prevent it from stalling (would otherwise wait for the missing block) |
2387 | 0 | pfrom.fDisconnect = true; |
2388 | 0 | return; |
2389 | 0 | } |
2390 | | // Pruned nodes may have deleted the block, so check whether |
2391 | | // it's available before trying to send. |
2392 | 0 | if (!(pindex->nStatus & BLOCK_HAVE_DATA)) { |
2393 | 0 | return; |
2394 | 0 | } |
2395 | 0 | can_direct_fetch = CanDirectFetch(); |
2396 | 0 | block_pos = pindex->GetBlockPos(); |
2397 | 0 | } |
2398 | | |
2399 | 0 | std::shared_ptr<const CBlock> pblock; |
2400 | 0 | if (a_recent_block && a_recent_block->GetHash() == inv.hash) { |
2401 | 0 | pblock = a_recent_block; |
2402 | 0 | } else if (inv.IsMsgWitnessBlk()) { |
2403 | | // Fast-path: in this case it is possible to serve the block directly from disk, |
2404 | | // as the network format matches the format on disk |
2405 | 0 | if (const auto block_data{m_chainman.m_blockman.ReadRawBlock(block_pos)}) { |
2406 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, std::span{*block_data}); |
2407 | 0 | } else { |
2408 | 0 | if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
2409 | 0 | LogDebug(BCLog::NET, "Block was pruned before it could be read, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2410 | 0 | } else { |
2411 | 0 | LogError("Cannot load block from disk, %s", pfrom.DisconnectMsg());Line | Count | Source | 99 | 0 | #define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
2412 | 0 | } |
2413 | 0 | pfrom.fDisconnect = true; |
2414 | 0 | return; |
2415 | 0 | } |
2416 | | // Don't set pblock as we've sent the block |
2417 | 0 | } else { |
2418 | | // Send block from disk |
2419 | 0 | std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>(); |
2420 | 0 | if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos, inv.hash)) { |
2421 | 0 | if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
2422 | 0 | LogDebug(BCLog::NET, "Block was pruned before it could be read, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2423 | 0 | } else { |
2424 | 0 | LogError("Cannot load block from disk, %s", pfrom.DisconnectMsg());Line | Count | Source | 99 | 0 | #define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
2425 | 0 | } |
2426 | 0 | pfrom.fDisconnect = true; |
2427 | 0 | return; |
2428 | 0 | } |
2429 | 0 | pblock = pblockRead; |
2430 | 0 | } |
2431 | 0 | if (pblock) { |
2432 | 0 | if (inv.IsMsgBlk()) { |
2433 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock)); |
2434 | 0 | } else if (inv.IsMsgWitnessBlk()) { |
2435 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock)); |
2436 | 0 | } else if (inv.IsMsgFilteredBlk()) { |
2437 | 0 | bool sendMerkleBlock = false; |
2438 | 0 | CMerkleBlock merkleBlock; |
2439 | 0 | if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { |
2440 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2441 | 0 | if (tx_relay->m_bloom_filter) { |
2442 | 0 | sendMerkleBlock = true; |
2443 | 0 | merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter); |
2444 | 0 | } |
2445 | 0 | } |
2446 | 0 | if (sendMerkleBlock) { |
2447 | 0 | MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock); |
2448 | | // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see |
2449 | | // This avoids hurting performance by pointlessly requiring a round-trip |
2450 | | // Note that there is currently no way for a node to request any single transactions we didn't send here - |
2451 | | // they must either disconnect and retry or request the full block. |
2452 | | // Thus, the protocol spec specified allows for us to provide duplicate txn here, |
2453 | | // however we MUST always provide at least what the remote peer needs |
2454 | 0 | for (const auto& [tx_idx, _] : merkleBlock.vMatchedTxn) |
2455 | 0 | MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[tx_idx])); |
2456 | 0 | } |
2457 | | // else |
2458 | | // no response |
2459 | 0 | } else if (inv.IsMsgCmpctBlk()) { |
2460 | | // If a peer is asking for old blocks, we're almost guaranteed |
2461 | | // they won't have a useful mempool to match against a compact block, |
2462 | | // and we don't feel like constructing the object for them, so |
2463 | | // instead we respond with the full, non-compact block. |
2464 | 0 | if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) { |
2465 | 0 | if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == inv.hash) { |
2466 | 0 | MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block); |
2467 | 0 | } else { |
2468 | 0 | CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()}; |
2469 | 0 | MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock); |
2470 | 0 | } |
2471 | 0 | } else { |
2472 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock)); |
2473 | 0 | } |
2474 | 0 | } |
2475 | 0 | } |
2476 | |
|
2477 | 0 | { |
2478 | 0 | LOCK(peer.m_block_inv_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2479 | | // Trigger the peer node to send a getblocks request for the next batch of inventory |
2480 | 0 | if (inv.hash == peer.m_continuation_block) { |
2481 | | // Send immediately. This must send even if redundant, |
2482 | | // and we want it right after the last block so they don't |
2483 | | // wait for other stuff first. |
2484 | 0 | std::vector<CInv> vInv; |
2485 | 0 | vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash()); |
2486 | 0 | MakeAndPushMessage(pfrom, NetMsgType::INV, vInv); |
2487 | 0 | peer.m_continuation_block.SetNull(); |
2488 | 0 | } |
2489 | 0 | } |
2490 | 0 | } |
2491 | | |
2492 | | CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) |
2493 | 0 | { |
2494 | | // If a tx was in the mempool prior to the last INV for this peer, permit the request. |
2495 | 0 | auto txinfo{std::visit( |
2496 | 0 | [&](const auto& id) { |
2497 | 0 | return m_mempool.info_for_relay(id, WITH_LOCK(tx_relay.m_tx_inventory_mutex, return tx_relay.m_last_inv_sequence)); Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
| return m_mempool.info_for_relay(id, WITH_LOCK(tx_relay.m_tx_inventory_mutex, return tx_relay.m_last_inv_sequence)); Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
2498 | 0 | }, Unexecuted instantiation: net_processing.cpp:_ZZN12_GLOBAL__N_115PeerManagerImpl16FindTxForGetDataERKNS_4Peer7TxRelayERK7GenTxidENK3$_0clI22transaction_identifierILb0EEEEDaRKT_ Unexecuted instantiation: net_processing.cpp:_ZZN12_GLOBAL__N_115PeerManagerImpl16FindTxForGetDataERKNS_4Peer7TxRelayERK7GenTxidENK3$_0clI22transaction_identifierILb1EEEEDaRKT_ |
2499 | 0 | gtxid)}; |
2500 | 0 | if (txinfo.tx) { |
2501 | 0 | return std::move(txinfo.tx); |
2502 | 0 | } |
2503 | | |
2504 | | // Or it might be from the most recent block |
2505 | 0 | { |
2506 | 0 | LOCK(m_most_recent_block_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2507 | 0 | if (m_most_recent_block_txs != nullptr) { |
2508 | 0 | auto it = m_most_recent_block_txs->find(gtxid); |
2509 | 0 | if (it != m_most_recent_block_txs->end()) return it->second; |
2510 | 0 | } |
2511 | 0 | } |
2512 | | |
2513 | 0 | return {}; |
2514 | 0 | } |
2515 | | |
2516 | | void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) |
2517 | 0 | { |
2518 | 0 | AssertLockNotHeld(cs_main); Line | Count | Source | 149 | 0 | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
2519 | |
|
2520 | 0 | auto tx_relay = peer.GetTxRelay(); |
2521 | |
|
2522 | 0 | std::deque<CInv>::iterator it = peer.m_getdata_requests.begin(); |
2523 | 0 | std::vector<CInv> vNotFound; |
2524 | | |
2525 | | // Process as many TX items from the front of the getdata queue as |
2526 | | // possible, since they're common and it's efficient to batch process |
2527 | | // them. |
2528 | 0 | while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) { |
2529 | 0 | if (interruptMsgProc) return; |
2530 | | // The send buffer provides backpressure. If there's no space in |
2531 | | // the buffer, pause processing until the next call. |
2532 | 0 | if (pfrom.fPauseSend) break; |
2533 | | |
2534 | 0 | const CInv &inv = *it++; |
2535 | |
|
2536 | 0 | if (tx_relay == nullptr) { |
2537 | | // Ignore GETDATA requests for transactions from block-relay-only |
2538 | | // peers and peers that asked us not to announce transactions. |
2539 | 0 | continue; |
2540 | 0 | } |
2541 | | |
2542 | 0 | if (auto tx{FindTxForGetData(*tx_relay, ToGenTxid(inv))}) { |
2543 | | // WTX and WITNESS_TX imply we serialize with witness |
2544 | 0 | const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS); |
2545 | 0 | MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx)); |
2546 | 0 | m_mempool.RemoveUnbroadcastTx(tx->GetHash()); |
2547 | 0 | } else { |
2548 | 0 | vNotFound.push_back(inv); |
2549 | 0 | } |
2550 | 0 | } |
2551 | | |
2552 | | // Only process one BLOCK item per call, since they're uncommon and can be |
2553 | | // expensive to process. |
2554 | 0 | if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) { |
2555 | 0 | const CInv &inv = *it++; |
2556 | 0 | if (inv.IsGenBlkMsg()) { |
2557 | 0 | ProcessGetBlockData(pfrom, peer, inv); |
2558 | 0 | } |
2559 | | // else: If the first item on the queue is an unknown type, we erase it |
2560 | | // and continue processing the queue on the next call. |
2561 | | // NOTE: previously we wouldn't do so and the peer sending us a malformed GETDATA could |
2562 | | // result in never making progress and this thread using 100% allocated CPU. See |
2563 | | // https://bitcoincore.org/en/2024/07/03/disclose-getdata-cpu. |
2564 | 0 | } |
2565 | |
|
2566 | 0 | peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it); |
2567 | |
|
2568 | 0 | if (!vNotFound.empty()) { |
2569 | | // Let the peer know that we didn't find what it asked for, so it doesn't |
2570 | | // have to wait around forever. |
2571 | | // SPV clients care about this message: it's needed when they are |
2572 | | // recursively walking the dependencies of relevant unconfirmed |
2573 | | // transactions. SPV clients want to do that because they want to know |
2574 | | // about (and store and rebroadcast and risk analyze) the dependencies |
2575 | | // of transactions relevant to them, without having to download the |
2576 | | // entire memory pool. |
2577 | | // Also, other nodes can use these messages to automatically request a |
2578 | | // transaction from some other peer that announced it, and stop |
2579 | | // waiting for us to respond. |
2580 | | // In normal operation, we often send NOTFOUND messages for parents of |
2581 | | // transactions that we relay; if a peer is missing a parent, they may |
2582 | | // assume we have them and request the parents from us. |
2583 | 0 | MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound); |
2584 | 0 | } |
2585 | 0 | } |
2586 | | |
2587 | | uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const |
2588 | 931k | { |
2589 | 931k | uint32_t nFetchFlags = 0; |
2590 | 931k | if (CanServeWitnesses(peer)) { |
2591 | 185k | nFetchFlags |= MSG_WITNESS_FLAG; |
2592 | 185k | } |
2593 | 931k | return nFetchFlags; |
2594 | 931k | } |
2595 | | |
2596 | | void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req) |
2597 | 0 | { |
2598 | 0 | BlockTransactions resp(req); |
2599 | 0 | for (size_t i = 0; i < req.indexes.size(); i++) { |
2600 | 0 | if (req.indexes[i] >= block.vtx.size()) { |
2601 | 0 | Misbehaving(peer, "getblocktxn with out-of-bounds tx indices"); |
2602 | 0 | return; |
2603 | 0 | } |
2604 | 0 | resp.txn[i] = block.vtx[req.indexes[i]]; |
2605 | 0 | } |
2606 | | |
2607 | 0 | if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) { |
2608 | 0 | uint32_t tx_requested_size{0}; |
2609 | 0 | for (const auto& tx : resp.txn) tx_requested_size += tx->ComputeTotalSize(); |
2610 | 0 | LogDebug(BCLog::CMPCTBLOCK, "Peer %d sent us a GETBLOCKTXN for block %s, sending a BLOCKTXN with %u txns. (%u bytes)\n", pfrom.GetId(), block.GetHash().ToString(), resp.txn.size(), tx_requested_size); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2611 | 0 | } |
2612 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp); |
2613 | 0 | } |
2614 | | |
2615 | | bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, Peer& peer) |
2616 | 8.64M | { |
2617 | | // Do these headers have proof-of-work matching what's claimed? |
2618 | 8.64M | if (!HasValidProofOfWork(headers, m_chainparams.GetConsensus())) { |
2619 | 0 | Misbehaving(peer, "header with invalid proof of work"); |
2620 | 0 | return false; |
2621 | 0 | } |
2622 | | |
2623 | | // Are these headers connected to each other? |
2624 | 8.64M | if (!CheckHeadersAreContinuous(headers)) { |
2625 | 0 | Misbehaving(peer, "non-continuous headers sequence"); |
2626 | 0 | return false; |
2627 | 0 | } |
2628 | 8.64M | return true; |
2629 | 8.64M | } |
2630 | | |
2631 | | arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() |
2632 | 4.92M | { |
2633 | 4.92M | arith_uint256 near_chaintip_work = 0; |
2634 | 4.92M | LOCK(cs_main); Line | Count | Source | 268 | 4.92M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 4.92M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 4.92M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 4.92M | #define PASTE(x, y) x ## y |
|
|
|
|
2635 | 4.92M | if (m_chainman.ActiveChain().Tip() != nullptr) { |
2636 | 4.92M | const CBlockIndex *tip = m_chainman.ActiveChain().Tip(); |
2637 | | // Use a 144 block buffer, so that we'll accept headers that fork from |
2638 | | // near our tip. |
2639 | 4.92M | near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork); |
2640 | 4.92M | } |
2641 | 4.92M | return std::max(near_chaintip_work, m_chainman.MinimumChainWork()); |
2642 | 4.92M | } |
2643 | | |
2644 | | /** |
2645 | | * Special handling for unconnecting headers that might be part of a block |
2646 | | * announcement. |
2647 | | * |
2648 | | * We'll send a getheaders message in response to try to connect the chain. |
2649 | | */ |
2650 | | void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, |
2651 | | const std::vector<CBlockHeader>& headers) |
2652 | 253k | { |
2653 | | // Try to fill in the missing headers. |
2654 | 253k | const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};Line | Count | Source | 299 | 253k | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
2655 | 253k | if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) { |
2656 | 32.3k | LogDebug(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n", Line | Count | Source | 117 | 32.3k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 32.3k | do { \ | 109 | 32.3k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 32.3k | } while (0) |
|
|
2657 | 32.3k | headers[0].GetHash().ToString(), |
2658 | 32.3k | headers[0].hashPrevBlock.ToString(), |
2659 | 32.3k | best_header->nHeight, |
2660 | 32.3k | pfrom.GetId()); |
2661 | 32.3k | } |
2662 | | |
2663 | | // Set hashLastUnknownBlock for this peer, so that if we |
2664 | | // eventually get the headers - even from a different peer - |
2665 | | // we can use this peer to download. |
2666 | 253k | WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash())); Line | Count | Source | 299 | 253k | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
2667 | 253k | } |
2668 | | |
2669 | | bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const |
2670 | 8.64M | { |
2671 | 8.64M | uint256 hashLastBlock; |
2672 | 8.64M | for (const CBlockHeader& header : headers) { |
2673 | 8.64M | if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock0 ) { |
2674 | 0 | return false; |
2675 | 0 | } |
2676 | 8.64M | hashLastBlock = header.GetHash(); |
2677 | 8.64M | } |
2678 | 8.64M | return true; |
2679 | 8.64M | } |
2680 | | |
2681 | | bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers) |
2682 | 8.64M | { |
2683 | 8.64M | if (peer.m_headers_sync) { |
2684 | 0 | auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result); |
2685 | | // If it is a valid continuation, we should treat the existing getheaders request as responded to. |
2686 | 0 | if (result.success) peer.m_last_getheaders_timestamp = {}; |
2687 | 0 | if (result.request_more) { |
2688 | 0 | auto locator = peer.m_headers_sync->NextHeadersRequestLocator(); |
2689 | | // If we were instructed to ask for a locator, it should not be empty. |
2690 | 0 | Assume(!locator.vHave.empty()); Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
2691 | | // We can only be instructed to request more if processing was successful. |
2692 | 0 | Assume(result.success); Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
2693 | 0 | if (!locator.vHave.empty()) { |
2694 | | // It should be impossible for the getheaders request to fail, |
2695 | | // because we just cleared the last getheaders timestamp. |
2696 | 0 | bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer); |
2697 | 0 | Assume(sent_getheaders); Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
2698 | 0 | LogDebug(BCLog::NET, "more getheaders (from %s) to peer=%d\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2699 | 0 | locator.vHave.front().ToString(), pfrom.GetId()); |
2700 | 0 | } |
2701 | 0 | } |
2702 | |
|
2703 | 0 | if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) { |
2704 | 0 | peer.m_headers_sync.reset(nullptr); |
2705 | | |
2706 | | // Delete this peer's entry in m_headers_presync_stats. |
2707 | | // If this is m_headers_presync_bestpeer, it will be replaced later |
2708 | | // by the next peer that triggers the else{} branch below. |
2709 | 0 | LOCK(m_headers_presync_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2710 | 0 | m_headers_presync_stats.erase(pfrom.GetId()); |
2711 | 0 | } else { |
2712 | | // Build statistics for this peer's sync. |
2713 | 0 | HeadersPresyncStats stats; |
2714 | 0 | stats.first = peer.m_headers_sync->GetPresyncWork(); |
2715 | 0 | if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) { |
2716 | 0 | stats.second = {peer.m_headers_sync->GetPresyncHeight(), |
2717 | 0 | peer.m_headers_sync->GetPresyncTime()}; |
2718 | 0 | } |
2719 | | |
2720 | | // Update statistics in stats. |
2721 | 0 | LOCK(m_headers_presync_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2722 | 0 | m_headers_presync_stats[pfrom.GetId()] = stats; |
2723 | 0 | auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer); |
2724 | 0 | bool best_updated = false; |
2725 | 0 | if (best_it == m_headers_presync_stats.end()) { |
2726 | | // If the cached best peer is outdated, iterate over all remaining ones (including |
2727 | | // newly updated one) to find the best one. |
2728 | 0 | NodeId peer_best{-1}; |
2729 | 0 | const HeadersPresyncStats* stat_best{nullptr}; |
2730 | 0 | for (const auto& [peer, stat] : m_headers_presync_stats) { |
2731 | 0 | if (!stat_best || stat > *stat_best) { |
2732 | 0 | peer_best = peer; |
2733 | 0 | stat_best = &stat; |
2734 | 0 | } |
2735 | 0 | } |
2736 | 0 | m_headers_presync_bestpeer = peer_best; |
2737 | 0 | best_updated = (peer_best == pfrom.GetId()); |
2738 | 0 | } else if (best_it->first == pfrom.GetId() || stats > best_it->second) { |
2739 | | // pfrom was and remains the best peer, or pfrom just became best. |
2740 | 0 | m_headers_presync_bestpeer = pfrom.GetId(); |
2741 | 0 | best_updated = true; |
2742 | 0 | } |
2743 | 0 | if (best_updated && stats.second.has_value()) { |
2744 | | // If the best peer updated, and it is in its first phase, signal. |
2745 | 0 | m_headers_presync_should_signal = true; |
2746 | 0 | } |
2747 | 0 | } |
2748 | |
|
2749 | 0 | if (result.success) { |
2750 | | // We only overwrite the headers passed in if processing was |
2751 | | // successful. |
2752 | 0 | headers.swap(result.pow_validated_headers); |
2753 | 0 | } |
2754 | |
|
2755 | 0 | return result.success; |
2756 | 0 | } |
2757 | | // Either we didn't have a sync in progress, or something went wrong |
2758 | | // processing these headers, or we are returning headers to the caller to |
2759 | | // process. |
2760 | 8.64M | return false; |
2761 | 8.64M | } |
2762 | | |
2763 | | bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex& chain_start_header, std::vector<CBlockHeader>& headers) |
2764 | 1.47M | { |
2765 | | // Calculate the claimed total work on this chain. |
2766 | 1.47M | arith_uint256 total_work = chain_start_header.nChainWork + CalculateClaimedHeadersWork(headers); |
2767 | | |
2768 | | // Our dynamic anti-DoS threshold (minimum work required on a headers chain |
2769 | | // before we'll store it) |
2770 | 1.47M | arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold(); |
2771 | | |
2772 | | // Avoid DoS via low-difficulty-headers by only processing if the headers |
2773 | | // are part of a chain with sufficient work. |
2774 | 1.47M | if (total_work < minimum_chain_work) { |
2775 | | // Only try to sync with this peer if their headers message was full; |
2776 | | // otherwise they don't have more headers after this so no point in |
2777 | | // trying to sync their too-little-work chain. |
2778 | 0 | if (headers.size() == m_opts.max_headers_result) { |
2779 | | // Note: we could advance to the last header in this set that is |
2780 | | // known to us, rather than starting at the first header (which we |
2781 | | // may already have); however this is unlikely to matter much since |
2782 | | // ProcessHeadersMessage() already handles the case where all |
2783 | | // headers in a received message are already known and are |
2784 | | // ancestors of m_best_header or chainActive.Tip(), by skipping |
2785 | | // this logic in that case. So even if the first header in this set |
2786 | | // of headers is known, some header in this set must be new, so |
2787 | | // advancing to the first unknown header would be a small effect. |
2788 | 0 | LOCK(peer.m_headers_sync_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2789 | 0 | peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(), |
2790 | 0 | m_chainparams.HeadersSync(), chain_start_header, minimum_chain_work)); |
2791 | | |
2792 | | // Now a HeadersSyncState object for tracking this synchronization |
2793 | | // is created, process the headers using it as normal. Failures are |
2794 | | // handled inside of IsContinuationOfLowWorkHeadersSync. |
2795 | 0 | (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); |
2796 | 0 | } else { |
2797 | 0 | LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header.nHeight + headers.size(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2798 | 0 | } |
2799 | | |
2800 | | // The peer has not yet given us a chain that meets our work threshold, |
2801 | | // so we want to prevent further processing of the headers in any case. |
2802 | 0 | headers = {}; |
2803 | 0 | return true; |
2804 | 0 | } |
2805 | | |
2806 | 1.47M | return false; |
2807 | 1.47M | } |
2808 | | |
2809 | | bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) |
2810 | 8.39M | { |
2811 | 8.39M | if (header == nullptr) { |
2812 | 4.78M | return false; |
2813 | 4.78M | } else if (3.60M m_chainman.m_best_header != nullptr3.60M && header == m_chainman.m_best_header->GetAncestor(header->nHeight)3.60M ) { |
2814 | 2.48M | return true; |
2815 | 2.48M | } else if (1.12M m_chainman.ActiveChain().Contains(header)1.12M ) { |
2816 | 372k | return true; |
2817 | 372k | } |
2818 | 753k | return false; |
2819 | 8.39M | } |
2820 | | |
2821 | | bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) |
2822 | 452k | { |
2823 | 452k | const auto current_time = NodeClock::now(); |
2824 | | |
2825 | | // Only allow a new getheaders message to go out if we don't have a recent |
2826 | | // one already in-flight |
2827 | 452k | if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) { |
2828 | 207k | MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256()); |
2829 | 207k | peer.m_last_getheaders_timestamp = current_time; |
2830 | 207k | return true; |
2831 | 207k | } |
2832 | 244k | return false; |
2833 | 452k | } |
2834 | | |
2835 | | /* |
2836 | | * Given a new headers tip ending in last_header, potentially request blocks towards that tip. |
2837 | | * We require that the given tip have at least as much work as our tip, and for |
2838 | | * our current tip to be "close to synced" (see CanDirectFetch()). |
2839 | | */ |
2840 | | void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header) |
2841 | 3.80M | { |
2842 | 3.80M | LOCK(cs_main); Line | Count | Source | 268 | 3.80M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 3.80M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 3.80M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 3.80M | #define PASTE(x, y) x ## y |
|
|
|
|
2843 | 3.80M | CNodeState *nodestate = State(pfrom.GetId()); |
2844 | | |
2845 | 3.80M | if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE)2.05M && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork2.05M ) { |
2846 | 1.80M | std::vector<const CBlockIndex*> vToFetch; |
2847 | 1.80M | const CBlockIndex* pindexWalk{&last_header}; |
2848 | | // Calculate all the blocks we'd need to switch to last_header, up to a limit. |
2849 | 3.19M | while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER1.39M ) { |
2850 | 1.39M | if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && |
2851 | 1.39M | !IsBlockRequested(pindexWalk->GetBlockHash())1.33M && |
2852 | 1.39M | (964k !DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT)964k || CanServeWitnesses(peer)964k )) { |
2853 | | // We don't have this block, and it's not yet in flight. |
2854 | 44.0k | vToFetch.push_back(pindexWalk); |
2855 | 44.0k | } |
2856 | 1.39M | pindexWalk = pindexWalk->pprev; |
2857 | 1.39M | } |
2858 | | // If pindexWalk still isn't on our main chain, we're looking at a |
2859 | | // very large reorg at a time we think we're close to caught up to |
2860 | | // the main chain -- this shouldn't really happen. Bail out on the |
2861 | | // direct fetch and rely on parallel download instead. |
2862 | 1.80M | if (!m_chainman.ActiveChain().Contains(pindexWalk)) { |
2863 | 0 | LogDebug(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
2864 | 0 | last_header.GetBlockHash().ToString(), |
2865 | 0 | last_header.nHeight); |
2866 | 1.80M | } else { |
2867 | 1.80M | std::vector<CInv> vGetData; |
2868 | | // Download as much as possible, from earliest to latest. |
2869 | 1.80M | for (const CBlockIndex* pindex : vToFetch | std::views::reverse) { |
2870 | 33.9k | if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { |
2871 | | // Can't download any more from this peer |
2872 | 27.1k | break; |
2873 | 27.1k | } |
2874 | 6.88k | uint32_t nFetchFlags = GetFetchFlags(peer); |
2875 | 6.88k | vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()); |
2876 | 6.88k | BlockRequested(pfrom.GetId(), *pindex); |
2877 | 6.88k | LogDebug(BCLog::NET, "Requesting block %s from peer=%d", Line | Count | Source | 117 | 6.88k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 6.88k | do { \ | 109 | 6.88k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 6.88k | } while (0) |
|
|
2878 | 6.88k | pindex->GetBlockHash().ToString(), pfrom.GetId()); |
2879 | 6.88k | } |
2880 | 1.80M | if (vGetData.size() > 1) { |
2881 | 158 | LogDebug(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n", Line | Count | Source | 117 | 158 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 158 | do { \ | 109 | 158 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 158 | } while (0) |
|
|
2882 | 158 | last_header.GetBlockHash().ToString(), |
2883 | 158 | last_header.nHeight); |
2884 | 158 | } |
2885 | 1.80M | if (vGetData.size() > 0) { |
2886 | 6.66k | if (!m_opts.ignore_incoming_txs && |
2887 | 6.66k | nodestate->m_provides_cmpctblocks && |
2888 | 6.66k | vGetData.size() == 11.77k && |
2889 | 6.66k | mapBlocksInFlight.size() == 11.76k && |
2890 | 6.66k | last_header.pprev->IsValid(BLOCK_VALID_CHAIN)556 ) { |
2891 | | // In any case, we want to download using a compact block, not a regular one |
2892 | 535 | vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); |
2893 | 535 | } |
2894 | 6.66k | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData); |
2895 | 6.66k | } |
2896 | 1.80M | } |
2897 | 1.80M | } |
2898 | 3.80M | } |
2899 | | |
2900 | | /** |
2901 | | * Given receipt of headers from a peer ending in last_header, along with |
2902 | | * whether that header was new and whether the headers message was full, |
2903 | | * update the state we keep for the peer. |
2904 | | */ |
2905 | | void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, |
2906 | | const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers) |
2907 | 3.80M | { |
2908 | 3.80M | LOCK(cs_main); Line | Count | Source | 268 | 3.80M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 3.80M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 3.80M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 3.80M | #define PASTE(x, y) x ## y |
|
|
|
|
2909 | 3.80M | CNodeState *nodestate = State(pfrom.GetId()); |
2910 | | |
2911 | 3.80M | UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash()); |
2912 | | |
2913 | | // From here, pindexBestKnownBlock should be guaranteed to be non-null, |
2914 | | // because it is set in UpdateBlockAvailability. Some nullptr checks |
2915 | | // are still present, however, as belt-and-suspenders. |
2916 | | |
2917 | 3.80M | if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork231k ) { |
2918 | 219k | nodestate->m_last_block_announcement = GetTime(); |
2919 | 219k | } |
2920 | | |
2921 | | // If we're in IBD, we want outbound peers that will serve us a useful |
2922 | | // chain. Disconnect peers that are on chains with insufficient work. |
2923 | 3.80M | if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers867k ) { |
2924 | | // If the peer has no more headers to give us, then we know we have |
2925 | | // their tip. |
2926 | 867k | if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) { |
2927 | | // This peer has too little work on their headers chain to help |
2928 | | // us sync -- disconnect if it is an outbound disconnection |
2929 | | // candidate. |
2930 | | // Note: We compare their tip to the minimum chain work (rather than |
2931 | | // m_chainman.ActiveChain().Tip()) because we won't start block download |
2932 | | // until we have a headers chain that has at least |
2933 | | // the minimum chain work, even if a peer has a chain past our tip, |
2934 | | // as an anti-DoS measure. |
2935 | 0 | if (pfrom.IsOutboundOrBlockRelayConn()) { |
2936 | 0 | LogInfo("outbound peer headers chain has insufficient work, %s", pfrom.DisconnectMsg());Line | Count | Source | 97 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
2937 | 0 | pfrom.fDisconnect = true; |
2938 | 0 | } |
2939 | 0 | } |
2940 | 867k | } |
2941 | | |
2942 | | // If this is an outbound full-relay peer, check to see if we should protect |
2943 | | // it from the bad/lagging chain logic. |
2944 | | // Note that outbound block-relay peers are excluded from this protection, and |
2945 | | // thus always subject to eviction under the bad/lagging chain logic. |
2946 | | // See ChainSyncTimeoutState. |
2947 | 3.80M | if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr273k ) { |
2948 | 273k | if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect245k ) { |
2949 | 14.4k | LogDebug(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId()); Line | Count | Source | 117 | 14.4k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 14.4k | do { \ | 109 | 14.4k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 14.4k | } while (0) |
|
|
2950 | 14.4k | nodestate->m_chain_sync.m_protect = true; |
2951 | 14.4k | ++m_outbound_peers_with_protect_from_disconnect; |
2952 | 14.4k | } |
2953 | 273k | } |
2954 | 3.80M | } |
2955 | | |
2956 | | void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer, |
2957 | | std::vector<CBlockHeader>&& headers, |
2958 | | bool via_compact_block) |
2959 | 8.64M | { |
2960 | 8.64M | size_t nCount = headers.size(); |
2961 | | |
2962 | 8.64M | if (nCount == 0) { |
2963 | | // Nothing interesting. Stop asking this peers for more headers. |
2964 | | // If we were in the middle of headers sync, receiving an empty headers |
2965 | | // message suggests that the peer suddenly has nothing to give us |
2966 | | // (perhaps it reorged to our chain). Clear download state for this peer. |
2967 | 0 | LOCK(peer.m_headers_sync_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2968 | 0 | if (peer.m_headers_sync) { |
2969 | 0 | peer.m_headers_sync.reset(nullptr); |
2970 | 0 | LOCK(m_headers_presync_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
2971 | 0 | m_headers_presync_stats.erase(pfrom.GetId()); |
2972 | 0 | } |
2973 | | // A headers message with no headers cannot be an announcement, so assume |
2974 | | // it is a response to our last getheaders request, if there is one. |
2975 | 0 | peer.m_last_getheaders_timestamp = {}; |
2976 | 0 | return; |
2977 | 0 | } |
2978 | | |
2979 | | // Before we do any processing, make sure these pass basic sanity checks. |
2980 | | // We'll rely on headers having valid proof-of-work further down, as an |
2981 | | // anti-DoS criteria (note: this check is required before passing any |
2982 | | // headers into HeadersSyncState). |
2983 | 8.64M | if (!CheckHeadersPoW(headers, peer)) { |
2984 | | // Misbehaving() calls are handled within CheckHeadersPoW(), so we can |
2985 | | // just return. (Note that even if a header is announced via compact |
2986 | | // block, the header itself should be valid, so this type of error can |
2987 | | // always be punished.) |
2988 | 0 | return; |
2989 | 0 | } |
2990 | | |
2991 | 8.64M | const CBlockIndex *pindexLast = nullptr; |
2992 | | |
2993 | | // We'll set already_validated_work to true if these headers are |
2994 | | // successfully processed as part of a low-work headers sync in progress |
2995 | | // (either in PRESYNC or REDOWNLOAD phase). |
2996 | | // If true, this will mean that any headers returned to us (ie during |
2997 | | // REDOWNLOAD) can be validated without further anti-DoS checks. |
2998 | 8.64M | bool already_validated_work = false; |
2999 | | |
3000 | | // If we're in the middle of headers sync, let it do its magic. |
3001 | 8.64M | bool have_headers_sync = false; |
3002 | 8.64M | { |
3003 | 8.64M | LOCK(peer.m_headers_sync_mutex); Line | Count | Source | 268 | 8.64M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 8.64M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 8.64M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 8.64M | #define PASTE(x, y) x ## y |
|
|
|
|
3004 | | |
3005 | 8.64M | already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); |
3006 | | |
3007 | | // The headers we passed in may have been: |
3008 | | // - untouched, perhaps if no headers-sync was in progress, or some |
3009 | | // failure occurred |
3010 | | // - erased, such as if the headers were successfully processed and no |
3011 | | // additional headers processing needs to take place (such as if we |
3012 | | // are still in PRESYNC) |
3013 | | // - replaced with headers that are now ready for validation, such as |
3014 | | // during the REDOWNLOAD phase of a low-work headers sync. |
3015 | | // So just check whether we still have headers that we need to process, |
3016 | | // or not. |
3017 | 8.64M | if (headers.empty()) { |
3018 | 0 | return; |
3019 | 0 | } |
3020 | | |
3021 | 8.64M | have_headers_sync = !!peer.m_headers_sync; |
3022 | 8.64M | } |
3023 | | |
3024 | | // Do these headers connect to something in our block index? |
3025 | 8.64M | const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};Line | Count | Source | 299 | 8.64M | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
3026 | 8.64M | bool headers_connect_blockindex{chain_start_header != nullptr}; |
3027 | | |
3028 | 8.64M | if (!headers_connect_blockindex) { |
3029 | | // This could be a BIP 130 block announcement, use |
3030 | | // special logic for handling headers that don't connect, as this |
3031 | | // could be benign. |
3032 | 253k | HandleUnconnectingHeaders(pfrom, peer, headers); |
3033 | 253k | return; |
3034 | 253k | } |
3035 | | |
3036 | | // If headers connect, assume that this is in response to any outstanding getheaders |
3037 | | // request we may have sent, and clear out the time of our last request. Non-connecting |
3038 | | // headers cannot be a response to a getheaders request. |
3039 | 8.39M | peer.m_last_getheaders_timestamp = {}; |
3040 | | |
3041 | | // If the headers we received are already in memory and an ancestor of |
3042 | | // m_best_header or our tip, skip anti-DoS checks. These headers will not |
3043 | | // use any more memory (and we are not leaking information that could be |
3044 | | // used to fingerprint us). |
3045 | 8.39M | const CBlockIndex *last_received_header{nullptr}; |
3046 | 8.39M | { |
3047 | 8.39M | LOCK(cs_main); Line | Count | Source | 268 | 8.39M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 8.39M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 8.39M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 8.39M | #define PASTE(x, y) x ## y |
|
|
|
|
3048 | 8.39M | last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()); |
3049 | 8.39M | already_validated_work = already_validated_work || IsAncestorOfBestHeaderOrTip(last_received_header); |
3050 | 8.39M | } |
3051 | | |
3052 | | // If our peer has NetPermissionFlags::NoBan privileges, then bypass our |
3053 | | // anti-DoS logic (this saves bandwidth when we connect to a trusted peer |
3054 | | // on startup). |
3055 | 8.39M | if (pfrom.HasPermission(NetPermissionFlags::NoBan)) { |
3056 | 5.91M | already_validated_work = true; |
3057 | 5.91M | } |
3058 | | |
3059 | | // At this point, the headers connect to something in our block index. |
3060 | | // Do anti-DoS checks to determine if we should process or store for later |
3061 | | // processing. |
3062 | 8.39M | if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom, |
3063 | 1.47M | *chain_start_header, headers)) { |
3064 | | // If we successfully started a low-work headers sync, then there |
3065 | | // should be no headers to process any further. |
3066 | 0 | Assume(headers.empty()); Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
3067 | 0 | return; |
3068 | 0 | } |
3069 | | |
3070 | | // At this point, we have a set of headers with sufficient work on them |
3071 | | // which can be processed. |
3072 | | |
3073 | | // If we don't have the last header, then this peer will have given us |
3074 | | // something new (if these headers are valid). |
3075 | 8.39M | bool received_new_header{last_received_header == nullptr}; |
3076 | | |
3077 | | // Now process all the headers. |
3078 | 8.39M | BlockValidationState state; |
3079 | 8.39M | const bool processed{m_chainman.ProcessNewBlockHeaders(headers, |
3080 | 8.39M | /*min_pow_checked=*/true, |
3081 | 8.39M | state, &pindexLast)}; |
3082 | 8.39M | if (!processed) { |
3083 | 4.58M | if (state.IsInvalid()) { |
3084 | 4.58M | if (!pfrom.IsInboundConn() && state.GetResult() == BlockValidationResult::BLOCK_CACHED_INVALID563k ) { |
3085 | | // Warn user if outgoing peers send us headers of blocks that we previously marked as invalid. |
3086 | 25.4k | LogWarning("%s (received from peer=%i). "Line | Count | Source | 98 | 25.4k | #define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 25.4k | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
3087 | 25.4k | "If this happens with all peers, consider database corruption (that -reindex may fix) " |
3088 | 25.4k | "or a potential consensus incompatibility.", |
3089 | 25.4k | state.GetDebugMessage(), pfrom.GetId()); |
3090 | 25.4k | } |
3091 | 4.58M | MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received"); |
3092 | 4.58M | return; |
3093 | 4.58M | } |
3094 | 4.58M | } |
3095 | 8.39M | assert(pindexLast)3.80M ; |
3096 | | |
3097 | 3.80M | if (processed && received_new_header) { |
3098 | 231k | LogBlockHeader(*pindexLast, pfrom, /*via_compact_block=*/false); |
3099 | 231k | } |
3100 | | |
3101 | | // Consider fetching more headers if we are not using our headers-sync mechanism. |
3102 | 3.80M | if (nCount == m_opts.max_headers_result && !have_headers_sync0 ) { |
3103 | | // Headers message had its maximum size; the peer may have more headers. |
3104 | 0 | if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) { |
3105 | 0 | LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d", pindexLast->nHeight, pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3106 | 0 | } |
3107 | 0 | } |
3108 | | |
3109 | 3.80M | UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result); |
3110 | | |
3111 | | // Consider immediately downloading blocks. |
3112 | 3.80M | HeadersDirectFetchBlocks(pfrom, peer, *pindexLast); |
3113 | | |
3114 | 3.80M | return; |
3115 | 3.80M | } |
3116 | | |
3117 | | std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state, |
3118 | | bool first_time_failure) |
3119 | 4.54M | { |
3120 | 4.54M | AssertLockNotHeld(m_peer_mutex); Line | Count | Source | 149 | 4.54M | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
3121 | 4.54M | AssertLockHeld(g_msgproc_mutex); Line | Count | Source | 144 | 4.54M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
3122 | 4.54M | AssertLockHeld(m_tx_download_mutex); Line | Count | Source | 144 | 4.54M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
3123 | | |
3124 | 4.54M | PeerRef peer{GetPeerRef(nodeid)}; |
3125 | | |
3126 | 4.54M | LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n", Line | Count | Source | 117 | 4.54M | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 4.54M | do { \ | 109 | 4.54M | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 4.54M | } while (0) |
|
|
3127 | 4.54M | ptx->GetHash().ToString(), |
3128 | 4.54M | ptx->GetWitnessHash().ToString(), |
3129 | 4.54M | nodeid, |
3130 | 4.54M | state.ToString()); |
3131 | | |
3132 | 4.54M | const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure); |
3133 | | |
3134 | 4.54M | if (add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 1000004.19M ) { |
3135 | 4.19M | AddToCompactExtraTransactions(ptx); |
3136 | 4.19M | } |
3137 | 4.54M | for (const Txid& parent_txid : unique_parents) { |
3138 | 1.55M | if (peer) AddKnownTx(*peer, parent_txid.ToUint256()); |
3139 | 1.55M | } |
3140 | | |
3141 | 4.54M | return package_to_validate; |
3142 | 4.54M | } |
3143 | | |
3144 | | void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions) |
3145 | 1.03M | { |
3146 | 1.03M | AssertLockNotHeld(m_peer_mutex); Line | Count | Source | 149 | 1.03M | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
3147 | 1.03M | AssertLockHeld(g_msgproc_mutex); Line | Count | Source | 144 | 1.03M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
3148 | 1.03M | AssertLockHeld(m_tx_download_mutex); Line | Count | Source | 144 | 1.03M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
3149 | | |
3150 | 1.03M | m_txdownloadman.MempoolAcceptedTx(tx); |
3151 | | |
3152 | 1.03M | LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n", Line | Count | Source | 117 | 1.03M | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 1.03M | do { \ | 109 | 1.03M | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 1.03M | } while (0) |
|
|
3153 | 1.03M | nodeid, |
3154 | 1.03M | tx->GetHash().ToString(), |
3155 | 1.03M | tx->GetWitnessHash().ToString(), |
3156 | 1.03M | m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000); |
3157 | | |
3158 | 1.03M | InitiateTxBroadcastToAll(tx->GetHash(), tx->GetWitnessHash()); |
3159 | | |
3160 | 1.03M | for (const CTransactionRef& removedTx : replaced_transactions) { |
3161 | 0 | AddToCompactExtraTransactions(removedTx); |
3162 | 0 | } |
3163 | 1.03M | } |
3164 | | |
3165 | | void PeerManagerImpl::ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) |
3166 | 136k | { |
3167 | 136k | AssertLockNotHeld(m_peer_mutex); Line | Count | Source | 149 | 136k | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
3168 | 136k | AssertLockHeld(g_msgproc_mutex); Line | Count | Source | 144 | 136k | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
3169 | 136k | AssertLockHeld(m_tx_download_mutex); Line | Count | Source | 144 | 136k | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
3170 | | |
3171 | 136k | const auto& package = package_to_validate.m_txns; |
3172 | 136k | const auto& senders = package_to_validate.m_senders; |
3173 | | |
3174 | 136k | if (package_result.m_state.IsInvalid()) { |
3175 | 130k | m_txdownloadman.MempoolRejectedPackage(package); |
3176 | 130k | } |
3177 | | // We currently only expect to process 1-parent-1-child packages. Remove if this changes. |
3178 | 136k | if (!Assume(package.size() == 2)) return0 ; Line | Count | Source | 125 | 136k | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
3179 | | |
3180 | | // Iterate backwards to erase in-package descendants from the orphanage before they become |
3181 | | // relevant in AddChildrenToWorkSet. |
3182 | 136k | auto package_iter = package.rbegin(); |
3183 | 136k | auto senders_iter = senders.rbegin(); |
3184 | 408k | while (package_iter != package.rend()) { |
3185 | 272k | const auto& tx = *package_iter; |
3186 | 272k | const NodeId nodeid = *senders_iter; |
3187 | 272k | const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())}; |
3188 | | |
3189 | | // It is not guaranteed that a result exists for every transaction. |
3190 | 272k | if (it_result != package_result.m_tx_results.end()) { |
3191 | 272k | const auto& tx_result = it_result->second; |
3192 | 272k | switch (tx_result.m_result_type) { |
3193 | 11.7k | case MempoolAcceptResult::ResultType::VALID: |
3194 | 11.7k | { |
3195 | 11.7k | ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions); |
3196 | 11.7k | break; |
3197 | 0 | } |
3198 | 260k | case MempoolAcceptResult::ResultType::INVALID: |
3199 | 260k | case MempoolAcceptResult::ResultType::DIFFERENT_WITNESS: |
3200 | 260k | { |
3201 | | // Don't add to vExtraTxnForCompact, as these transactions should have already been |
3202 | | // added there when added to the orphanage or rejected for TX_RECONSIDERABLE. |
3203 | | // This should be updated if package submission is ever used for transactions |
3204 | | // that haven't already been validated before. |
3205 | 260k | ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*first_time_failure=*/false); |
3206 | 260k | break; |
3207 | 260k | } |
3208 | 0 | case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: |
3209 | 0 | { |
3210 | | // AlreadyHaveTx() should be catching transactions that are already in mempool. |
3211 | 0 | Assume(false); Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
3212 | 0 | break; |
3213 | 260k | } |
3214 | 272k | } |
3215 | 272k | } |
3216 | 272k | package_iter++; |
3217 | 272k | senders_iter++; |
3218 | 272k | } |
3219 | 136k | } |
3220 | | |
3221 | | // NOTE: the orphan processing used to be uninterruptible and quadratic, which could allow a peer to stall the node for |
3222 | | // hours with specially crafted transactions. See https://bitcoincore.org/en/2024/07/03/disclose-orphan-dos. |
3223 | | bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) |
3224 | 35.4M | { |
3225 | 35.4M | AssertLockHeld(g_msgproc_mutex); Line | Count | Source | 144 | 35.4M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
3226 | 35.4M | LOCK2(::cs_main, m_tx_download_mutex); Line | Count | Source | 270 | 35.4M | UniqueLock criticalblock1(MaybeCheckNotHeld(cs1), #cs1, __FILE__, __LINE__); \ | 271 | 35.4M | UniqueLock criticalblock2(MaybeCheckNotHeld(cs2), #cs2, __FILE__, __LINE__) |
|
3227 | | |
3228 | 35.4M | CTransactionRef porphanTx = nullptr; |
3229 | | |
3230 | 35.4M | while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) { |
3231 | 121k | const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx); |
3232 | 121k | const TxValidationState& state = result.m_state; |
3233 | 121k | const Txid& orphanHash = porphanTx->GetHash(); |
3234 | 121k | const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash(); |
3235 | | |
3236 | 121k | if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { |
3237 | 32.1k | LogDebug(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString()); Line | Count | Source | 117 | 32.1k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 32.1k | do { \ | 109 | 32.1k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 32.1k | } while (0) |
|
|
3238 | 32.1k | ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions); |
3239 | 32.1k | return true; |
3240 | 89.1k | } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) { |
3241 | 87.9k | LogDebug(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n", Line | Count | Source | 117 | 87.9k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 87.9k | do { \ | 109 | 87.9k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 87.9k | } while (0) |
|
|
3242 | 87.9k | orphanHash.ToString(), |
3243 | 87.9k | orphan_wtxid.ToString(), |
3244 | 87.9k | peer.m_id, |
3245 | 87.9k | state.ToString()); |
3246 | | |
3247 | 87.9k | if (Assume(state.IsInvalid() && Line | Count | Source | 125 | 527k | #define Assume(val) inline_assertion_check<false>(87.9k val, std::source_location::current(), #87.9k val) |
|
3248 | 87.9k | state.GetResult() != TxValidationResult::TX_UNKNOWN && |
3249 | 87.9k | state.GetResult() != TxValidationResult::TX_NO_MEMPOOL && |
3250 | 87.9k | state.GetResult() != TxValidationResult::TX_RESULT_UNSET)) { |
3251 | 87.9k | ProcessInvalidTx(peer.m_id, porphanTx, state, /*first_time_failure=*/false); |
3252 | 87.9k | } |
3253 | 87.9k | return true; |
3254 | 87.9k | } |
3255 | 121k | } |
3256 | | |
3257 | 35.3M | return false; |
3258 | 35.4M | } |
3259 | | |
3260 | | bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer, |
3261 | | BlockFilterType filter_type, uint32_t start_height, |
3262 | | const uint256& stop_hash, uint32_t max_height_diff, |
3263 | | const CBlockIndex*& stop_index, |
3264 | | BlockFilterIndex*& filter_index) |
3265 | 0 | { |
3266 | 0 | const bool supported_filter_type = |
3267 | 0 | (filter_type == BlockFilterType::BASIC && |
3268 | 0 | (peer.m_our_services & NODE_COMPACT_FILTERS)); |
3269 | 0 | if (!supported_filter_type) { |
3270 | 0 | LogDebug(BCLog::NET, "peer requested unsupported block filter type: %d, %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3271 | 0 | static_cast<uint8_t>(filter_type), node.DisconnectMsg()); |
3272 | 0 | node.fDisconnect = true; |
3273 | 0 | return false; |
3274 | 0 | } |
3275 | | |
3276 | 0 | { |
3277 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
3278 | 0 | stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash); |
3279 | | |
3280 | | // Check that the stop block exists and the peer would be allowed to fetch it. |
3281 | 0 | if (!stop_index || !BlockRequestAllowed(*stop_index)) { |
3282 | 0 | LogDebug(BCLog::NET, "peer requested invalid block hash: %s, %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3283 | 0 | stop_hash.ToString(), node.DisconnectMsg()); |
3284 | 0 | node.fDisconnect = true; |
3285 | 0 | return false; |
3286 | 0 | } |
3287 | 0 | } |
3288 | | |
3289 | 0 | uint32_t stop_height = stop_index->nHeight; |
3290 | 0 | if (start_height > stop_height) { |
3291 | 0 | LogDebug(BCLog::NET, "peer sent invalid getcfilters/getcfheaders with " Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3292 | 0 | "start height %d and stop height %d, %s", |
3293 | 0 | start_height, stop_height, node.DisconnectMsg()); |
3294 | 0 | node.fDisconnect = true; |
3295 | 0 | return false; |
3296 | 0 | } |
3297 | 0 | if (stop_height - start_height >= max_height_diff) { |
3298 | 0 | LogDebug(BCLog::NET, "peer requested too many cfilters/cfheaders: %d / %d, %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3299 | 0 | stop_height - start_height + 1, max_height_diff, node.DisconnectMsg()); |
3300 | 0 | node.fDisconnect = true; |
3301 | 0 | return false; |
3302 | 0 | } |
3303 | | |
3304 | 0 | filter_index = GetBlockFilterIndex(filter_type); |
3305 | 0 | if (!filter_index) { |
3306 | 0 | LogDebug(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type)); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3307 | 0 | return false; |
3308 | 0 | } |
3309 | | |
3310 | 0 | return true; |
3311 | 0 | } |
3312 | | |
3313 | | void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv) |
3314 | 0 | { |
3315 | 0 | uint8_t filter_type_ser; |
3316 | 0 | uint32_t start_height; |
3317 | 0 | uint256 stop_hash; |
3318 | |
|
3319 | 0 | vRecv >> filter_type_ser >> start_height >> stop_hash; |
3320 | |
|
3321 | 0 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3322 | |
|
3323 | 0 | const CBlockIndex* stop_index; |
3324 | 0 | BlockFilterIndex* filter_index; |
3325 | 0 | if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, |
3326 | 0 | MAX_GETCFILTERS_SIZE, stop_index, filter_index)) { |
3327 | 0 | return; |
3328 | 0 | } |
3329 | | |
3330 | 0 | std::vector<BlockFilter> filters; |
3331 | 0 | if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) { |
3332 | 0 | LogDebug(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3333 | 0 | BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); |
3334 | 0 | return; |
3335 | 0 | } |
3336 | | |
3337 | 0 | for (const auto& filter : filters) { |
3338 | 0 | MakeAndPushMessage(node, NetMsgType::CFILTER, filter); |
3339 | 0 | } |
3340 | 0 | } |
3341 | | |
3342 | | void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv) |
3343 | 0 | { |
3344 | 0 | uint8_t filter_type_ser; |
3345 | 0 | uint32_t start_height; |
3346 | 0 | uint256 stop_hash; |
3347 | |
|
3348 | 0 | vRecv >> filter_type_ser >> start_height >> stop_hash; |
3349 | |
|
3350 | 0 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3351 | |
|
3352 | 0 | const CBlockIndex* stop_index; |
3353 | 0 | BlockFilterIndex* filter_index; |
3354 | 0 | if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, |
3355 | 0 | MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) { |
3356 | 0 | return; |
3357 | 0 | } |
3358 | | |
3359 | 0 | uint256 prev_header; |
3360 | 0 | if (start_height > 0) { |
3361 | 0 | const CBlockIndex* const prev_block = |
3362 | 0 | stop_index->GetAncestor(static_cast<int>(start_height - 1)); |
3363 | 0 | if (!filter_index->LookupFilterHeader(prev_block, prev_header)) { |
3364 | 0 | LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3365 | 0 | BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString()); |
3366 | 0 | return; |
3367 | 0 | } |
3368 | 0 | } |
3369 | | |
3370 | 0 | std::vector<uint256> filter_hashes; |
3371 | 0 | if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) { |
3372 | 0 | LogDebug(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3373 | 0 | BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); |
3374 | 0 | return; |
3375 | 0 | } |
3376 | | |
3377 | 0 | MakeAndPushMessage(node, NetMsgType::CFHEADERS, |
3378 | 0 | filter_type_ser, |
3379 | 0 | stop_index->GetBlockHash(), |
3380 | 0 | prev_header, |
3381 | 0 | filter_hashes); |
3382 | 0 | } |
3383 | | |
3384 | | void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv) |
3385 | 0 | { |
3386 | 0 | uint8_t filter_type_ser; |
3387 | 0 | uint256 stop_hash; |
3388 | |
|
3389 | 0 | vRecv >> filter_type_ser >> stop_hash; |
3390 | |
|
3391 | 0 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3392 | |
|
3393 | 0 | const CBlockIndex* stop_index; |
3394 | 0 | BlockFilterIndex* filter_index; |
3395 | 0 | if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash, |
3396 | 0 | /*max_height_diff=*/std::numeric_limits<uint32_t>::max(), |
3397 | 0 | stop_index, filter_index)) { |
3398 | 0 | return; |
3399 | 0 | } |
3400 | | |
3401 | 0 | std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL); |
3402 | | |
3403 | | // Populate headers. |
3404 | 0 | const CBlockIndex* block_index = stop_index; |
3405 | 0 | for (int i = headers.size() - 1; i >= 0; i--) { |
3406 | 0 | int height = (i + 1) * CFCHECKPT_INTERVAL; |
3407 | 0 | block_index = block_index->GetAncestor(height); |
3408 | |
|
3409 | 0 | if (!filter_index->LookupFilterHeader(block_index, headers[i])) { |
3410 | 0 | LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3411 | 0 | BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString()); |
3412 | 0 | return; |
3413 | 0 | } |
3414 | 0 | } |
3415 | | |
3416 | 0 | MakeAndPushMessage(node, NetMsgType::CFCHECKPT, |
3417 | 0 | filter_type_ser, |
3418 | 0 | stop_index->GetBlockHash(), |
3419 | 0 | headers); |
3420 | 0 | } |
3421 | | |
3422 | | void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked) |
3423 | 452k | { |
3424 | 452k | bool new_block{false}; |
3425 | 452k | m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block); |
3426 | 452k | if (new_block) { |
3427 | 451k | node.m_last_block_time = GetTime<std::chrono::seconds>(); |
3428 | | // In case this block came from a different peer than we requested |
3429 | | // from, we can erase the block request now anyway (as we just stored |
3430 | | // this block to disk). |
3431 | 451k | LOCK(cs_main); Line | Count | Source | 268 | 451k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 451k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 451k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 451k | #define PASTE(x, y) x ## y |
|
|
|
|
3432 | 451k | RemoveBlockRequest(block->GetHash(), std::nullopt); |
3433 | 451k | } else { |
3434 | 1.54k | LOCK(cs_main); Line | Count | Source | 268 | 1.54k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 1.54k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 1.54k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 1.54k | #define PASTE(x, y) x ## y |
|
|
|
|
3435 | 1.54k | mapBlockSource.erase(block->GetHash()); |
3436 | 1.54k | } |
3437 | 452k | } |
3438 | | |
3439 | | void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions) |
3440 | 1.37M | { |
3441 | 1.37M | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
3442 | 1.37M | bool fBlockRead{false}; |
3443 | 1.37M | { |
3444 | 1.37M | LOCK(cs_main); Line | Count | Source | 268 | 1.37M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 1.37M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 1.37M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 1.37M | #define PASTE(x, y) x ## y |
|
|
|
|
3445 | | |
3446 | 1.37M | auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash); |
3447 | 1.37M | size_t already_in_flight = std::distance(range_flight.first, range_flight.second); |
3448 | 1.37M | bool requested_block_from_this_peer{false}; |
3449 | | |
3450 | | // Multimap ensures ordering of outstanding requests. It's either empty or first in line. |
3451 | 1.37M | bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId())653k ; |
3452 | | |
3453 | 1.58M | while (range_flight.first != range_flight.second) { |
3454 | 688k | auto [node_id, block_it] = range_flight.first->second; |
3455 | 688k | if (node_id == pfrom.GetId() && block_it->partialBlock599k ) { |
3456 | 470k | requested_block_from_this_peer = true; |
3457 | 470k | break; |
3458 | 470k | } |
3459 | 217k | range_flight.first++; |
3460 | 217k | } |
3461 | | |
3462 | 1.37M | if (!requested_block_from_this_peer) { |
3463 | 900k | LogDebug(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId()); Line | Count | Source | 117 | 900k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 900k | do { \ | 109 | 900k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 900k | } while (0) |
|
|
3464 | 900k | return; |
3465 | 900k | } |
3466 | | |
3467 | 470k | PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; |
3468 | | |
3469 | 470k | if (partialBlock.header.IsNull()) { |
3470 | | // It is possible for the header to be empty if a previous call to FillBlock wiped the header, but left |
3471 | | // the PartiallyDownloadedBlock pointer around (i.e. did not call RemoveBlockRequest). In this case, we |
3472 | | // should not call LookupBlockIndex below. |
3473 | 364 | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); |
3474 | 364 | Misbehaving(peer, "previous compact block reconstruction attempt failed"); |
3475 | 364 | LogDebug(BCLog::NET, "Peer %d sent compact block transactions multiple times", pfrom.GetId()); Line | Count | Source | 117 | 364 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 364 | do { \ | 109 | 364 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 364 | } while (0) |
|
|
3476 | 364 | return; |
3477 | 364 | } |
3478 | | |
3479 | | // We should not have gotten this far in compact block processing unless it's attached to a known header |
3480 | 470k | const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))};Line | Count | Source | 125 | 470k | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
3481 | 470k | ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn, |
3482 | 470k | /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); |
3483 | 470k | if (status == READ_STATUS_INVALID) { |
3484 | 20.1k | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect |
3485 | 20.1k | Misbehaving(peer, "invalid compact block/non-matching block transactions"); |
3486 | 20.1k | return; |
3487 | 450k | } else if (status == READ_STATUS_FAILED) { |
3488 | 829 | if (first_in_flight) { |
3489 | | // Might have collided, fall back to getdata now :( |
3490 | | // We keep the failed partialBlock to disallow processing another compact block announcement from the same |
3491 | | // peer for the same block. We let the full block download below continue under the same m_downloading_since |
3492 | | // timer. |
3493 | 647 | std::vector<CInv> invs; |
3494 | 647 | invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash); |
3495 | 647 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs); |
3496 | 647 | } else { |
3497 | 182 | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); |
3498 | 182 | LogDebug(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId()); Line | Count | Source | 117 | 182 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 182 | do { \ | 109 | 182 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 182 | } while (0) |
|
|
3499 | 182 | return; |
3500 | 182 | } |
3501 | 449k | } else { |
3502 | | // Block is okay for further processing |
3503 | 449k | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer |
3504 | 449k | fBlockRead = true; |
3505 | | // mapBlockSource is used for potentially punishing peers and |
3506 | | // updating which peers send us compact blocks, so the race |
3507 | | // between here and cs_main in ProcessNewBlock is fine. |
3508 | | // BIP 152 permits peers to relay compact blocks after validating |
3509 | | // the header only; we should not punish peers if the block turns |
3510 | | // out to be invalid. |
3511 | 449k | mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false)); |
3512 | 449k | } |
3513 | 470k | } // Don't hold cs_main when we call into ProcessNewBlock |
3514 | 450k | if (fBlockRead) { |
3515 | | // Since we requested this block (it was in mapBlocksInFlight), force it to be processed, |
3516 | | // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc) |
3517 | | // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent |
3518 | | // disk-space attacks), but this should be safe due to the |
3519 | | // protections in the compact block handler -- see related comment |
3520 | | // in compact block optimistic reconstruction handling. |
3521 | 449k | ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); |
3522 | 449k | } |
3523 | 450k | return; |
3524 | 470k | } |
3525 | | |
3526 | 935k | void PeerManagerImpl::LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block) { |
3527 | | // To prevent log spam, this function should only be called after it was determined that a |
3528 | | // header is both new and valid. |
3529 | | // |
3530 | | // These messages are valuable for detecting potential selfish mining behavior; |
3531 | | // if multiple displacing headers are seen near simultaneously across many |
3532 | | // nodes in the network, this might be an indication of selfish mining. |
3533 | | // In addition it can be used to identify peers which send us a header, but |
3534 | | // don't followup with a complete and valid (compact) block. |
3535 | | // Having this log by default when not in IBD ensures broad availability of |
3536 | | // this data in case investigation is merited. |
3537 | 935k | const auto msg = strprintf( Line | Count | Source | 1172 | 935k | #define strprintf tfm::format |
|
3538 | 935k | "Saw new %sheader hash=%s height=%d %s", |
3539 | 935k | via_compact_block ? "cmpctblock "704k : ""231k , |
3540 | 935k | index.GetBlockHash().ToString(), |
3541 | 935k | index.nHeight, |
3542 | 935k | peer.LogPeer() |
3543 | 935k | ); |
3544 | 935k | if (m_chainman.IsInitialBlockDownload()) { |
3545 | 283k | LogDebug(BCLog::VALIDATION, "%s", msg); Line | Count | Source | 117 | 283k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 283k | do { \ | 109 | 283k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 283k | } while (0) |
|
|
3546 | 652k | } else { |
3547 | 652k | LogInfo("%s", msg);Line | Count | Source | 97 | 652k | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 652k | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
3548 | 652k | } |
3549 | 935k | } |
3550 | | |
3551 | | void PeerManagerImpl::PushPrivateBroadcastTx(CNode& node) |
3552 | 252 | { |
3553 | 252 | Assume(node.IsPrivateBroadcastConn()); Line | Count | Source | 125 | 252 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
3554 | | |
3555 | 252 | const auto opt_tx{m_tx_for_private_broadcast.PickTxForSend(node.GetId(), CService{node.addr})}; |
3556 | 252 | if (!opt_tx) { |
3557 | 252 | LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: no more transactions for private broadcast (connected in vain), %s", node.LogPeer()); Line | Count | Source | 117 | 252 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 252 | do { \ | 109 | 252 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 252 | } while (0) |
|
|
3558 | 252 | node.fDisconnect = true; |
3559 | 252 | return; |
3560 | 252 | } |
3561 | 0 | const CTransactionRef& tx{*opt_tx}; |
3562 | |
|
3563 | 0 | LogDebug(BCLog::PRIVBROADCAST, "P2P handshake completed, sending INV for txid=%s%s, %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3564 | 0 | tx->GetHash().ToString(), tx->HasWitness() ? strprintf(", wtxid=%s", tx->GetWitnessHash().ToString()) : "", |
3565 | 0 | node.LogPeer()); |
3566 | |
|
3567 | 0 | MakeAndPushMessage(node, NetMsgType::INV, std::vector<CInv>{{CInv{MSG_TX, tx->GetHash().ToUint256()}}}); |
3568 | 0 | } |
3569 | | |
3570 | | void PeerManagerImpl::ProcessMessage(Peer& peer, CNode& pfrom, const std::string& msg_type, DataStream& vRecv, |
3571 | | const std::chrono::microseconds time_received, |
3572 | | const std::atomic<bool>& interruptMsgProc) |
3573 | 33.8M | { |
3574 | 33.8M | AssertLockHeld(g_msgproc_mutex); Line | Count | Source | 144 | 33.8M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
3575 | | |
3576 | 33.8M | LogDebug(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId()); Line | Count | Source | 117 | 33.8M | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 33.8M | do { \ | 109 | 33.8M | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 33.8M | } while (0) |
|
|
3577 | | |
3578 | | |
3579 | 33.8M | if (msg_type == NetMsgType::VERSION) { |
3580 | 753k | if (pfrom.nVersion != 0) { |
3581 | 0 | LogDebug(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3582 | 0 | return; |
3583 | 0 | } |
3584 | | |
3585 | 753k | int64_t nTime; |
3586 | 753k | CService addrMe; |
3587 | 753k | uint64_t nNonce = 1; |
3588 | 753k | ServiceFlags nServices; |
3589 | 753k | int nVersion; |
3590 | 753k | std::string cleanSubVer; |
3591 | 753k | int starting_height = -1; |
3592 | 753k | bool fRelay = true; |
3593 | | |
3594 | 753k | vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime; |
3595 | 753k | if (nTime < 0) { |
3596 | 0 | nTime = 0; |
3597 | 0 | } |
3598 | 753k | vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer |
3599 | 753k | vRecv >> CNetAddr::V1(addrMe); |
3600 | 753k | if (!pfrom.IsInboundConn()) |
3601 | 600k | { |
3602 | | // Overwrites potentially existing services. In contrast to this, |
3603 | | // unvalidated services received via gossip relay in ADDR/ADDRV2 |
3604 | | // messages are only ever added but cannot replace existing ones. |
3605 | 600k | m_addrman.SetServices(pfrom.addr, nServices); |
3606 | 600k | } |
3607 | 753k | if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices)160k ) |
3608 | 137k | { |
3609 | 137k | LogDebug(BCLog::NET, "peer does not offer the expected services (%08x offered, %08x expected), %s", Line | Count | Source | 117 | 137k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 137k | do { \ | 109 | 137k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 137k | } while (0) |
|
|
3610 | 137k | nServices, |
3611 | 137k | GetDesirableServiceFlags(nServices), |
3612 | 137k | pfrom.DisconnectMsg()); |
3613 | 137k | pfrom.fDisconnect = true; |
3614 | 137k | return; |
3615 | 137k | } |
3616 | | |
3617 | 616k | if (nVersion < MIN_PEER_PROTO_VERSION) { |
3618 | | // disconnect from peers older than this proto version |
3619 | 0 | LogDebug(BCLog::NET, "peer using obsolete version %i, %s", nVersion, pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3620 | 0 | pfrom.fDisconnect = true; |
3621 | 0 | return; |
3622 | 0 | } |
3623 | | |
3624 | 616k | if (!vRecv.empty()) { |
3625 | | // The version message includes information about the sending node which we don't use: |
3626 | | // - 8 bytes (service bits) |
3627 | | // - 16 bytes (ipv6 address) |
3628 | | // - 2 bytes (port) |
3629 | 616k | vRecv.ignore(26); |
3630 | 616k | vRecv >> nNonce; |
3631 | 616k | } |
3632 | 616k | if (!vRecv.empty()) { |
3633 | 616k | std::string strSubVer; |
3634 | 616k | vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH); Line | Count | Source | 495 | 616k | #define LIMITED_STRING(obj,n) Using<LimitedStringFormatter<n>>(obj) |
|
3635 | 616k | cleanSubVer = SanitizeString(strSubVer); |
3636 | 616k | } |
3637 | 616k | if (!vRecv.empty()) { |
3638 | 616k | vRecv >> starting_height; |
3639 | 616k | } |
3640 | 616k | if (!vRecv.empty()) |
3641 | 616k | vRecv >> fRelay; |
3642 | | // Disconnect if we connected to ourself |
3643 | 616k | if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)152k ) |
3644 | 5.93k | { |
3645 | 5.93k | LogInfo("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());Line | Count | Source | 97 | 5.93k | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 5.93k | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
3646 | 5.93k | pfrom.fDisconnect = true; |
3647 | 5.93k | return; |
3648 | 5.93k | } |
3649 | | |
3650 | 610k | if (pfrom.IsInboundConn() && addrMe.IsRoutable()146k ) |
3651 | 0 | { |
3652 | 0 | SeenLocal(addrMe); |
3653 | 0 | } |
3654 | | |
3655 | | // Inbound peers send us their version message when they connect. |
3656 | | // We send our version message in response. |
3657 | 610k | if (pfrom.IsInboundConn()) { |
3658 | 146k | PushNodeVersion(pfrom, peer); |
3659 | 146k | } |
3660 | | |
3661 | | // Change version |
3662 | 610k | const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION); |
3663 | 610k | pfrom.SetCommonVersion(greatest_common_version); |
3664 | 610k | pfrom.nVersion = nVersion; |
3665 | | |
3666 | 610k | pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices); |
3667 | 610k | peer.m_their_services = nServices; |
3668 | 610k | pfrom.SetAddrLocal(addrMe); |
3669 | 610k | { |
3670 | 610k | LOCK(pfrom.m_subver_mutex); Line | Count | Source | 268 | 610k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 610k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 610k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 610k | #define PASTE(x, y) x ## y |
|
|
|
|
3671 | 610k | pfrom.cleanSubVer = cleanSubVer; |
3672 | 610k | } |
3673 | | |
3674 | | // Only initialize the Peer::TxRelay m_relay_txs data structure if: |
3675 | | // - this isn't an outbound block-relay-only connection, and |
3676 | | // - this isn't an outbound feeler connection, and |
3677 | | // - fRelay=true (the peer wishes to receive transaction announcements) |
3678 | | // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that |
3679 | | // the peer may turn on transaction relay later. |
3680 | 610k | if (!pfrom.IsBlockOnlyConn() && |
3681 | 610k | !pfrom.IsFeelerConn()606k && |
3682 | 610k | (591k fRelay591k || (peer.m_our_services & NODE_BLOOM)107k )) { |
3683 | 542k | auto* const tx_relay = peer.SetTxRelay(); |
3684 | 542k | { |
3685 | 542k | LOCK(tx_relay->m_bloom_filter_mutex); Line | Count | Source | 268 | 542k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 542k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 542k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 542k | #define PASTE(x, y) x ## y |
|
|
|
|
3686 | 542k | tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message |
3687 | 542k | } |
3688 | 542k | if (fRelay) pfrom.m_relays_txs = true484k ; |
3689 | 542k | } |
3690 | | |
3691 | 610k | const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; |
3692 | 610k | LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, %s%s", Line | Count | Source | 117 | 610k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 610k | do { \ | 109 | 610k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 610k | } while (0) |
|
|
3693 | 610k | cleanSubVer.empty() ? "<no user agent>" : cleanSubVer, pfrom.nVersion, |
3694 | 610k | starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.LogPeer(), |
3695 | 610k | (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); |
3696 | | |
3697 | 610k | if (pfrom.IsPrivateBroadcastConn()) { |
3698 | 646 | if (fRelay) { |
3699 | 391 | MakeAndPushMessage(pfrom, NetMsgType::VERACK); |
3700 | 391 | } else { |
3701 | 255 | LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: does not support transaction relay (connected in vain), %s", Line | Count | Source | 117 | 255 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 255 | do { \ | 109 | 255 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 255 | } while (0) |
|
|
3702 | 255 | pfrom.LogPeer()); |
3703 | 255 | pfrom.fDisconnect = true; |
3704 | 255 | } |
3705 | 646 | return; |
3706 | 646 | } |
3707 | | |
3708 | 609k | if (greatest_common_version >= WTXID_RELAY_VERSION) { |
3709 | 549k | MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY); |
3710 | 549k | } |
3711 | | |
3712 | | // Signal ADDRv2 support (BIP155). |
3713 | 609k | if (greatest_common_version >= 70016) { |
3714 | | // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some |
3715 | | // implementations reject messages they don't know. As a courtesy, don't send |
3716 | | // it to nodes with a version before 70016, as no software is known to support |
3717 | | // BIP155 that doesn't announce at least that protocol version number. |
3718 | 549k | MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2); |
3719 | 549k | } |
3720 | | |
3721 | 609k | if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation549k ) { |
3722 | | // Per BIP-330, we announce txreconciliation support if: |
3723 | | // - protocol version per the peer's VERSION message supports WTXID_RELAY; |
3724 | | // - transaction relay is supported per the peer's VERSION message |
3725 | | // - this is not a block-relay-only connection and not a feeler |
3726 | | // - this is not an addr fetch connection; |
3727 | | // - we are not in -blocksonly mode. |
3728 | 0 | const auto* tx_relay = peer.GetTxRelay(); |
3729 | 0 | if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) && Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
3730 | 0 | !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) { |
3731 | 0 | const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId()); |
3732 | 0 | MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL, |
3733 | 0 | TXRECONCILIATION_VERSION, recon_salt); |
3734 | 0 | } |
3735 | 0 | } |
3736 | | |
3737 | 609k | MakeAndPushMessage(pfrom, NetMsgType::VERACK); |
3738 | | |
3739 | | // Potentially mark this peer as a preferred download peer. |
3740 | 609k | { |
3741 | 609k | LOCK(cs_main); Line | Count | Source | 268 | 609k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 609k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 609k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 609k | #define PASTE(x, y) x ## y |
|
|
|
|
3742 | 609k | CNodeState* state = State(pfrom.GetId()); |
3743 | 609k | state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)146k ) && !pfrom.IsAddrFetchConn()512k && CanServeBlocks(peer)510k ; |
3744 | 609k | m_num_preferred_download_peers += state->fPreferredDownload; |
3745 | 609k | } |
3746 | | |
3747 | | // Attempt to initialize address relay for outbound peers and use result |
3748 | | // to decide whether to send GETADDR, so that we don't send it to |
3749 | | // inbound or outbound block-relay-only peers. |
3750 | 609k | bool send_getaddr{false}; |
3751 | 609k | if (!pfrom.IsInboundConn()) { |
3752 | 463k | send_getaddr = SetupAddressRelay(pfrom, peer); |
3753 | 463k | } |
3754 | 609k | if (send_getaddr) { |
3755 | | // Do a one-time address fetch to help populate/update our addrman. |
3756 | | // If we're starting up for the first time, our addrman may be pretty |
3757 | | // empty, so this mechanism is important to help us connect to the network. |
3758 | | // We skip this for block-relay-only peers. We want to avoid |
3759 | | // potentially leaking addr information and we do not want to |
3760 | | // indicate to the peer that we will participate in addr relay. |
3761 | 459k | MakeAndPushMessage(pfrom, NetMsgType::GETADDR); |
3762 | 459k | peer.m_getaddr_sent = true; |
3763 | | // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response |
3764 | | // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit). |
3765 | 459k | peer.m_addr_token_bucket += MAX_ADDR_TO_SEND; |
3766 | 459k | } |
3767 | | |
3768 | 609k | if (!pfrom.IsInboundConn()) { |
3769 | | // For non-inbound connections, we update the addrman to record |
3770 | | // connection success so that addrman will have an up-to-date |
3771 | | // notion of which peers are online and available. |
3772 | | // |
3773 | | // While we strive to not leak information about block-relay-only |
3774 | | // connections via the addrman, not moving an address to the tried |
3775 | | // table is also potentially detrimental because new-table entries |
3776 | | // are subject to eviction in the event of addrman collisions. We |
3777 | | // mitigate the information-leak by never calling |
3778 | | // AddrMan::Connected() on block-relay-only peers; see |
3779 | | // FinalizeNode(). |
3780 | | // |
3781 | | // This moves an address from New to Tried table in Addrman, |
3782 | | // resolves tried-table collisions, etc. |
3783 | 463k | m_addrman.Good(pfrom.addr); |
3784 | 463k | } |
3785 | | |
3786 | 609k | peer.m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>(); |
3787 | 609k | if (!pfrom.IsInboundConn()) { |
3788 | | // Don't use timedata samples from inbound peers to make it |
3789 | | // harder for others to create false warnings about our clock being out of sync. |
3790 | 463k | m_outbound_time_offsets.Add(peer.m_time_offset); |
3791 | 463k | m_outbound_time_offsets.WarnIfOutOfSync(); |
3792 | 463k | } |
3793 | | |
3794 | | // If the peer is old enough to have the old alert system, send it the final alert. |
3795 | 609k | if (greatest_common_version <= 70012) { |
3796 | 60.3k | constexpr auto finalAlert{"60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"_hex}; |
3797 | 60.3k | MakeAndPushMessage(pfrom, "alert", finalAlert); |
3798 | 60.3k | } |
3799 | | |
3800 | | // Feeler connections exist only to verify if address is online. |
3801 | 609k | if (pfrom.IsFeelerConn()) { |
3802 | 14.9k | LogDebug(BCLog::NET, "feeler connection completed, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 14.9k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 14.9k | do { \ | 109 | 14.9k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 14.9k | } while (0) |
|
|
3803 | 14.9k | pfrom.fDisconnect = true; |
3804 | 14.9k | } |
3805 | 609k | return; |
3806 | 610k | } |
3807 | | |
3808 | 33.1M | if (pfrom.nVersion == 0) { |
3809 | | // Must have a version message before anything else |
3810 | 0 | LogDebug(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3811 | 0 | return; |
3812 | 0 | } |
3813 | | |
3814 | 33.1M | if (msg_type == NetMsgType::VERACK) { |
3815 | 547k | if (pfrom.fSuccessfullyConnected) { |
3816 | 0 | LogDebug(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3817 | 0 | return; |
3818 | 0 | } |
3819 | | |
3820 | 547k | auto new_peer_msg = [&]() { |
3821 | 437k | const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; |
3822 | 437k | return strprintf("New %s peer connected: transport: %s, version: %d, %s%s",Line | Count | Source | 1172 | 437k | #define strprintf tfm::format |
|
3823 | 437k | pfrom.ConnectionTypeAsString(), |
3824 | 437k | TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type), |
3825 | 437k | pfrom.nVersion.load(), pfrom.LogPeer(), |
3826 | 437k | (mapped_as ? strprintf0 (", mapped_as=%d", mapped_as)0 : "")); Line | Count | Source | 1172 | 0 | #define strprintf tfm::format |
|
3827 | 437k | }; |
3828 | | |
3829 | | // Log successful connections unconditionally for outbound, but not for inbound as those |
3830 | | // can be triggered by an attacker at high rate. |
3831 | 547k | if (pfrom.IsInboundConn()) { |
3832 | 109k | LogDebug(BCLog::NET, "%s", new_peer_msg()); Line | Count | Source | 117 | 109k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 109k | do { \ | 109 | 109k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 109k | } while (0) |
|
|
3833 | 437k | } else { |
3834 | 437k | LogInfo("%s", new_peer_msg());Line | Count | Source | 97 | 437k | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 437k | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
3835 | 437k | } |
3836 | | |
3837 | 547k | if (auto tx_relay = peer.GetTxRelay()) { |
3838 | | // `TxRelay::m_tx_inventory_to_send` must be empty before the |
3839 | | // version handshake is completed as |
3840 | | // `TxRelay::m_next_inv_send_time` is first initialised in |
3841 | | // `SendMessages` after the verack is received. Any transactions |
3842 | | // received during the version handshake would otherwise |
3843 | | // immediately be advertised without random delay, potentially |
3844 | | // leaking the time of arrival to a spy. |
3845 | 523k | Assume(WITH_LOCK( Line | Count | Source | 125 | 523k | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
3846 | 523k | tx_relay->m_tx_inventory_mutex, |
3847 | 523k | return tx_relay->m_tx_inventory_to_send.empty() && |
3848 | 523k | tx_relay->m_next_inv_send_time == 0s)); |
3849 | 523k | } |
3850 | | |
3851 | 547k | if (pfrom.IsPrivateBroadcastConn()) { |
3852 | 252 | pfrom.fSuccessfullyConnected = true; |
3853 | | // The peer may intend to later send us NetMsgType::FEEFILTER limiting |
3854 | | // cheap transactions, but we don't wait for that and thus we may send |
3855 | | // them a transaction below their threshold. This is ok because this |
3856 | | // relay logic is designed to work even in cases when the peer drops |
3857 | | // the transaction (due to it being too cheap, or for other reasons). |
3858 | 252 | PushPrivateBroadcastTx(pfrom); |
3859 | 252 | return; |
3860 | 252 | } |
3861 | | |
3862 | 547k | if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { |
3863 | | // Tell our peer we are willing to provide version 2 cmpctblocks. |
3864 | | // However, we do not request new block announcements using |
3865 | | // cmpctblock messages. |
3866 | | // We send this to non-NODE NETWORK peers as well, because |
3867 | | // they may wish to request compact blocks from us |
3868 | 513k | MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION); |
3869 | 513k | } |
3870 | | |
3871 | 547k | if (m_txreconciliation) { |
3872 | 0 | if (!peer.m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) { |
3873 | | // We could have optimistically pre-registered/registered the peer. In that case, |
3874 | | // we should forget about the reconciliation state here if this wasn't followed |
3875 | | // by WTXIDRELAY (since WTXIDRELAY can't be announced later). |
3876 | 0 | m_txreconciliation->ForgetPeer(pfrom.GetId()); |
3877 | 0 | } |
3878 | 0 | } |
3879 | | |
3880 | 547k | { |
3881 | 547k | LOCK2(::cs_main, m_tx_download_mutex); Line | Count | Source | 270 | 547k | UniqueLock criticalblock1(MaybeCheckNotHeld(cs1), #cs1, __FILE__, __LINE__); \ | 271 | 547k | UniqueLock criticalblock2(MaybeCheckNotHeld(cs2), #cs2, __FILE__, __LINE__) |
|
3882 | 547k | const CNodeState* state = State(pfrom.GetId()); |
3883 | 547k | m_txdownloadman.ConnectedPeer(pfrom.GetId(), node::TxDownloadConnectionInfo { |
3884 | 547k | .m_preferred = state->fPreferredDownload, |
3885 | 547k | .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay), |
3886 | 547k | .m_wtxid_relay = peer.m_wtxid_relay, |
3887 | 547k | }); |
3888 | 547k | } |
3889 | | |
3890 | 547k | pfrom.fSuccessfullyConnected = true; |
3891 | 547k | return; |
3892 | 547k | } |
3893 | | |
3894 | 32.5M | if (msg_type == NetMsgType::SENDHEADERS) { |
3895 | 0 | peer.m_prefers_headers = true; |
3896 | 0 | return; |
3897 | 0 | } |
3898 | | |
3899 | 32.5M | if (msg_type == NetMsgType::SENDCMPCT) { |
3900 | 1.72M | bool sendcmpct_hb{false}; |
3901 | 1.72M | uint64_t sendcmpct_version{0}; |
3902 | 1.72M | vRecv >> sendcmpct_hb >> sendcmpct_version; |
3903 | | |
3904 | | // Only support compact block relay with witnesses |
3905 | 1.72M | if (sendcmpct_version != CMPCTBLOCKS_VERSION) return36.1k ; |
3906 | | |
3907 | 1.68M | LOCK(cs_main); Line | Count | Source | 268 | 1.68M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 1.68M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 1.68M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 1.68M | #define PASTE(x, y) x ## y |
|
|
|
|
3908 | 1.68M | CNodeState* nodestate = State(pfrom.GetId()); |
3909 | 1.68M | nodestate->m_provides_cmpctblocks = true; |
3910 | 1.68M | nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb; |
3911 | | // save whether peer selects us as BIP152 high-bandwidth peer |
3912 | | // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth) |
3913 | 1.68M | pfrom.m_bip152_highbandwidth_from = sendcmpct_hb; |
3914 | 1.68M | return; |
3915 | 1.72M | } |
3916 | | |
3917 | | // BIP339 defines feature negotiation of wtxidrelay, which must happen between |
3918 | | // VERSION and VERACK to avoid relay problems from switching after a connection is up. |
3919 | 30.8M | if (msg_type == NetMsgType::WTXIDRELAY) { |
3920 | 0 | if (pfrom.fSuccessfullyConnected) { |
3921 | | // Disconnect peers that send a wtxidrelay message after VERACK. |
3922 | 0 | LogDebug(BCLog::NET, "wtxidrelay received after verack, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3923 | 0 | pfrom.fDisconnect = true; |
3924 | 0 | return; |
3925 | 0 | } |
3926 | 0 | if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) { |
3927 | 0 | if (!peer.m_wtxid_relay) { |
3928 | 0 | peer.m_wtxid_relay = true; |
3929 | 0 | m_wtxid_relay_peers++; |
3930 | 0 | } else { |
3931 | 0 | LogDebug(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3932 | 0 | } |
3933 | 0 | } else { |
3934 | 0 | LogDebug(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3935 | 0 | } |
3936 | 0 | return; |
3937 | 0 | } |
3938 | | |
3939 | | // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen |
3940 | | // between VERSION and VERACK. |
3941 | 30.8M | if (msg_type == NetMsgType::SENDADDRV2) { |
3942 | 0 | if (pfrom.fSuccessfullyConnected) { |
3943 | | // Disconnect peers that send a SENDADDRV2 message after VERACK. |
3944 | 0 | LogDebug(BCLog::NET, "sendaddrv2 received after verack, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3945 | 0 | pfrom.fDisconnect = true; |
3946 | 0 | return; |
3947 | 0 | } |
3948 | 0 | peer.m_wants_addrv2 = true; |
3949 | 0 | return; |
3950 | 0 | } |
3951 | | |
3952 | | // Received from a peer demonstrating readiness to announce transactions via reconciliations. |
3953 | | // This feature negotiation must happen between VERSION and VERACK to avoid relay problems |
3954 | | // from switching announcement protocols after the connection is up. |
3955 | 30.8M | if (msg_type == NetMsgType::SENDTXRCNCL) { |
3956 | 0 | if (!m_txreconciliation) { |
3957 | 0 | LogDebug(BCLog::NET, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3958 | 0 | return; |
3959 | 0 | } |
3960 | | |
3961 | 0 | if (pfrom.fSuccessfullyConnected) { |
3962 | 0 | LogDebug(BCLog::NET, "sendtxrcncl received after verack, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3963 | 0 | pfrom.fDisconnect = true; |
3964 | 0 | return; |
3965 | 0 | } |
3966 | | |
3967 | | // Peer must not offer us reconciliations if we specified no tx relay support in VERSION. |
3968 | 0 | if (RejectIncomingTxs(pfrom)) { |
3969 | 0 | LogDebug(BCLog::NET, "sendtxrcncl received to which we indicated no tx relay, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3970 | 0 | pfrom.fDisconnect = true; |
3971 | 0 | return; |
3972 | 0 | } |
3973 | | |
3974 | | // Peer must not offer us reconciliations if they specified no tx relay support in VERSION. |
3975 | | // This flag might also be false in other cases, but the RejectIncomingTxs check above |
3976 | | // eliminates them, so that this flag fully represents what we are looking for. |
3977 | 0 | const auto* tx_relay = peer.GetTxRelay(); |
3978 | 0 | if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
3979 | 0 | LogDebug(BCLog::NET, "sendtxrcncl received which indicated no tx relay to us, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3980 | 0 | pfrom.fDisconnect = true; |
3981 | 0 | return; |
3982 | 0 | } |
3983 | | |
3984 | 0 | uint32_t peer_txreconcl_version; |
3985 | 0 | uint64_t remote_salt; |
3986 | 0 | vRecv >> peer_txreconcl_version >> remote_salt; |
3987 | |
|
3988 | 0 | const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(), |
3989 | 0 | peer_txreconcl_version, remote_salt); |
3990 | 0 | switch (result) { |
3991 | 0 | case ReconciliationRegisterResult::NOT_FOUND: |
3992 | 0 | LogDebug(BCLog::NET, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3993 | 0 | break; |
3994 | 0 | case ReconciliationRegisterResult::SUCCESS: |
3995 | 0 | break; |
3996 | 0 | case ReconciliationRegisterResult::ALREADY_REGISTERED: |
3997 | 0 | LogDebug(BCLog::NET, "txreconciliation protocol violation (sendtxrcncl received from already registered peer), %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
3998 | 0 | pfrom.fDisconnect = true; |
3999 | 0 | return; |
4000 | 0 | case ReconciliationRegisterResult::PROTOCOL_VIOLATION: |
4001 | 0 | LogDebug(BCLog::NET, "txreconciliation protocol violation, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4002 | 0 | pfrom.fDisconnect = true; |
4003 | 0 | return; |
4004 | 0 | } |
4005 | 0 | return; |
4006 | 0 | } |
4007 | | |
4008 | 30.8M | if (!pfrom.fSuccessfullyConnected) { |
4009 | 24.3k | LogDebug(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); Line | Count | Source | 117 | 24.3k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 24.3k | do { \ | 109 | 24.3k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 24.3k | } while (0) |
|
|
4010 | 24.3k | return; |
4011 | 24.3k | } |
4012 | | |
4013 | 30.8M | if (pfrom.IsPrivateBroadcastConn()) { |
4014 | 0 | if (msg_type != NetMsgType::PONG && msg_type != NetMsgType::GETDATA) { |
4015 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Ignoring incoming message '%s', %s", msg_type, pfrom.LogPeer()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4016 | 0 | return; |
4017 | 0 | } |
4018 | 0 | } |
4019 | | |
4020 | 30.8M | if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) { |
4021 | 0 | const auto ser_params{ |
4022 | 0 | msg_type == NetMsgType::ADDRV2 ? |
4023 | | // Set V2 param so that the CNetAddr and CAddress |
4024 | | // unserialize methods know that an address in v2 format is coming. |
4025 | 0 | CAddress::V2_NETWORK : |
4026 | 0 | CAddress::V1_NETWORK, |
4027 | 0 | }; |
4028 | |
|
4029 | 0 | std::vector<CAddress> vAddr; |
4030 | 0 | vRecv >> ser_params(vAddr); |
4031 | 0 | ProcessAddrs(msg_type, pfrom, peer, std::move(vAddr), interruptMsgProc); |
4032 | 0 | return; |
4033 | 0 | } |
4034 | | |
4035 | 30.8M | if (msg_type == NetMsgType::INV) { |
4036 | 0 | std::vector<CInv> vInv; |
4037 | 0 | vRecv >> vInv; |
4038 | 0 | if (vInv.size() > MAX_INV_SZ) |
4039 | 0 | { |
4040 | 0 | Misbehaving(peer, strprintf("inv message size = %u", vInv.size()));Line | Count | Source | 1172 | 0 | #define strprintf tfm::format |
|
4041 | 0 | return; |
4042 | 0 | } |
4043 | | |
4044 | 0 | const bool reject_tx_invs{RejectIncomingTxs(pfrom)}; |
4045 | |
|
4046 | 0 | LOCK2(cs_main, m_tx_download_mutex); Line | Count | Source | 270 | 0 | UniqueLock criticalblock1(MaybeCheckNotHeld(cs1), #cs1, __FILE__, __LINE__); \ | 271 | 0 | UniqueLock criticalblock2(MaybeCheckNotHeld(cs2), #cs2, __FILE__, __LINE__) |
|
4047 | |
|
4048 | 0 | const auto current_time{GetTime<std::chrono::microseconds>()}; |
4049 | 0 | uint256* best_block{nullptr}; |
4050 | |
|
4051 | 0 | for (CInv& inv : vInv) { |
4052 | 0 | if (interruptMsgProc) return; |
4053 | | |
4054 | | // Ignore INVs that don't match wtxidrelay setting. |
4055 | | // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting. |
4056 | | // This is fine as no INV messages are involved in that process. |
4057 | 0 | if (peer.m_wtxid_relay) { |
4058 | 0 | if (inv.IsMsgTx()) continue; |
4059 | 0 | } else { |
4060 | 0 | if (inv.IsMsgWtx()) continue; |
4061 | 0 | } |
4062 | | |
4063 | 0 | if (inv.IsMsgBlk()) { |
4064 | 0 | const bool fAlreadyHave = AlreadyHaveBlock(inv.hash); |
4065 | 0 | LogDebug(BCLog::NET, "got inv: %s %s peer=%d", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4066 | |
|
4067 | 0 | UpdateBlockAvailability(pfrom.GetId(), inv.hash); |
4068 | 0 | if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) { |
4069 | | // Headers-first is the primary method of announcement on |
4070 | | // the network. If a node fell back to sending blocks by |
4071 | | // inv, it may be for a re-org, or because we haven't |
4072 | | // completed initial headers sync. The final block hash |
4073 | | // provided should be the highest, so send a getheaders and |
4074 | | // then fetch the blocks we need to catch up. |
4075 | 0 | best_block = &inv.hash; |
4076 | 0 | } |
4077 | 0 | } else if (inv.IsGenTxMsg()) { |
4078 | 0 | if (reject_tx_invs) { |
4079 | 0 | LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, %s", inv.hash.ToString(), pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4080 | 0 | pfrom.fDisconnect = true; |
4081 | 0 | return; |
4082 | 0 | } |
4083 | 0 | const GenTxid gtxid = ToGenTxid(inv); |
4084 | 0 | AddKnownTx(peer, inv.hash); |
4085 | |
|
4086 | 0 | if (!m_chainman.IsInitialBlockDownload()) { |
4087 | 0 | const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time)}; |
4088 | 0 | LogDebug(BCLog::NET, "got inv: %s %s peer=%d", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4089 | 0 | } |
4090 | 0 | } else { |
4091 | 0 | LogDebug(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4092 | 0 | } |
4093 | 0 | } |
4094 | | |
4095 | 0 | if (best_block != nullptr) { |
4096 | | // If we haven't started initial headers-sync with this peer, then |
4097 | | // consider sending a getheaders now. On initial startup, there's a |
4098 | | // reliability vs bandwidth tradeoff, where we are only trying to do |
4099 | | // initial headers sync with one peer at a time, with a long |
4100 | | // timeout (at which point, if the sync hasn't completed, we will |
4101 | | // disconnect the peer and then choose another). In the meantime, |
4102 | | // as new blocks are found, we are willing to add one new peer per |
4103 | | // block to sync with as well, to sync quicker in the case where |
4104 | | // our initial peer is unresponsive (but less bandwidth than we'd |
4105 | | // use if we turned on sync with all peers). |
4106 | 0 | CNodeState& state{*Assert(State(pfrom.GetId()))};Line | Count | Source | 113 | 0 | #define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val) |
|
4107 | 0 | if (state.fSyncStarted || (!peer.m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) { |
4108 | 0 | if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), peer)) { |
4109 | 0 | LogDebug(BCLog::NET, "getheaders (%d) %s to peer=%d\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4110 | 0 | m_chainman.m_best_header->nHeight, best_block->ToString(), |
4111 | 0 | pfrom.GetId()); |
4112 | 0 | } |
4113 | 0 | if (!state.fSyncStarted) { |
4114 | 0 | peer.m_inv_triggered_getheaders_before_sync = true; |
4115 | | // Update the last block hash that triggered a new headers |
4116 | | // sync, so that we don't turn on headers sync with more |
4117 | | // than 1 new peer every new block. |
4118 | 0 | m_last_block_inv_triggering_headers_sync = *best_block; |
4119 | 0 | } |
4120 | 0 | } |
4121 | 0 | } |
4122 | |
|
4123 | 0 | return; |
4124 | 0 | } |
4125 | | |
4126 | 30.8M | if (msg_type == NetMsgType::GETDATA) { |
4127 | 0 | std::vector<CInv> vInv; |
4128 | 0 | vRecv >> vInv; |
4129 | 0 | if (vInv.size() > MAX_INV_SZ) |
4130 | 0 | { |
4131 | 0 | Misbehaving(peer, strprintf("getdata message size = %u", vInv.size()));Line | Count | Source | 1172 | 0 | #define strprintf tfm::format |
|
4132 | 0 | return; |
4133 | 0 | } |
4134 | | |
4135 | 0 | LogDebug(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4136 | |
|
4137 | 0 | if (vInv.size() > 0) { |
4138 | 0 | LogDebug(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4139 | 0 | } |
4140 | |
|
4141 | 0 | if (pfrom.IsPrivateBroadcastConn()) { |
4142 | 0 | const auto pushed_tx_opt{m_tx_for_private_broadcast.GetTxForNode(pfrom.GetId())}; |
4143 | 0 | if (!pushed_tx_opt) { |
4144 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: got GETDATA without sending an INV, %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4145 | 0 | pfrom.LogPeer()); |
4146 | 0 | pfrom.fDisconnect = true; |
4147 | 0 | return; |
4148 | 0 | } |
4149 | | |
4150 | 0 | const CTransactionRef& pushed_tx{*pushed_tx_opt}; |
4151 | | |
4152 | | // The GETDATA request must contain exactly one inv and it must be for the transaction |
4153 | | // that we INVed to the peer earlier. |
4154 | 0 | if (vInv.size() == 1 && vInv[0].IsMsgTx() && vInv[0].hash == pushed_tx->GetHash().ToUint256()) { |
4155 | |
|
4156 | 0 | MakeAndPushMessage(pfrom, NetMsgType::TX, TX_WITH_WITNESS(*pushed_tx)); |
4157 | |
|
4158 | 0 | peer.m_ping_queued = true; // Ensure a ping will be sent: mimic a request via RPC. |
4159 | 0 | MaybeSendPing(pfrom, peer, GetTime<std::chrono::microseconds>()); |
4160 | 0 | } else { |
4161 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: got an unexpected GETDATA message, %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4162 | 0 | pfrom.LogPeer()); |
4163 | 0 | pfrom.fDisconnect = true; |
4164 | 0 | } |
4165 | 0 | return; |
4166 | 0 | } |
4167 | | |
4168 | 0 | { |
4169 | 0 | LOCK(peer.m_getdata_requests_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4170 | 0 | peer.m_getdata_requests.insert(peer.m_getdata_requests.end(), vInv.begin(), vInv.end()); |
4171 | 0 | ProcessGetData(pfrom, peer, interruptMsgProc); |
4172 | 0 | } |
4173 | |
|
4174 | 0 | return; |
4175 | 0 | } |
4176 | | |
4177 | 30.8M | if (msg_type == NetMsgType::GETBLOCKS) { |
4178 | 0 | CBlockLocator locator; |
4179 | 0 | uint256 hashStop; |
4180 | 0 | vRecv >> locator >> hashStop; |
4181 | |
|
4182 | 0 | if (locator.vHave.size() > MAX_LOCATOR_SZ) { |
4183 | 0 | LogDebug(BCLog::NET, "getblocks locator size %lld > %d, %s", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4184 | 0 | pfrom.fDisconnect = true; |
4185 | 0 | return; |
4186 | 0 | } |
4187 | | |
4188 | | // We might have announced the currently-being-connected tip using a |
4189 | | // compact block, which resulted in the peer sending a getblocks |
4190 | | // request, which we would otherwise respond to without the new block. |
4191 | | // To avoid this situation we simply verify that we are on our best |
4192 | | // known chain now. This is super overkill, but we handle it better |
4193 | | // for getheaders requests, and there are no known nodes which support |
4194 | | // compact blocks but still use getblocks to request blocks. |
4195 | 0 | { |
4196 | 0 | std::shared_ptr<const CBlock> a_recent_block; |
4197 | 0 | { |
4198 | 0 | LOCK(m_most_recent_block_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4199 | 0 | a_recent_block = m_most_recent_block; |
4200 | 0 | } |
4201 | 0 | BlockValidationState state; |
4202 | 0 | if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { |
4203 | 0 | LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4204 | 0 | } |
4205 | 0 | } |
4206 | |
|
4207 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4208 | | |
4209 | | // Find the last block the caller has in the main chain |
4210 | 0 | const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator); |
4211 | | |
4212 | | // Send the rest of the chain |
4213 | 0 | if (pindex) |
4214 | 0 | pindex = m_chainman.ActiveChain().Next(pindex); |
4215 | 0 | int nLimit = 500; |
4216 | 0 | LogDebug(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4217 | 0 | for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) |
4218 | 0 | { |
4219 | 0 | if (pindex->GetBlockHash() == hashStop) |
4220 | 0 | { |
4221 | 0 | LogDebug(BCLog::NET, " getblocks stopping at %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4222 | 0 | break; |
4223 | 0 | } |
4224 | | // If pruning, don't inv blocks unless we have on disk and are likely to still have |
4225 | | // for some reasonable time window (1 hour) that block relay might require. |
4226 | 0 | const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing; |
4227 | 0 | if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) { |
4228 | 0 | LogDebug(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4229 | 0 | break; |
4230 | 0 | } |
4231 | 0 | WITH_LOCK(peer.m_block_inv_mutex, peer.m_blocks_for_inv_relay.push_back(pindex->GetBlockHash())); Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
4232 | 0 | if (--nLimit <= 0) { |
4233 | | // When this block is requested, we'll send an inv that'll |
4234 | | // trigger the peer to getblocks the next batch of inventory. |
4235 | 0 | LogDebug(BCLog::NET, " getblocks stopping at limit %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4236 | 0 | WITH_LOCK(peer.m_block_inv_mutex, {peer.m_continuation_block = pindex->GetBlockHash();});Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
4237 | 0 | break; |
4238 | 0 | } |
4239 | 0 | } |
4240 | 0 | return; |
4241 | 0 | } |
4242 | | |
4243 | 30.8M | if (msg_type == NetMsgType::GETBLOCKTXN) { |
4244 | 0 | BlockTransactionsRequest req; |
4245 | 0 | vRecv >> req; |
4246 | | // Verify differential encoding invariant: indexes must be strictly increasing |
4247 | | // DifferenceFormatter should guarantee this property during deserialization |
4248 | 0 | for (size_t i = 1; i < req.indexes.size(); ++i) { |
4249 | 0 | Assume(req.indexes[i] > req.indexes[i-1]); Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
4250 | 0 | } |
4251 | |
|
4252 | 0 | std::shared_ptr<const CBlock> recent_block; |
4253 | 0 | { |
4254 | 0 | LOCK(m_most_recent_block_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4255 | 0 | if (m_most_recent_block_hash == req.blockhash) |
4256 | 0 | recent_block = m_most_recent_block; |
4257 | | // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion |
4258 | 0 | } |
4259 | 0 | if (recent_block) { |
4260 | 0 | SendBlockTransactions(pfrom, peer, *recent_block, req); |
4261 | 0 | return; |
4262 | 0 | } |
4263 | | |
4264 | 0 | FlatFilePos block_pos{}; |
4265 | 0 | { |
4266 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4267 | |
|
4268 | 0 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash); |
4269 | 0 | if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) { |
4270 | 0 | LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4271 | 0 | return; |
4272 | 0 | } |
4273 | | |
4274 | 0 | if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) { |
4275 | 0 | block_pos = pindex->GetBlockPos(); |
4276 | 0 | } |
4277 | 0 | } |
4278 | | |
4279 | 0 | if (!block_pos.IsNull()) { |
4280 | 0 | CBlock block; |
4281 | 0 | const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos, req.blockhash)}; |
4282 | | // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get |
4283 | | // pruned after we release cs_main above, so this read should never fail. |
4284 | 0 | assert(ret); |
4285 | | |
4286 | 0 | SendBlockTransactions(pfrom, peer, block, req); |
4287 | 0 | return; |
4288 | 0 | } |
4289 | | |
4290 | | // If an older block is requested (should never happen in practice, |
4291 | | // but can happen in tests) send a block response instead of a |
4292 | | // blocktxn response. Sending a full block response instead of a |
4293 | | // small blocktxn response is preferable in the case where a peer |
4294 | | // might maliciously send lots of getblocktxn requests to trigger |
4295 | | // expensive disk reads, because it will require the peer to |
4296 | | // actually receive all the data read from disk over the network. |
4297 | 0 | LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4298 | 0 | CInv inv{MSG_WITNESS_BLOCK, req.blockhash}; |
4299 | 0 | WITH_LOCK(peer.m_getdata_requests_mutex, peer.m_getdata_requests.push_back(inv)); Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
4300 | | // The message processing loop will go around again (without pausing) and we'll respond then |
4301 | 0 | return; |
4302 | 0 | } |
4303 | | |
4304 | 30.8M | if (msg_type == NetMsgType::GETHEADERS) { |
4305 | 0 | CBlockLocator locator; |
4306 | 0 | uint256 hashStop; |
4307 | 0 | vRecv >> locator >> hashStop; |
4308 | |
|
4309 | 0 | if (locator.vHave.size() > MAX_LOCATOR_SZ) { |
4310 | 0 | LogDebug(BCLog::NET, "getheaders locator size %lld > %d, %s", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4311 | 0 | pfrom.fDisconnect = true; |
4312 | 0 | return; |
4313 | 0 | } |
4314 | | |
4315 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) { |
4316 | 0 | LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4317 | 0 | return; |
4318 | 0 | } |
4319 | | |
4320 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4321 | | |
4322 | | // Don't serve headers from our active chain until our chainwork is at least |
4323 | | // the minimum chain work. This prevents us from starting a low-work headers |
4324 | | // sync that will inevitably be aborted by our peer. |
4325 | 0 | if (m_chainman.ActiveTip() == nullptr || |
4326 | 0 | (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) { |
4327 | 0 | LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4328 | | // Just respond with an empty headers message, to tell the peer to |
4329 | | // go away but not treat us as unresponsive. |
4330 | 0 | MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>()); |
4331 | 0 | return; |
4332 | 0 | } |
4333 | | |
4334 | 0 | CNodeState *nodestate = State(pfrom.GetId()); |
4335 | 0 | const CBlockIndex* pindex = nullptr; |
4336 | 0 | if (locator.IsNull()) |
4337 | 0 | { |
4338 | | // If locator is null, return the hashStop block |
4339 | 0 | pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop); |
4340 | 0 | if (!pindex) { |
4341 | 0 | return; |
4342 | 0 | } |
4343 | 0 | if (!BlockRequestAllowed(*pindex)) { |
4344 | 0 | LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4345 | 0 | return; |
4346 | 0 | } |
4347 | 0 | } |
4348 | 0 | else |
4349 | 0 | { |
4350 | | // Find the last block the caller has in the main chain |
4351 | 0 | pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator); |
4352 | 0 | if (pindex) |
4353 | 0 | pindex = m_chainman.ActiveChain().Next(pindex); |
4354 | 0 | } |
4355 | | |
4356 | | // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end |
4357 | 0 | std::vector<CBlock> vHeaders; |
4358 | 0 | int nLimit = m_opts.max_headers_result; |
4359 | 0 | LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4360 | 0 | for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) |
4361 | 0 | { |
4362 | 0 | vHeaders.emplace_back(pindex->GetBlockHeader()); |
4363 | 0 | if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) |
4364 | 0 | break; |
4365 | 0 | } |
4366 | | // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR |
4367 | | // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty |
4368 | | // headers message). In both cases it's safe to update |
4369 | | // pindexBestHeaderSent to be our tip. |
4370 | | // |
4371 | | // It is important that we simply reset the BestHeaderSent value here, |
4372 | | // and not max(BestHeaderSent, newHeaderSent). We might have announced |
4373 | | // the currently-being-connected tip using a compact block, which |
4374 | | // resulted in the peer sending a headers request, which we respond to |
4375 | | // without the new block. By resetting the BestHeaderSent, we ensure we |
4376 | | // will re-announce the new block via headers (or compact blocks again) |
4377 | | // in the SendMessages logic. |
4378 | 0 | nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip(); |
4379 | 0 | MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders)); |
4380 | 0 | return; |
4381 | 0 | } |
4382 | | |
4383 | 30.8M | if (msg_type == NetMsgType::TX) { |
4384 | 17.7M | if (RejectIncomingTxs(pfrom)) { |
4385 | 320 | LogDebug(BCLog::NET, "transaction sent in violation of protocol, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 320 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 320 | do { \ | 109 | 320 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 320 | } while (0) |
|
|
4386 | 320 | pfrom.fDisconnect = true; |
4387 | 320 | return; |
4388 | 320 | } |
4389 | | |
4390 | | // Stop processing the transaction early if we are still in IBD since we don't |
4391 | | // have enough information to validate it yet. Sending unsolicited transactions |
4392 | | // is not considered a protocol violation, so don't punish the peer. |
4393 | 17.7M | if (m_chainman.IsInitialBlockDownload()) return121k ; |
4394 | | |
4395 | 17.6M | CTransactionRef ptx; |
4396 | 17.6M | vRecv >> TX_WITH_WITNESS(ptx); |
4397 | | |
4398 | 17.6M | const Txid& txid = ptx->GetHash(); |
4399 | 17.6M | const Wtxid& wtxid = ptx->GetWitnessHash(); |
4400 | | |
4401 | 17.6M | const uint256& hash = peer.m_wtxid_relay ? wtxid.ToUint256()0 : txid.ToUint256(); |
4402 | 17.6M | AddKnownTx(peer, hash); |
4403 | | |
4404 | 17.6M | if (const auto num_broadcasted{m_tx_for_private_broadcast.Remove(ptx)}) { |
4405 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Received our privately broadcast transaction (txid=%s) from the " Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4406 | 0 | "network from %s; stopping private broadcast attempts", |
4407 | 0 | txid.ToString(), pfrom.LogPeer()); |
4408 | 0 | if (NUM_PRIVATE_BROADCAST_PER_TX > num_broadcasted.value()) { |
4409 | | // Not all of the initial NUM_PRIVATE_BROADCAST_PER_TX connections were needed. |
4410 | | // Tell CConnman it does not need to start the remaining ones. |
4411 | 0 | m_connman.m_private_broadcast.NumToOpenSub(NUM_PRIVATE_BROADCAST_PER_TX - num_broadcasted.value()); |
4412 | 0 | } |
4413 | 0 | } |
4414 | | |
4415 | 17.6M | LOCK2(cs_main, m_tx_download_mutex); Line | Count | Source | 270 | 17.6M | UniqueLock criticalblock1(MaybeCheckNotHeld(cs1), #cs1, __FILE__, __LINE__); \ | 271 | 17.6M | UniqueLock criticalblock2(MaybeCheckNotHeld(cs2), #cs2, __FILE__, __LINE__) |
|
4416 | | |
4417 | 17.6M | const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx); |
4418 | 17.6M | if (!should_validate) { |
4419 | 12.4M | if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) { |
4420 | | // Always relay transactions received from peers with forcerelay |
4421 | | // permission, even if they were already in the mempool, allowing |
4422 | | // the node to function as a gateway for nodes hidden behind it. |
4423 | 3.44M | if (!m_mempool.exists(txid)) { |
4424 | 1.16M | LogInfo("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",Line | Count | Source | 97 | 1.16M | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 1.16M | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
4425 | 1.16M | txid.ToString(), wtxid.ToString(), pfrom.GetId()); |
4426 | 2.28M | } else { |
4427 | 2.28M | LogInfo("Force relaying tx %s (wtxid=%s) from peer=%d\n",Line | Count | Source | 97 | 2.28M | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 2.28M | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
4428 | 2.28M | txid.ToString(), wtxid.ToString(), pfrom.GetId()); |
4429 | 2.28M | InitiateTxBroadcastToAll(txid, wtxid); |
4430 | 2.28M | } |
4431 | 3.44M | } |
4432 | | |
4433 | 12.4M | if (package_to_validate) { |
4434 | 50.5k | const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; |
4435 | 50.5k | LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), Line | Count | Source | 117 | 50.5k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 50.5k | do { \ | 109 | 50.5k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 50.5k | } while (0) |
|
|
4436 | 50.5k | package_result.m_state.IsValid() ? "package accepted" : "package rejected"); |
4437 | 50.5k | ProcessPackageResult(package_to_validate.value(), package_result); |
4438 | 50.5k | } |
4439 | 12.4M | return; |
4440 | 12.4M | } |
4441 | | |
4442 | | // ReceivedTx should not be telling us to validate the tx and a package. |
4443 | 5.17M | Assume(!package_to_validate.has_value()); Line | Count | Source | 125 | 5.17M | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
4444 | | |
4445 | 5.17M | const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx); |
4446 | 5.17M | const TxValidationState& state = result.m_state; |
4447 | | |
4448 | 5.17M | if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { |
4449 | 986k | ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions); |
4450 | 986k | pfrom.m_last_tx_time = GetTime<std::chrono::seconds>(); |
4451 | 986k | } |
4452 | 5.17M | if (state.IsInvalid()) { |
4453 | 4.19M | if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, /*first_time_failure=*/true)}) { |
4454 | 85.5k | const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; |
4455 | 85.5k | LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), Line | Count | Source | 117 | 85.5k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 85.5k | do { \ | 109 | 85.5k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 85.5k | } while (0) |
|
|
4456 | 85.5k | package_result.m_state.IsValid() ? "package accepted" : "package rejected"); |
4457 | 85.5k | ProcessPackageResult(package_to_validate.value(), package_result); |
4458 | 85.5k | } |
4459 | 4.19M | } |
4460 | | |
4461 | 5.17M | return; |
4462 | 17.6M | } |
4463 | | |
4464 | 13.0M | if (msg_type == NetMsgType::CMPCTBLOCK) |
4465 | 3.50M | { |
4466 | | // Ignore cmpctblock received while importing |
4467 | 3.50M | if (m_chainman.m_blockman.LoadingBlocks()) { |
4468 | 0 | LogDebug(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4469 | 0 | return; |
4470 | 0 | } |
4471 | | |
4472 | 3.50M | CBlockHeaderAndShortTxIDs cmpctblock; |
4473 | 3.50M | vRecv >> cmpctblock; |
4474 | | |
4475 | 3.50M | bool received_new_header = false; |
4476 | 3.50M | const auto blockhash = cmpctblock.header.GetHash(); |
4477 | | |
4478 | 3.50M | { |
4479 | 3.50M | LOCK(cs_main); Line | Count | Source | 268 | 3.50M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 3.50M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 3.50M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 3.50M | #define PASTE(x, y) x ## y |
|
|
|
|
4480 | | |
4481 | 3.50M | const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock); |
4482 | 3.50M | if (!prev_block) { |
4483 | | // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers |
4484 | 49.1k | if (!m_chainman.IsInitialBlockDownload()) { |
4485 | 16.7k | MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), peer); |
4486 | 16.7k | } |
4487 | 49.1k | return; |
4488 | 3.45M | } else if (prev_block->nChainWork + GetBlockProof(cmpctblock.header) < GetAntiDoSWorkThreshold()) { |
4489 | | // If we get a low-work header in a compact block, we can ignore it. |
4490 | 0 | LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4491 | 0 | return; |
4492 | 0 | } |
4493 | | |
4494 | 3.45M | if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) { |
4495 | 1.06M | received_new_header = true; |
4496 | 1.06M | } |
4497 | 3.45M | } |
4498 | | |
4499 | 0 | const CBlockIndex *pindex = nullptr; |
4500 | 3.45M | BlockValidationState state; |
4501 | 3.45M | if (!m_chainman.ProcessNewBlockHeaders({{cmpctblock.header}}, /*min_pow_checked=*/true, state, &pindex)) { |
4502 | 383k | if (state.IsInvalid()) { |
4503 | 383k | MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock"); |
4504 | 383k | return; |
4505 | 383k | } |
4506 | 383k | } |
4507 | | |
4508 | | // If AcceptBlockHeader returned true, it set pindex |
4509 | 3.07M | Assert(pindex); Line | Count | Source | 113 | 3.07M | #define Assert(val) inline_assertion_check<true>(val, std::source_location::current(), #val) |
|
4510 | 3.07M | if (received_new_header) { |
4511 | 704k | LogBlockHeader(*pindex, pfrom, /*via_compact_block=*/true); |
4512 | 704k | } |
4513 | | |
4514 | 3.07M | bool fProcessBLOCKTXN = false; |
4515 | | |
4516 | | // If we end up treating this as a plain headers message, call that as well |
4517 | | // without cs_main. |
4518 | 3.07M | bool fRevertToHeaderProcessing = false; |
4519 | | |
4520 | | // Keep a CBlock for "optimistic" compactblock reconstructions (see |
4521 | | // below) |
4522 | 3.07M | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
4523 | 3.07M | bool fBlockReconstructed = false; |
4524 | | |
4525 | 3.07M | { |
4526 | 3.07M | LOCK(cs_main); Line | Count | Source | 268 | 3.07M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 3.07M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 3.07M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 3.07M | #define PASTE(x, y) x ## y |
|
|
|
|
4527 | 3.07M | UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash()); |
4528 | | |
4529 | 3.07M | CNodeState *nodestate = State(pfrom.GetId()); |
4530 | | |
4531 | | // If this was a new header with more work than our tip, update the |
4532 | | // peer's last block announcement time |
4533 | 3.07M | if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork704k ) { |
4534 | 694k | nodestate->m_last_block_announcement = GetTime(); |
4535 | 694k | } |
4536 | | |
4537 | 3.07M | if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here |
4538 | 157k | return; |
4539 | | |
4540 | 2.91M | auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash()); |
4541 | 2.91M | size_t already_in_flight = std::distance(range_flight.first, range_flight.second); |
4542 | 2.91M | bool requested_block_from_this_peer{false}; |
4543 | | |
4544 | | // Multimap ensures ordering of outstanding requests. It's either empty or first in line. |
4545 | 2.91M | bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId())1.84M ; |
4546 | | |
4547 | 3.26M | while (range_flight.first != range_flight.second) { |
4548 | 1.85M | if (range_flight.first->second.first == pfrom.GetId()) { |
4549 | 1.50M | requested_block_from_this_peer = true; |
4550 | 1.50M | break; |
4551 | 1.50M | } |
4552 | 350k | range_flight.first++; |
4553 | 350k | } |
4554 | | |
4555 | 2.91M | if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better |
4556 | 2.91M | pindex->nTx != 02.76M ) { // We had this block at some point, but pruned it |
4557 | 152k | if (requested_block_from_this_peer) { |
4558 | | // We requested this block for some reason, but our mempool will probably be useless |
4559 | | // so we just grab the block via normal getdata |
4560 | 94.3k | std::vector<CInv> vInv(1); |
4561 | 94.3k | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(peer), blockhash); |
4562 | 94.3k | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4563 | 94.3k | } |
4564 | 152k | return; |
4565 | 152k | } |
4566 | | |
4567 | | // If we're not close to tip yet, give up and let parallel block fetch work its magic |
4568 | 2.76M | if (!already_in_flight && !CanDirectFetch()1.02M ) { |
4569 | 246k | return; |
4570 | 246k | } |
4571 | | |
4572 | | // We want to be a bit conservative just to be extra careful about DoS |
4573 | | // possibilities in compact block processing... |
4574 | 2.51M | if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) { |
4575 | 2.48M | if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || |
4576 | 2.48M | requested_block_from_this_peer457k ) { |
4577 | 2.19M | std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr; |
4578 | 2.19M | if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) { |
4579 | 1.39M | if (!(*queuedBlockIt)->partialBlock) |
4580 | 19.3k | (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); |
4581 | 1.37M | else { |
4582 | | // The block was already in flight using compact blocks from the same peer |
4583 | 1.37M | LogDebug(BCLog::NET, "Peer sent us compact block we were already syncing!\n"); Line | Count | Source | 117 | 1.37M | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 1.37M | do { \ | 109 | 1.37M | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 1.37M | } while (0) |
|
|
4584 | 1.37M | return; |
4585 | 1.37M | } |
4586 | 1.39M | } |
4587 | | |
4588 | 821k | PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; |
4589 | 821k | ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); |
4590 | 821k | if (status == READ_STATUS_INVALID) { |
4591 | 0 | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect |
4592 | 0 | Misbehaving(peer, "invalid compact block"); |
4593 | 0 | return; |
4594 | 821k | } else if (status == READ_STATUS_FAILED) { |
4595 | 247k | if (first_in_flight) { |
4596 | | // Duplicate txindexes, the block is now in-flight, so just request it |
4597 | 93.4k | std::vector<CInv> vInv(1); |
4598 | 93.4k | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(peer), blockhash); |
4599 | 93.4k | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4600 | 153k | } else { |
4601 | | // Give up for this peer and wait for other peer(s) |
4602 | 153k | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); |
4603 | 153k | } |
4604 | 247k | return; |
4605 | 247k | } |
4606 | | |
4607 | 574k | BlockTransactionsRequest req; |
4608 | 1.76M | for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++1.19M ) { |
4609 | 1.19M | if (!partialBlock.IsTxAvailable(i)) |
4610 | 296k | req.indexes.push_back(i); |
4611 | 1.19M | } |
4612 | 574k | if (req.indexes.empty()) { |
4613 | 441k | fProcessBLOCKTXN = true; |
4614 | 441k | } else if (132k first_in_flight132k ) { |
4615 | | // We will try to round-trip any compact blocks we get on failure, |
4616 | | // as long as it's first... |
4617 | 41.4k | req.blockhash = pindex->GetBlockHash(); |
4618 | 41.4k | MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req); |
4619 | 91.2k | } else if (pfrom.m_bip152_highbandwidth_to && |
4620 | 91.2k | (1.49k !pfrom.IsInboundConn()1.49k || |
4621 | 1.49k | IsBlockRequestedFromOutbound(blockhash)390 || |
4622 | 1.49k | already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 121 )) { |
4623 | | // ... or it's a hb relay peer and: |
4624 | | // - peer is outbound, or |
4625 | | // - we already have an outbound attempt in flight(so we'll take what we can get), or |
4626 | | // - it's not the final parallel download slot (which we may reserve for first outbound) |
4627 | 1.49k | req.blockhash = pindex->GetBlockHash(); |
4628 | 1.49k | MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req); |
4629 | 89.7k | } else { |
4630 | | // Give up for this peer and wait for other peer(s) |
4631 | 89.7k | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); |
4632 | 89.7k | } |
4633 | 574k | } else { |
4634 | | // This block is either already in flight from a different |
4635 | | // peer, or this peer has too many blocks outstanding to |
4636 | | // download from. |
4637 | | // Optimistically try to reconstruct anyway since we might be |
4638 | | // able to without any round trips. |
4639 | 289k | PartiallyDownloadedBlock tempBlock(&m_mempool); |
4640 | 289k | ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact); |
4641 | 289k | if (status != READ_STATUS_OK) { |
4642 | | // TODO: don't ignore failures |
4643 | 279k | return; |
4644 | 279k | } |
4645 | 10.1k | std::vector<CTransactionRef> dummy; |
4646 | 10.1k | const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))};Line | Count | Source | 125 | 10.1k | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
4647 | 10.1k | status = tempBlock.FillBlock(*pblock, dummy, |
4648 | 10.1k | /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); |
4649 | 10.1k | if (status == READ_STATUS_OK) { |
4650 | 3.01k | fBlockReconstructed = true; |
4651 | 3.01k | } |
4652 | 10.1k | } |
4653 | 2.48M | } else { |
4654 | 34.6k | if (requested_block_from_this_peer) { |
4655 | | // We requested this block, but its far into the future, so our |
4656 | | // mempool will probably be useless - request the block normally |
4657 | 19.0k | std::vector<CInv> vInv(1); |
4658 | 19.0k | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(peer), blockhash); |
4659 | 19.0k | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4660 | 19.0k | return; |
4661 | 19.0k | } else { |
4662 | | // If this was an announce-cmpctblock, we want the same treatment as a header message |
4663 | 15.6k | fRevertToHeaderProcessing = true; |
4664 | 15.6k | } |
4665 | 34.6k | } |
4666 | 2.51M | } // cs_main |
4667 | | |
4668 | 600k | if (fProcessBLOCKTXN) { |
4669 | 441k | BlockTransactions txn; |
4670 | 441k | txn.blockhash = blockhash; |
4671 | 441k | return ProcessCompactBlockTxns(pfrom, peer, txn); |
4672 | 441k | } |
4673 | | |
4674 | 158k | if (fRevertToHeaderProcessing) { |
4675 | | // Headers received from HB compact block peers are permitted to be |
4676 | | // relayed before full validation (see BIP 152), so we don't want to disconnect |
4677 | | // the peer if the header turns out to be for an invalid block. |
4678 | | // Note that if a peer tries to build on an invalid chain, that |
4679 | | // will be detected and the peer will be disconnected/discouraged. |
4680 | 15.6k | return ProcessHeadersMessage(pfrom, peer, {cmpctblock.header}, /*via_compact_block=*/true); |
4681 | 15.6k | } |
4682 | | |
4683 | 142k | if (fBlockReconstructed) { |
4684 | | // If we got here, we were able to optimistically reconstruct a |
4685 | | // block that is in flight from some other peer. |
4686 | 3.01k | { |
4687 | 3.01k | LOCK(cs_main); Line | Count | Source | 268 | 3.01k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 3.01k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 3.01k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 3.01k | #define PASTE(x, y) x ## y |
|
|
|
|
4688 | 3.01k | mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false)); |
4689 | 3.01k | } |
4690 | | // Setting force_processing to true means that we bypass some of |
4691 | | // our anti-DoS protections in AcceptBlock, which filters |
4692 | | // unrequested blocks that might be trying to waste our resources |
4693 | | // (eg disk space). Because we only try to reconstruct blocks when |
4694 | | // we're close to caught up (via the CanDirectFetch() requirement |
4695 | | // above, combined with the behavior of not requesting blocks until |
4696 | | // we have a chain with at least the minimum chain work), and we ignore |
4697 | | // compact blocks with less work than our tip, it is safe to treat |
4698 | | // reconstructed compact blocks as having been requested. |
4699 | 3.01k | ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); |
4700 | 3.01k | LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid() Line | Count | Source | 268 | 3.01k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 3.01k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 3.01k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 3.01k | #define PASTE(x, y) x ## y |
|
|
|
|
4701 | 3.01k | if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) { |
4702 | | // Clear download state for this block, which is in |
4703 | | // process from some other peer. We do this after calling |
4704 | | // ProcessNewBlock so that a malleated cmpctblock announcement |
4705 | | // can't be used to interfere with block relay. |
4706 | 1.82k | RemoveBlockRequest(pblock->GetHash(), std::nullopt); |
4707 | 1.82k | } |
4708 | 3.01k | } |
4709 | 142k | return; |
4710 | 158k | } |
4711 | | |
4712 | 9.55M | if (msg_type == NetMsgType::BLOCKTXN) |
4713 | 929k | { |
4714 | | // Ignore blocktxn received while importing |
4715 | 929k | if (m_chainman.m_blockman.LoadingBlocks()) { |
4716 | 0 | LogDebug(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4717 | 0 | return; |
4718 | 0 | } |
4719 | | |
4720 | 929k | BlockTransactions resp; |
4721 | 929k | vRecv >> resp; |
4722 | | |
4723 | 929k | return ProcessCompactBlockTxns(pfrom, peer, resp); |
4724 | 929k | } |
4725 | | |
4726 | 8.62M | if (msg_type == NetMsgType::HEADERS) |
4727 | 8.62M | { |
4728 | | // Ignore headers received while importing |
4729 | 8.62M | if (m_chainman.m_blockman.LoadingBlocks()) { |
4730 | 0 | LogDebug(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4731 | 0 | return; |
4732 | 0 | } |
4733 | | |
4734 | 8.62M | std::vector<CBlockHeader> headers; |
4735 | | |
4736 | | // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. |
4737 | 8.62M | unsigned int nCount = ReadCompactSize(vRecv); |
4738 | 8.62M | if (nCount > m_opts.max_headers_result) { |
4739 | 0 | Misbehaving(peer, strprintf("headers message size = %u", nCount));Line | Count | Source | 1172 | 0 | #define strprintf tfm::format |
|
4740 | 0 | return; |
4741 | 0 | } |
4742 | 8.62M | headers.resize(nCount); |
4743 | 17.2M | for (unsigned int n = 0; n < nCount; n++8.62M ) { |
4744 | 8.62M | vRecv >> headers[n]; |
4745 | 8.62M | ReadCompactSize(vRecv); // ignore tx count; assume it is 0. |
4746 | 8.62M | } |
4747 | | |
4748 | 8.62M | ProcessHeadersMessage(pfrom, peer, std::move(headers), /*via_compact_block=*/false); |
4749 | | |
4750 | | // Check if the headers presync progress needs to be reported to validation. |
4751 | | // This needs to be done without holding the m_headers_presync_mutex lock. |
4752 | 8.62M | if (m_headers_presync_should_signal.exchange(false)) { |
4753 | 0 | HeadersPresyncStats stats; |
4754 | 0 | { |
4755 | 0 | LOCK(m_headers_presync_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4756 | 0 | auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer); |
4757 | 0 | if (it != m_headers_presync_stats.end()) stats = it->second; |
4758 | 0 | } |
4759 | 0 | if (stats.second) { |
4760 | 0 | m_chainman.ReportHeadersPresync(stats.second->first, stats.second->second); |
4761 | 0 | } |
4762 | 0 | } |
4763 | | |
4764 | 8.62M | return; |
4765 | 8.62M | } |
4766 | | |
4767 | 0 | if (msg_type == NetMsgType::BLOCK) |
4768 | 0 | { |
4769 | | // Ignore block received while importing |
4770 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) { |
4771 | 0 | LogDebug(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4772 | 0 | return; |
4773 | 0 | } |
4774 | | |
4775 | 0 | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
4776 | 0 | vRecv >> TX_WITH_WITNESS(*pblock); |
4777 | |
|
4778 | 0 | LogDebug(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4779 | |
|
4780 | 0 | const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))};Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
4781 | | |
4782 | | // Check for possible mutation if it connects to something we know so we can check for DEPLOYMENT_SEGWIT being active |
4783 | 0 | if (prev_block && IsBlockMutated(/*block=*/*pblock, |
4784 | 0 | /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) { |
4785 | 0 | LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer.m_id); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4786 | 0 | Misbehaving(peer, "mutated block"); |
4787 | 0 | WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer.m_id)); Line | Count | Source | 299 | 0 | #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) |
|
4788 | 0 | return; |
4789 | 0 | } |
4790 | | |
4791 | 0 | bool forceProcessing = false; |
4792 | 0 | const uint256 hash(pblock->GetHash()); |
4793 | 0 | bool min_pow_checked = false; |
4794 | 0 | { |
4795 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4796 | | // Always process the block if we requested it, since we may |
4797 | | // need it even when it's not a candidate for a new best tip. |
4798 | 0 | forceProcessing = IsBlockRequested(hash); |
4799 | 0 | RemoveBlockRequest(hash, pfrom.GetId()); |
4800 | | // mapBlockSource is only used for punishing peers and setting |
4801 | | // which peers send us compact blocks, so the race between here and |
4802 | | // cs_main in ProcessNewBlock is fine. |
4803 | 0 | mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true)); |
4804 | | |
4805 | | // Check claimed work on this block against our anti-dos thresholds. |
4806 | 0 | if (prev_block && prev_block->nChainWork + GetBlockProof(*pblock) >= GetAntiDoSWorkThreshold()) { |
4807 | 0 | min_pow_checked = true; |
4808 | 0 | } |
4809 | 0 | } |
4810 | 0 | ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked); |
4811 | 0 | return; |
4812 | 0 | } |
4813 | | |
4814 | 0 | if (msg_type == NetMsgType::GETADDR) { |
4815 | | // This asymmetric behavior for inbound and outbound connections was introduced |
4816 | | // to prevent a fingerprinting attack: an attacker can send specific fake addresses |
4817 | | // to users' AddrMan and later request them by sending getaddr messages. |
4818 | | // Making nodes which are behind NAT and can only make outgoing connections ignore |
4819 | | // the getaddr message mitigates the attack. |
4820 | 0 | if (!pfrom.IsInboundConn()) { |
4821 | 0 | LogDebug(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4822 | 0 | return; |
4823 | 0 | } |
4824 | | |
4825 | | // Since this must be an inbound connection, SetupAddressRelay will |
4826 | | // never fail. |
4827 | 0 | Assume(SetupAddressRelay(pfrom, peer)); Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
4828 | | |
4829 | | // Only send one GetAddr response per connection to reduce resource waste |
4830 | | // and discourage addr stamping of INV announcements. |
4831 | 0 | if (peer.m_getaddr_recvd) { |
4832 | 0 | LogDebug(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4833 | 0 | return; |
4834 | 0 | } |
4835 | 0 | peer.m_getaddr_recvd = true; |
4836 | |
|
4837 | 0 | peer.m_addrs_to_send.clear(); |
4838 | 0 | std::vector<CAddress> vAddr; |
4839 | 0 | if (pfrom.HasPermission(NetPermissionFlags::Addr)) { |
4840 | 0 | vAddr = m_connman.GetAddressesUnsafe(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt); |
4841 | 0 | } else { |
4842 | 0 | vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); |
4843 | 0 | } |
4844 | 0 | for (const CAddress &addr : vAddr) { |
4845 | 0 | PushAddress(peer, addr); |
4846 | 0 | } |
4847 | 0 | return; |
4848 | 0 | } |
4849 | | |
4850 | 0 | if (msg_type == NetMsgType::MEMPOOL) { |
4851 | | // Only process received mempool messages if we advertise NODE_BLOOM |
4852 | | // or if the peer has mempool permissions. |
4853 | 0 | if (!(peer.m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) |
4854 | 0 | { |
4855 | 0 | if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) |
4856 | 0 | { |
4857 | 0 | LogDebug(BCLog::NET, "mempool request with bloom filters disabled, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4858 | 0 | pfrom.fDisconnect = true; |
4859 | 0 | } |
4860 | 0 | return; |
4861 | 0 | } |
4862 | | |
4863 | 0 | if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) |
4864 | 0 | { |
4865 | 0 | if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) |
4866 | 0 | { |
4867 | 0 | LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4868 | 0 | pfrom.fDisconnect = true; |
4869 | 0 | } |
4870 | 0 | return; |
4871 | 0 | } |
4872 | | |
4873 | 0 | if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { |
4874 | 0 | LOCK(tx_relay->m_tx_inventory_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4875 | 0 | tx_relay->m_send_mempool = true; |
4876 | 0 | } |
4877 | 0 | return; |
4878 | 0 | } |
4879 | | |
4880 | 0 | if (msg_type == NetMsgType::PING) { |
4881 | 0 | if (pfrom.GetCommonVersion() > BIP0031_VERSION) { |
4882 | 0 | uint64_t nonce = 0; |
4883 | 0 | vRecv >> nonce; |
4884 | | // Echo the message back with the nonce. This allows for two useful features: |
4885 | | // |
4886 | | // 1) A remote node can quickly check if the connection is operational |
4887 | | // 2) Remote nodes can measure the latency of the network thread. If this node |
4888 | | // is overloaded it won't respond to pings quickly and the remote node can |
4889 | | // avoid sending us more work, like chain download requests. |
4890 | | // |
4891 | | // The nonce stops the remote getting confused between different pings: without |
4892 | | // it, if the remote node sends a ping once per second and this node takes 5 |
4893 | | // seconds to respond to each, the 5th ping the remote sends would appear to |
4894 | | // return very quickly. |
4895 | 0 | MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce); |
4896 | 0 | } |
4897 | 0 | return; |
4898 | 0 | } |
4899 | | |
4900 | 0 | if (msg_type == NetMsgType::PONG) { |
4901 | 0 | const auto ping_end = time_received; |
4902 | 0 | uint64_t nonce = 0; |
4903 | 0 | size_t nAvail = vRecv.in_avail(); |
4904 | 0 | bool bPingFinished = false; |
4905 | 0 | std::string sProblem; |
4906 | |
|
4907 | 0 | if (nAvail >= sizeof(nonce)) { |
4908 | 0 | vRecv >> nonce; |
4909 | | |
4910 | | // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) |
4911 | 0 | if (peer.m_ping_nonce_sent != 0) { |
4912 | 0 | if (nonce == peer.m_ping_nonce_sent) { |
4913 | | // Matching pong received, this ping is no longer outstanding |
4914 | 0 | bPingFinished = true; |
4915 | 0 | const auto ping_time = ping_end - peer.m_ping_start.load(); |
4916 | 0 | if (ping_time.count() >= 0) { |
4917 | | // Let connman know about this successful ping-pong |
4918 | 0 | pfrom.PongReceived(ping_time); |
4919 | 0 | if (pfrom.IsPrivateBroadcastConn()) { |
4920 | 0 | m_tx_for_private_broadcast.NodeConfirmedReception(pfrom.GetId()); |
4921 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Got a PONG (the transaction will probably reach the network), marking for disconnect, %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4922 | 0 | pfrom.LogPeer()); |
4923 | 0 | pfrom.fDisconnect = true; |
4924 | 0 | } |
4925 | 0 | } else { |
4926 | | // This should never happen |
4927 | 0 | sProblem = "Timing mishap"; |
4928 | 0 | } |
4929 | 0 | } else { |
4930 | | // Nonce mismatches are normal when pings are overlapping |
4931 | 0 | sProblem = "Nonce mismatch"; |
4932 | 0 | if (nonce == 0) { |
4933 | | // This is most likely a bug in another implementation somewhere; cancel this ping |
4934 | 0 | bPingFinished = true; |
4935 | 0 | sProblem = "Nonce zero"; |
4936 | 0 | } |
4937 | 0 | } |
4938 | 0 | } else { |
4939 | 0 | sProblem = "Unsolicited pong without ping"; |
4940 | 0 | } |
4941 | 0 | } else { |
4942 | | // This is most likely a bug in another implementation somewhere; cancel this ping |
4943 | 0 | bPingFinished = true; |
4944 | 0 | sProblem = "Short payload"; |
4945 | 0 | } |
4946 | |
|
4947 | 0 | if (!(sProblem.empty())) { |
4948 | 0 | LogDebug(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4949 | 0 | pfrom.GetId(), |
4950 | 0 | sProblem, |
4951 | 0 | peer.m_ping_nonce_sent, |
4952 | 0 | nonce, |
4953 | 0 | nAvail); |
4954 | 0 | } |
4955 | 0 | if (bPingFinished) { |
4956 | 0 | peer.m_ping_nonce_sent = 0; |
4957 | 0 | } |
4958 | 0 | return; |
4959 | 0 | } |
4960 | | |
4961 | 0 | if (msg_type == NetMsgType::FILTERLOAD) { |
4962 | 0 | if (!(peer.m_our_services & NODE_BLOOM)) { |
4963 | 0 | LogDebug(BCLog::NET, "filterload received despite not offering bloom services, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4964 | 0 | pfrom.fDisconnect = true; |
4965 | 0 | return; |
4966 | 0 | } |
4967 | 0 | CBloomFilter filter; |
4968 | 0 | vRecv >> filter; |
4969 | |
|
4970 | 0 | if (!filter.IsWithinSizeConstraints()) |
4971 | 0 | { |
4972 | | // There is no excuse for sending a too-large filter |
4973 | 0 | Misbehaving(peer, "too-large bloom filter"); |
4974 | 0 | } else if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { |
4975 | 0 | { |
4976 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
4977 | 0 | tx_relay->m_bloom_filter.reset(new CBloomFilter(filter)); |
4978 | 0 | tx_relay->m_relay_txs = true; |
4979 | 0 | } |
4980 | 0 | pfrom.m_bloom_filter_loaded = true; |
4981 | 0 | pfrom.m_relays_txs = true; |
4982 | 0 | } |
4983 | 0 | return; |
4984 | 0 | } |
4985 | | |
4986 | 0 | if (msg_type == NetMsgType::FILTERADD) { |
4987 | 0 | if (!(peer.m_our_services & NODE_BLOOM)) { |
4988 | 0 | LogDebug(BCLog::NET, "filteradd received despite not offering bloom services, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
4989 | 0 | pfrom.fDisconnect = true; |
4990 | 0 | return; |
4991 | 0 | } |
4992 | 0 | std::vector<unsigned char> vData; |
4993 | 0 | vRecv >> vData; |
4994 | | |
4995 | | // Nodes must NEVER send a data item > MAX_SCRIPT_ELEMENT_SIZE bytes (the max size for a script data object, |
4996 | | // and thus, the maximum size any matched object can have) in a filteradd message |
4997 | 0 | bool bad = false; |
4998 | 0 | if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { |
4999 | 0 | bad = true; |
5000 | 0 | } else if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { |
5001 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
5002 | 0 | if (tx_relay->m_bloom_filter) { |
5003 | 0 | tx_relay->m_bloom_filter->insert(vData); |
5004 | 0 | } else { |
5005 | 0 | bad = true; |
5006 | 0 | } |
5007 | 0 | } |
5008 | 0 | if (bad) { |
5009 | 0 | Misbehaving(peer, "bad filteradd message"); |
5010 | 0 | } |
5011 | 0 | return; |
5012 | 0 | } |
5013 | | |
5014 | 0 | if (msg_type == NetMsgType::FILTERCLEAR) { |
5015 | 0 | if (!(peer.m_our_services & NODE_BLOOM)) { |
5016 | 0 | LogDebug(BCLog::NET, "filterclear received despite not offering bloom services, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5017 | 0 | pfrom.fDisconnect = true; |
5018 | 0 | return; |
5019 | 0 | } |
5020 | 0 | auto tx_relay = peer.GetTxRelay(); |
5021 | 0 | if (!tx_relay) return; |
5022 | | |
5023 | 0 | { |
5024 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
5025 | 0 | tx_relay->m_bloom_filter = nullptr; |
5026 | 0 | tx_relay->m_relay_txs = true; |
5027 | 0 | } |
5028 | 0 | pfrom.m_bloom_filter_loaded = false; |
5029 | 0 | pfrom.m_relays_txs = true; |
5030 | 0 | return; |
5031 | 0 | } |
5032 | | |
5033 | 0 | if (msg_type == NetMsgType::FEEFILTER) { |
5034 | 0 | CAmount newFeeFilter = 0; |
5035 | 0 | vRecv >> newFeeFilter; |
5036 | 0 | if (MoneyRange(newFeeFilter)) { |
5037 | 0 | if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { |
5038 | 0 | tx_relay->m_fee_filter_received = newFeeFilter; |
5039 | 0 | } |
5040 | 0 | LogDebug(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5041 | 0 | } |
5042 | 0 | return; |
5043 | 0 | } |
5044 | | |
5045 | 0 | if (msg_type == NetMsgType::GETCFILTERS) { |
5046 | 0 | ProcessGetCFilters(pfrom, peer, vRecv); |
5047 | 0 | return; |
5048 | 0 | } |
5049 | | |
5050 | 0 | if (msg_type == NetMsgType::GETCFHEADERS) { |
5051 | 0 | ProcessGetCFHeaders(pfrom, peer, vRecv); |
5052 | 0 | return; |
5053 | 0 | } |
5054 | | |
5055 | 0 | if (msg_type == NetMsgType::GETCFCHECKPT) { |
5056 | 0 | ProcessGetCFCheckPt(pfrom, peer, vRecv); |
5057 | 0 | return; |
5058 | 0 | } |
5059 | | |
5060 | 0 | if (msg_type == NetMsgType::NOTFOUND) { |
5061 | 0 | std::vector<CInv> vInv; |
5062 | 0 | vRecv >> vInv; |
5063 | 0 | std::vector<GenTxid> tx_invs; |
5064 | 0 | if (vInv.size() <= node::MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { |
5065 | 0 | for (CInv &inv : vInv) { |
5066 | 0 | if (inv.IsGenTxMsg()) { |
5067 | 0 | tx_invs.emplace_back(ToGenTxid(inv)); |
5068 | 0 | } |
5069 | 0 | } |
5070 | 0 | } |
5071 | 0 | LOCK(m_tx_download_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
5072 | 0 | m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs); |
5073 | 0 | return; |
5074 | 0 | } |
5075 | | |
5076 | | // Ignore unknown message types for extensibility |
5077 | 0 | LogDebug(BCLog::NET, "Unknown message type \"%s\" from peer=%d", SanitizeString(msg_type), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5078 | 0 | return; |
5079 | 0 | } |
5080 | | |
5081 | | bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) |
5082 | 36.2M | { |
5083 | 36.2M | { |
5084 | 36.2M | LOCK(peer.m_misbehavior_mutex); Line | Count | Source | 268 | 36.2M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 36.2M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 36.2M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 36.2M | #define PASTE(x, y) x ## y |
|
|
|
|
5085 | | |
5086 | | // There's nothing to do if the m_should_discourage flag isn't set |
5087 | 36.2M | if (!peer.m_should_discourage) return false35.3M ; |
5088 | | |
5089 | 901k | peer.m_should_discourage = false; |
5090 | 901k | } // peer.m_misbehavior_mutex |
5091 | | |
5092 | 901k | if (pnode.HasPermission(NetPermissionFlags::NoBan)) { |
5093 | | // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission |
5094 | 113k | LogWarning("Not punishing noban peer %d!", peer.m_id);Line | Count | Source | 98 | 113k | #define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 113k | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
5095 | 113k | return false; |
5096 | 113k | } |
5097 | | |
5098 | 787k | if (pnode.IsManualConn()) { |
5099 | | // We never disconnect or discourage manual peers for bad behavior |
5100 | 592k | LogWarning("Not punishing manually connected peer %d!", peer.m_id);Line | Count | Source | 98 | 592k | #define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 592k | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
5101 | 592k | return false; |
5102 | 592k | } |
5103 | | |
5104 | 194k | if (pnode.addr.IsLocal()) { |
5105 | | // We disconnect local peers for bad behavior but don't discourage (since that would discourage |
5106 | | // all peers on the same local address) |
5107 | 920 | LogDebug(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n", Line | Count | Source | 117 | 920 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 920 | do { \ | 109 | 920 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 920 | } while (0) |
|
|
5108 | 920 | pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id); |
5109 | 920 | pnode.fDisconnect = true; |
5110 | 920 | return true; |
5111 | 920 | } |
5112 | | |
5113 | | // Normal case: Disconnect the peer and discourage all nodes sharing the address |
5114 | 194k | LogDebug(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id); Line | Count | Source | 117 | 194k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 194k | do { \ | 109 | 194k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 194k | } while (0) |
|
|
5115 | 194k | if (m_banman) m_banman->Discourage(pnode.addr)0 ; |
5116 | 194k | m_connman.DisconnectNode(pnode.addr); |
5117 | 194k | return true; |
5118 | 194k | } |
5119 | | |
5120 | | bool PeerManagerImpl::ProcessMessages(CNode& node, std::atomic<bool>& interruptMsgProc) |
5121 | 35.4M | { |
5122 | 35.4M | AssertLockNotHeld(m_tx_download_mutex); Line | Count | Source | 149 | 35.4M | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
5123 | 35.4M | AssertLockHeld(g_msgproc_mutex); Line | Count | Source | 144 | 35.4M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
5124 | | |
5125 | 35.4M | PeerRef maybe_peer{GetPeerRef(node.GetId())}; |
5126 | 35.4M | if (maybe_peer == nullptr) return false0 ; |
5127 | 35.4M | Peer& peer{*maybe_peer}; |
5128 | | |
5129 | | // For outbound connections, ensure that the initial VERSION message |
5130 | | // has been sent first before processing any incoming messages |
5131 | 35.4M | if (!node.IsInboundConn() && !peer.m_outbound_version_message_sent28.1M ) return false0 ; |
5132 | | |
5133 | 35.4M | { |
5134 | 35.4M | LOCK(peer.m_getdata_requests_mutex); Line | Count | Source | 268 | 35.4M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 35.4M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 35.4M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 35.4M | #define PASTE(x, y) x ## y |
|
|
|
|
5135 | 35.4M | if (!peer.m_getdata_requests.empty()) { |
5136 | 0 | ProcessGetData(node, peer, interruptMsgProc); |
5137 | 0 | } |
5138 | 35.4M | } |
5139 | | |
5140 | 35.4M | const bool processed_orphan = ProcessOrphanTx(peer); |
5141 | | |
5142 | 35.4M | if (node.fDisconnect) |
5143 | 1.45M | return false; |
5144 | | |
5145 | 34.0M | if (processed_orphan) return true119k ; |
5146 | | |
5147 | | // this maintains the order of responses |
5148 | | // and prevents m_getdata_requests to grow unbounded |
5149 | 33.9M | { |
5150 | 33.9M | LOCK(peer.m_getdata_requests_mutex); Line | Count | Source | 268 | 33.9M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 33.9M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 33.9M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 33.9M | #define PASTE(x, y) x ## y |
|
|
|
|
5151 | 33.9M | if (!peer.m_getdata_requests.empty()) return true0 ; |
5152 | 33.9M | } |
5153 | | |
5154 | | // Don't bother if send buffer is too full to respond anyway |
5155 | 33.9M | if (node.fPauseSend) return false0 ; |
5156 | | |
5157 | 33.9M | auto poll_result{node.PollMessage()}; |
5158 | 33.9M | if (!poll_result) { |
5159 | | // No message to process |
5160 | 21.2k | return false; |
5161 | 21.2k | } |
5162 | | |
5163 | 33.8M | CNetMessage& msg{poll_result->first}; |
5164 | 33.8M | bool fMoreWork = poll_result->second; |
5165 | | |
5166 | 33.8M | TRACEPOINT(net, inbound_message, |
5167 | 33.8M | node.GetId(), |
5168 | 33.8M | node.m_addr_name.c_str(), |
5169 | 33.8M | node.ConnectionTypeAsString().c_str(), |
5170 | 33.8M | msg.m_type.c_str(), |
5171 | 33.8M | msg.m_recv.size(), |
5172 | 33.8M | msg.m_recv.data() |
5173 | 33.8M | ); |
5174 | | |
5175 | 33.8M | if (m_opts.capture_messages) { |
5176 | 0 | CaptureMessage(node.addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true); |
5177 | 0 | } |
5178 | | |
5179 | 33.8M | try { |
5180 | 33.8M | ProcessMessage(peer, node, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc); |
5181 | 33.8M | if (interruptMsgProc) return false0 ; |
5182 | 33.8M | { |
5183 | 33.8M | LOCK(peer.m_getdata_requests_mutex); Line | Count | Source | 268 | 33.8M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 33.8M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 33.8M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 33.8M | #define PASTE(x, y) x ## y |
|
|
|
|
5184 | 33.8M | if (!peer.m_getdata_requests.empty()) fMoreWork = true0 ; |
5185 | 33.8M | } |
5186 | | // Does this peer have an orphan ready to reconsider? |
5187 | | // (Note: we may have provided a parent for an orphan provided |
5188 | | // by another peer that was already processed; in that case, |
5189 | | // the extra work may not be noticed, possibly resulting in an |
5190 | | // unnecessary 100ms delay) |
5191 | 33.8M | LOCK(m_tx_download_mutex); Line | Count | Source | 268 | 33.8M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 33.8M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 33.8M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 33.8M | #define PASTE(x, y) x ## y |
|
|
|
|
5192 | 33.8M | if (m_txdownloadman.HaveMoreWork(peer.m_id)) fMoreWork = true21.2k ; |
5193 | 33.8M | } catch (const std::exception& e) { |
5194 | 0 | LogDebug(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5195 | 0 | } catch (...) { |
5196 | 0 | LogDebug(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5197 | 0 | } |
5198 | | |
5199 | 33.8M | return fMoreWork; |
5200 | 33.8M | } |
5201 | | |
5202 | | void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) |
5203 | 33.0M | { |
5204 | 33.0M | AssertLockHeld(cs_main); Line | Count | Source | 144 | 33.0M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
5205 | | |
5206 | 33.0M | CNodeState &state = *State(pto.GetId()); |
5207 | | |
5208 | 33.0M | if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn()27.1M && state.fSyncStarted1.12M ) { |
5209 | | // This is an outbound peer subject to disconnection if they don't |
5210 | | // announce a block with as much work as the current tip within |
5211 | | // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if |
5212 | | // their chain has more work than ours, we should sync to it, |
5213 | | // unless it's invalid, in which case we should find that out and |
5214 | | // disconnect from them elsewhere). |
5215 | 1.05M | if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork1.02M ) { |
5216 | | // The outbound peer has sent us a block with at least as much work as our current tip, so reset the timeout if it was set |
5217 | 760k | if (state.m_chain_sync.m_timeout != 0s) { |
5218 | 6.54k | state.m_chain_sync.m_timeout = 0s; |
5219 | 6.54k | state.m_chain_sync.m_work_header = nullptr; |
5220 | 6.54k | state.m_chain_sync.m_sent_getheaders = false; |
5221 | 6.54k | } |
5222 | 760k | } else if (295k state.m_chain_sync.m_timeout == 0s295k || (276k state.m_chain_sync.m_work_header != nullptr276k && state.pindexBestKnownBlock != nullptr276k && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork248k )) { |
5223 | | // At this point we know that the outbound peer has either never sent us a block/header or they have, but its tip is behind ours |
5224 | | // AND |
5225 | | // we are noticing this for the first time (m_timeout is 0) |
5226 | | // OR we noticed this at some point within the last CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds and set a timeout |
5227 | | // for them, they caught up to our tip at the time of setting the timer but not to our current one (we've also advanced). |
5228 | | // Either way, set a new timeout based on our current tip. |
5229 | 18.9k | state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT; |
5230 | 18.9k | state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip(); |
5231 | 18.9k | state.m_chain_sync.m_sent_getheaders = false; |
5232 | 276k | } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) { |
5233 | | // No evidence yet that our peer has synced to a chain with work equal to that |
5234 | | // of our tip, when we first detected it was behind. Send a single getheaders |
5235 | | // message to give the peer a chance to update us. |
5236 | 494 | if (state.m_chain_sync.m_sent_getheaders) { |
5237 | | // They've run out of time to catch up! |
5238 | 124 | LogInfo("Outbound peer has old chain, best known block = %s, %s", state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", pto.DisconnectMsg());Line | Count | Source | 97 | 124 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 248 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, 124 __VA_ARGS__) |
|
|
5239 | 124 | pto.fDisconnect = true; |
5240 | 370 | } else { |
5241 | 370 | assert(state.m_chain_sync.m_work_header); |
5242 | | // Here, we assume that the getheaders message goes out, |
5243 | | // because it'll either go out or be skipped because of a |
5244 | | // getheaders in-flight already, in which case the peer should |
5245 | | // still respond to us with a sufficiently high work chain tip. |
5246 | 370 | MaybeSendGetHeaders(pto, |
5247 | 370 | GetLocator(state.m_chain_sync.m_work_header->pprev), |
5248 | 370 | peer); |
5249 | 370 | LogDebug(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString()); Line | Count | Source | 117 | 370 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 370 | do { \ | 109 | 370 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 370 | } while (0) |
|
|
5250 | 370 | state.m_chain_sync.m_sent_getheaders = true; |
5251 | | // Bump the timeout to allow a response, which could clear the timeout |
5252 | | // (if the response shows the peer has synced), reset the timeout (if |
5253 | | // the peer syncs to the required work but not to our tip), or result |
5254 | | // in disconnect (if we advance to the timeout and pindexBestKnownBlock |
5255 | | // has not sufficiently progressed) |
5256 | 370 | state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME; |
5257 | 370 | } |
5258 | 494 | } |
5259 | 1.05M | } |
5260 | 33.0M | } |
5261 | | |
5262 | | void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) |
5263 | 0 | { |
5264 | | // If we have any extra block-relay-only peers, disconnect the youngest unless |
5265 | | // it's given us a block -- in which case, compare with the second-youngest, and |
5266 | | // out of those two, disconnect the peer who least recently gave us a block. |
5267 | | // The youngest block-relay-only peer would be the extra peer we connected |
5268 | | // to temporarily in order to sync our tip; see net.cpp. |
5269 | | // Note that we use higher nodeid as a measure for most recent connection. |
5270 | 0 | if (m_connman.GetExtraBlockRelayCount() > 0) { |
5271 | 0 | std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0}; |
5272 | |
|
5273 | 0 | m_connman.ForEachNode([&](CNode* pnode) { |
5274 | 0 | if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return; |
5275 | 0 | if (pnode->GetId() > youngest_peer.first) { |
5276 | 0 | next_youngest_peer = youngest_peer; |
5277 | 0 | youngest_peer.first = pnode->GetId(); |
5278 | 0 | youngest_peer.second = pnode->m_last_block_time; |
5279 | 0 | } |
5280 | 0 | }); |
5281 | 0 | NodeId to_disconnect = youngest_peer.first; |
5282 | 0 | if (youngest_peer.second > next_youngest_peer.second) { |
5283 | | // Our newest block-relay-only peer gave us a block more recently; |
5284 | | // disconnect our second youngest. |
5285 | 0 | to_disconnect = next_youngest_peer.first; |
5286 | 0 | } |
5287 | 0 | m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
5288 | 0 | AssertLockHeld(::cs_main); Line | Count | Source | 144 | 0 | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
5289 | | // Make sure we're not getting a block right now, and that |
5290 | | // we've been connected long enough for this eviction to happen |
5291 | | // at all. |
5292 | | // Note that we only request blocks from a peer if we learn of a |
5293 | | // valid headers chain with at least as much work as our tip. |
5294 | 0 | CNodeState *node_state = State(pnode->GetId()); |
5295 | 0 | if (node_state == nullptr || |
5296 | 0 | (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) { |
5297 | 0 | pnode->fDisconnect = true; |
5298 | 0 | LogDebug(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5299 | 0 | pnode->GetId(), count_seconds(pnode->m_last_block_time)); |
5300 | 0 | return true; |
5301 | 0 | } else { |
5302 | 0 | LogDebug(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5303 | 0 | pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size()); |
5304 | 0 | } |
5305 | 0 | return false; |
5306 | 0 | }); |
5307 | 0 | } |
5308 | | |
5309 | | // Check whether we have too many outbound-full-relay peers |
5310 | 0 | if (m_connman.GetExtraFullOutboundCount() > 0) { |
5311 | | // If we have more outbound-full-relay peers than we target, disconnect one. |
5312 | | // Pick the outbound-full-relay peer that least recently announced |
5313 | | // us a new block, with ties broken by choosing the more recent |
5314 | | // connection (higher node id) |
5315 | | // Protect peers from eviction if we don't have another connection |
5316 | | // to their network, counting both outbound-full-relay and manual peers. |
5317 | 0 | NodeId worst_peer = -1; |
5318 | 0 | int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max(); |
5319 | |
|
5320 | 0 | m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) { |
5321 | 0 | AssertLockHeld(::cs_main); Line | Count | Source | 144 | 0 | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
5322 | | |
5323 | | // Only consider outbound-full-relay peers that are not already |
5324 | | // marked for disconnection |
5325 | 0 | if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return; |
5326 | 0 | CNodeState *state = State(pnode->GetId()); |
5327 | 0 | if (state == nullptr) return; // shouldn't be possible, but just in case |
5328 | | // Don't evict our protected peers |
5329 | 0 | if (state->m_chain_sync.m_protect) return; |
5330 | | // If this is the only connection on a particular network that is |
5331 | | // OUTBOUND_FULL_RELAY or MANUAL, protect it. |
5332 | 0 | if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return; |
5333 | 0 | if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) { |
5334 | 0 | worst_peer = pnode->GetId(); |
5335 | 0 | oldest_block_announcement = state->m_last_block_announcement; |
5336 | 0 | } |
5337 | 0 | }); |
5338 | 0 | if (worst_peer != -1) { |
5339 | 0 | bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
5340 | 0 | AssertLockHeld(::cs_main); Line | Count | Source | 144 | 0 | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
5341 | | |
5342 | | // Only disconnect a peer that has been connected to us for |
5343 | | // some reasonable fraction of our check-frequency, to give |
5344 | | // it time for new information to have arrived. |
5345 | | // Also don't disconnect any peer we're trying to download a |
5346 | | // block from. |
5347 | 0 | CNodeState &state = *State(pnode->GetId()); |
5348 | 0 | if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) { |
5349 | 0 | LogDebug(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5350 | 0 | pnode->fDisconnect = true; |
5351 | 0 | return true; |
5352 | 0 | } else { |
5353 | 0 | LogDebug(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5354 | 0 | pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size()); |
5355 | 0 | return false; |
5356 | 0 | } |
5357 | 0 | }); |
5358 | 0 | if (disconnected) { |
5359 | | // If we disconnected an extra peer, that means we successfully |
5360 | | // connected to at least one peer after the last time we |
5361 | | // detected a stale tip. Don't try any more extra peers until |
5362 | | // we next detect a stale tip, to limit the load we put on the |
5363 | | // network from these extra connections. |
5364 | 0 | m_connman.SetTryNewOutboundPeer(false); |
5365 | 0 | } |
5366 | 0 | } |
5367 | 0 | } |
5368 | 0 | } |
5369 | | |
5370 | | void PeerManagerImpl::CheckForStaleTipAndEvictPeers() |
5371 | 0 | { |
5372 | 0 | LOCK(cs_main); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
5373 | |
|
5374 | 0 | auto now{GetTime<std::chrono::seconds>()}; |
5375 | |
|
5376 | 0 | EvictExtraOutboundPeers(now); |
5377 | |
|
5378 | 0 | if (now > m_stale_tip_check_time) { |
5379 | | // Check whether our tip is stale, and if so, allow using an extra |
5380 | | // outbound peer |
5381 | 0 | if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) { |
5382 | 0 | LogInfo("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",Line | Count | Source | 97 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
5383 | 0 | count_seconds(now - m_last_tip_update.load())); |
5384 | 0 | m_connman.SetTryNewOutboundPeer(true); |
5385 | 0 | } else if (m_connman.GetTryNewOutboundPeer()) { |
5386 | 0 | m_connman.SetTryNewOutboundPeer(false); |
5387 | 0 | } |
5388 | 0 | m_stale_tip_check_time = now + STALE_CHECK_INTERVAL; |
5389 | 0 | } |
5390 | |
|
5391 | 0 | if (!m_initial_sync_finished && CanDirectFetch()) { |
5392 | 0 | m_connman.StartExtraBlockRelayPeers(); |
5393 | 0 | m_initial_sync_finished = true; |
5394 | 0 | } |
5395 | 0 | } |
5396 | | |
5397 | | void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now) |
5398 | 33.0M | { |
5399 | 33.0M | if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) && |
5400 | 33.0M | peer.m_ping_nonce_sent80.6k && |
5401 | 33.0M | now > peer.m_ping_start.load() + TIMEOUT_INTERVAL34.4k ) |
5402 | 34.4k | { |
5403 | | // The ping timeout is using mocktime. To disable the check during |
5404 | | // testing, increase -peertimeout. |
5405 | 34.4k | LogDebug(BCLog::NET, "ping timeout: %fs, %s", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), node_to.DisconnectMsg()); Line | Count | Source | 117 | 34.4k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 34.4k | do { \ | 109 | 34.4k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 34.4k | } while (0) |
|
|
5406 | 34.4k | node_to.fDisconnect = true; |
5407 | 34.4k | return; |
5408 | 34.4k | } |
5409 | | |
5410 | 33.0M | bool pingSend = false; |
5411 | | |
5412 | 33.0M | if (peer.m_ping_queued) { |
5413 | | // RPC ping request by user |
5414 | 0 | pingSend = true; |
5415 | 0 | } |
5416 | | |
5417 | 33.0M | if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL785k ) { |
5418 | | // Ping automatically sent as a latency probe & keepalive. |
5419 | 554k | pingSend = true; |
5420 | 554k | } |
5421 | | |
5422 | 33.0M | if (pingSend) { |
5423 | 554k | uint64_t nonce; |
5424 | 554k | do { |
5425 | 554k | nonce = FastRandomContext().rand64(); |
5426 | 554k | } while (nonce == 0); |
5427 | 554k | peer.m_ping_queued = false; |
5428 | 554k | peer.m_ping_start = now; |
5429 | 554k | if (node_to.GetCommonVersion() > BIP0031_VERSION) { |
5430 | 513k | peer.m_ping_nonce_sent = nonce; |
5431 | 513k | MakeAndPushMessage(node_to, NetMsgType::PING, nonce); |
5432 | 513k | } else { |
5433 | | // Peer is too old to support ping message type with nonce, pong will never arrive. |
5434 | 41.4k | peer.m_ping_nonce_sent = 0; |
5435 | 41.4k | MakeAndPushMessage(node_to, NetMsgType::PING); |
5436 | 41.4k | } |
5437 | 554k | } |
5438 | 33.0M | } |
5439 | | |
5440 | | void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) |
5441 | 33.0M | { |
5442 | | // Nothing to do for non-address-relay peers |
5443 | 33.0M | if (!peer.m_addr_relay_enabled) return6.46M ; |
5444 | | |
5445 | 26.5M | LOCK(peer.m_addr_send_times_mutex); Line | Count | Source | 268 | 26.5M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 26.5M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 26.5M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 26.5M | #define PASTE(x, y) x ## y |
|
|
|
|
5446 | | // Periodically advertise our local address to the peer. |
5447 | 26.5M | if (fListen && !m_chainman.IsInitialBlockDownload() && |
5448 | 26.5M | peer.m_next_local_addr_send < current_time22.9M ) { |
5449 | | // If we've sent before, clear the bloom filter for the peer, so that our |
5450 | | // self-announcement will actually go out. |
5451 | | // This might be unnecessary if the bloom filter has already rolled |
5452 | | // over since our last self-announcement, but there is only a small |
5453 | | // bandwidth cost that we can incur by doing this (which happens |
5454 | | // once a day on average). |
5455 | 441k | if (peer.m_next_local_addr_send != 0us) { |
5456 | 134k | peer.m_addr_known->reset(); |
5457 | 134k | } |
5458 | 441k | if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) { |
5459 | 0 | CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()}; |
5460 | 0 | if (peer.m_next_local_addr_send == 0us) { |
5461 | | // Send the initial self-announcement in its own message. This makes sure |
5462 | | // rate-limiting with limited start-tokens doesn't ignore it if the first |
5463 | | // message ends up containing multiple addresses. |
5464 | 0 | if (IsAddrCompatible(peer, local_addr)) { |
5465 | 0 | std::vector<CAddress> self_announcement{local_addr}; |
5466 | 0 | if (peer.m_wants_addrv2) { |
5467 | 0 | MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(self_announcement)); |
5468 | 0 | } else { |
5469 | 0 | MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(self_announcement)); |
5470 | 0 | } |
5471 | 0 | } |
5472 | 0 | } else { |
5473 | | // All later self-announcements are sent together with the other addresses. |
5474 | 0 | PushAddress(peer, local_addr); |
5475 | 0 | } |
5476 | 0 | } |
5477 | 441k | peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); |
5478 | 441k | } |
5479 | | |
5480 | | // We sent an `addr` message to this peer recently. Nothing more to do. |
5481 | 26.5M | if (current_time <= peer.m_next_addr_send) return26.1M ; |
5482 | | |
5483 | 438k | peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL); |
5484 | | |
5485 | 438k | if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {Line | Count | Source | 125 | 438k | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
|
5486 | | // Should be impossible since we always check size before adding to |
5487 | | // m_addrs_to_send. Recover by trimming the vector. |
5488 | 0 | peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND); |
5489 | 0 | } |
5490 | | |
5491 | | // Remove addr records that the peer already knows about, and add new |
5492 | | // addrs to the m_addr_known filter on the same pass. |
5493 | 438k | auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) { |
5494 | 0 | bool ret = peer.m_addr_known->contains(addr.GetKey()); |
5495 | 0 | if (!ret) peer.m_addr_known->insert(addr.GetKey()); |
5496 | 0 | return ret; |
5497 | 0 | }; |
5498 | 438k | peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known), |
5499 | 438k | peer.m_addrs_to_send.end()); |
5500 | | |
5501 | | // No addr messages to send |
5502 | 438k | if (peer.m_addrs_to_send.empty()) return; |
5503 | | |
5504 | 0 | if (peer.m_wants_addrv2) { |
5505 | 0 | MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send)); |
5506 | 0 | } else { |
5507 | 0 | MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send)); |
5508 | 0 | } |
5509 | 0 | peer.m_addrs_to_send.clear(); |
5510 | | |
5511 | | // we only send the big addr message once |
5512 | 0 | if (peer.m_addrs_to_send.capacity() > 40) { |
5513 | 0 | peer.m_addrs_to_send.shrink_to_fit(); |
5514 | 0 | } |
5515 | 0 | } |
5516 | | |
5517 | | void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer) |
5518 | 33.0M | { |
5519 | | // Delay sending SENDHEADERS (BIP 130) until we're done with an |
5520 | | // initial-headers-sync with this peer. Receiving headers announcements for |
5521 | | // new blocks while trying to sync their headers chain is problematic, |
5522 | | // because of the state tracking done. |
5523 | 33.0M | if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION6.26M ) { |
5524 | 5.99M | LOCK(cs_main); Line | Count | Source | 268 | 5.99M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 5.99M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 5.99M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 5.99M | #define PASTE(x, y) x ## y |
|
|
|
|
5525 | 5.99M | CNodeState &state = *State(node.GetId()); |
5526 | 5.99M | if (state.pindexBestKnownBlock != nullptr && |
5527 | 5.99M | state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()308k ) { |
5528 | | // Tell our peer we prefer to receive headers rather than inv's |
5529 | | // We send this to non-NODE NETWORK peers as well, because even |
5530 | | // non-NODE NETWORK peers can announce blocks (such as pruning |
5531 | | // nodes) |
5532 | 308k | MakeAndPushMessage(node, NetMsgType::SENDHEADERS); |
5533 | 308k | peer.m_sent_sendheaders = true; |
5534 | 308k | } |
5535 | 5.99M | } |
5536 | 33.0M | } |
5537 | | |
5538 | | void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time) |
5539 | 33.0M | { |
5540 | 33.0M | if (m_opts.ignore_incoming_txs) return0 ; |
5541 | 33.0M | if (pto.GetCommonVersion() < FEEFILTER_VERSION) return277k ; |
5542 | | // peers with the forcerelay permission should not filter txs to us |
5543 | 32.7M | if (pto.HasPermission(NetPermissionFlags::ForceRelay)) return10.5M ; |
5544 | | // Don't send feefilter messages to outbound block-relay-only peers since they should never announce |
5545 | | // transactions to us, regardless of feefilter state. |
5546 | 22.1M | if (pto.IsBlockOnlyConn()) return13.8k ; |
5547 | | |
5548 | 22.1M | CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK(); |
5549 | | |
5550 | 22.1M | if (m_chainman.IsInitialBlockDownload()) { |
5551 | | // Received tx-inv messages are discarded when the active |
5552 | | // chainstate is in IBD, so tell the peer to not send them. |
5553 | 3.62M | currentFilter = MAX_MONEY; |
5554 | 18.5M | } else { |
5555 | 18.5M | static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)}; |
5556 | 18.5M | if (peer.m_fee_filter_sent == MAX_FILTER) { |
5557 | | // Send the current filter if we sent MAX_FILTER previously |
5558 | | // and made it out of IBD. |
5559 | 286k | peer.m_next_send_feefilter = 0us; |
5560 | 286k | } |
5561 | 18.5M | } |
5562 | 22.1M | if (current_time > peer.m_next_send_feefilter) { |
5563 | 918k | CAmount filterToSend = m_fee_filter_rounder.round(currentFilter); |
5564 | | // We always have a fee filter of at least the min relay fee |
5565 | 918k | filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK()); |
5566 | 918k | if (filterToSend != peer.m_fee_filter_sent) { |
5567 | 731k | MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend); |
5568 | 731k | peer.m_fee_filter_sent = filterToSend; |
5569 | 731k | } |
5570 | 918k | peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL); |
5571 | 918k | } |
5572 | | // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY |
5573 | | // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. |
5574 | 21.2M | else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter && |
5575 | 21.2M | (435k currentFilter < 3 * peer.m_fee_filter_sent / 4435k || currentFilter > 4 * peer.m_fee_filter_sent / 390.0k )) { |
5576 | 435k | peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY); |
5577 | 435k | } |
5578 | 22.1M | } |
5579 | | |
5580 | | namespace { |
5581 | | class CompareInvMempoolOrder |
5582 | | { |
5583 | | const CTxMemPool* m_mempool; |
5584 | | public: |
5585 | 12.3M | explicit CompareInvMempoolOrder(CTxMemPool* mempool) : m_mempool{mempool} {} |
5586 | | |
5587 | | bool operator()(std::set<Wtxid>::iterator a, std::set<Wtxid>::iterator b) |
5588 | 1.76M | { |
5589 | | /* As std::make_heap produces a max-heap, we want the entries with the |
5590 | | * higher mining score to sort later. */ |
5591 | 1.76M | return m_mempool->CompareMiningScoreWithTopology(*b, *a); |
5592 | 1.76M | } |
5593 | | }; |
5594 | | } // namespace |
5595 | | |
5596 | | bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const |
5597 | 18.5M | { |
5598 | | // block-relay-only peers may never send txs to us |
5599 | 18.5M | if (peer.IsBlockOnlyConn()) return true38.8k ; |
5600 | 18.4M | if (peer.IsFeelerConn()) return true16.3k ; |
5601 | | // In -blocksonly mode, peers need the 'relay' permission to send txs to us |
5602 | 18.4M | if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)0 ) return true0 ; |
5603 | 18.4M | return false; |
5604 | 18.4M | } |
5605 | | |
5606 | | bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) |
5607 | 463k | { |
5608 | | // We don't participate in addr relay with outbound block-relay-only |
5609 | | // connections to prevent providing adversaries with the additional |
5610 | | // information of addr traffic to infer the link. |
5611 | 463k | if (node.IsBlockOnlyConn()) return false3.55k ; |
5612 | | |
5613 | 459k | if (!peer.m_addr_relay_enabled.exchange(true)) { |
5614 | | // During version message processing (non-block-relay-only outbound peers) |
5615 | | // or on first addr-related message we have received (inbound peers), initialize |
5616 | | // m_addr_known. |
5617 | 459k | peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001); |
5618 | 459k | } |
5619 | | |
5620 | 459k | return true; |
5621 | 463k | } |
5622 | | |
5623 | | void PeerManagerImpl::ProcessAddrs(std::string_view msg_type, CNode& pfrom, Peer& peer, std::vector<CAddress>&& vAddr, const std::atomic<bool>& interruptMsgProc) |
5624 | 0 | { |
5625 | 0 | AssertLockNotHeld(m_peer_mutex); Line | Count | Source | 149 | 0 | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
5626 | 0 | AssertLockHeld(g_msgproc_mutex); Line | Count | Source | 144 | 0 | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
5627 | |
|
5628 | 0 | if (!SetupAddressRelay(pfrom, peer)) { |
5629 | 0 | LogDebug(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5630 | 0 | return; |
5631 | 0 | } |
5632 | | |
5633 | 0 | if (vAddr.size() > MAX_ADDR_TO_SEND) |
5634 | 0 | { |
5635 | 0 | Misbehaving(peer, strprintf("%s message size = %u", msg_type, vAddr.size()));Line | Count | Source | 1172 | 0 | #define strprintf tfm::format |
|
5636 | 0 | return; |
5637 | 0 | } |
5638 | | |
5639 | | // Store the new addresses |
5640 | 0 | std::vector<CAddress> vAddrOk; |
5641 | | |
5642 | | // Update/increment addr rate limiting bucket. |
5643 | 0 | const auto current_time{NodeClock::now()}; |
5644 | 0 | if (peer.m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) { |
5645 | | // Don't increment bucket if it's already full |
5646 | 0 | const auto time_diff{current_time - peer.m_addr_token_timestamp}; |
5647 | 0 | const double increment{std::max(Ticks<SecondsDouble>(time_diff), 0.0) * MAX_ADDR_RATE_PER_SECOND}; |
5648 | 0 | peer.m_addr_token_bucket = std::min<double>(peer.m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET); |
5649 | 0 | } |
5650 | 0 | peer.m_addr_token_timestamp = current_time; |
5651 | |
|
5652 | 0 | const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr); |
5653 | 0 | uint64_t num_proc = 0; |
5654 | 0 | uint64_t num_rate_limit = 0; |
5655 | 0 | std::shuffle(vAddr.begin(), vAddr.end(), m_rng); |
5656 | 0 | for (CAddress& addr : vAddr) |
5657 | 0 | { |
5658 | 0 | if (interruptMsgProc) |
5659 | 0 | return; |
5660 | | |
5661 | | // Apply rate limiting. |
5662 | 0 | if (peer.m_addr_token_bucket < 1.0) { |
5663 | 0 | if (rate_limited) { |
5664 | 0 | ++num_rate_limit; |
5665 | 0 | continue; |
5666 | 0 | } |
5667 | 0 | } else { |
5668 | 0 | peer.m_addr_token_bucket -= 1.0; |
5669 | 0 | } |
5670 | | // We only bother storing full nodes, though this may include |
5671 | | // things which we would not make an outbound connection to, in |
5672 | | // part because we may make feeler connections to them. |
5673 | 0 | if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices)) |
5674 | 0 | continue; |
5675 | | |
5676 | 0 | if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_time + 10min) { |
5677 | 0 | addr.nTime = std::chrono::time_point_cast<std::chrono::seconds>(current_time - 5 * 24h); |
5678 | 0 | } |
5679 | 0 | AddAddressKnown(peer, addr); |
5680 | 0 | if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) { |
5681 | | // Do not process banned/discouraged addresses beyond remembering we received them |
5682 | 0 | continue; |
5683 | 0 | } |
5684 | 0 | ++num_proc; |
5685 | 0 | const bool reachable{g_reachable_nets.Contains(addr)}; |
5686 | 0 | if (addr.nTime > current_time - 10min && !peer.m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) { |
5687 | | // Relay to a limited number of other nodes |
5688 | 0 | RelayAddress(pfrom.GetId(), addr, reachable); |
5689 | 0 | } |
5690 | | // Do not store addresses outside our network |
5691 | 0 | if (reachable) { |
5692 | 0 | vAddrOk.push_back(addr); |
5693 | 0 | } |
5694 | 0 | } |
5695 | 0 | peer.m_addr_processed += num_proc; |
5696 | 0 | peer.m_addr_rate_limited += num_rate_limit; |
5697 | 0 | LogDebug(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5698 | 0 | vAddr.size(), num_proc, num_rate_limit, pfrom.GetId()); |
5699 | |
|
5700 | 0 | m_addrman.Add(vAddrOk, pfrom.addr, /*time_penalty=*/2h); |
5701 | 0 | if (vAddr.size() < 1000) peer.m_getaddr_sent = false; |
5702 | | |
5703 | | // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements |
5704 | 0 | if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) { |
5705 | 0 | LogDebug(BCLog::NET, "addrfetch connection completed, %s", pfrom.DisconnectMsg()); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5706 | 0 | pfrom.fDisconnect = true; |
5707 | 0 | } |
5708 | 0 | } |
5709 | | |
5710 | | bool PeerManagerImpl::SendMessages(CNode& node) |
5711 | 36.2M | { |
5712 | 36.2M | AssertLockNotHeld(m_tx_download_mutex); Line | Count | Source | 149 | 36.2M | #define AssertLockNotHeld(cs) AssertLockNotHeldInline(#cs, __FILE__, __LINE__, &cs) |
|
5713 | 36.2M | AssertLockHeld(g_msgproc_mutex); Line | Count | Source | 144 | 36.2M | #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) |
|
5714 | | |
5715 | 36.2M | PeerRef maybe_peer{GetPeerRef(node.GetId())}; |
5716 | 36.2M | if (!maybe_peer) return false0 ; |
5717 | 36.2M | Peer& peer{*maybe_peer}; |
5718 | 36.2M | const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); |
5719 | | |
5720 | | // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll |
5721 | | // disconnect misbehaving peers even before the version handshake is complete. |
5722 | 36.2M | if (MaybeDiscourageAndDisconnect(node, peer)) return true194k ; |
5723 | | |
5724 | | // Initiate version handshake for outbound connections |
5725 | 36.0M | if (!node.IsInboundConn() && !peer.m_outbound_version_message_sent28.8M ) { |
5726 | 609k | PushNodeVersion(node, peer); |
5727 | 609k | peer.m_outbound_version_message_sent = true; |
5728 | 609k | } |
5729 | | |
5730 | | // Don't send anything until the version handshake is complete |
5731 | 36.0M | if (!node.fSuccessfullyConnected || node.fDisconnect33.9M ) |
5732 | 3.00M | return true; |
5733 | | |
5734 | 33.0M | const auto current_time{GetTime<std::chrono::microseconds>()}; |
5735 | | |
5736 | | // The logic below does not apply to private broadcast peers, so skip it. |
5737 | | // Also in CConnman::PushMessage() we make sure that unwanted messages are |
5738 | | // not sent. This here is just an optimization. |
5739 | 33.0M | if (node.IsPrivateBroadcastConn()) { |
5740 | 0 | if (node.m_connected + PRIVATE_BROADCAST_MAX_CONNECTION_LIFETIME < current_time) { |
5741 | 0 | LogDebug(BCLog::PRIVBROADCAST, "Disconnecting: did not complete the transaction send within %d seconds, %s", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5742 | 0 | count_seconds(PRIVATE_BROADCAST_MAX_CONNECTION_LIFETIME), node.LogPeer()); |
5743 | 0 | node.fDisconnect = true; |
5744 | 0 | } |
5745 | 0 | return true; |
5746 | 0 | } |
5747 | | |
5748 | 33.0M | if (node.IsAddrFetchConn() && current_time - node.m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL9.70k ) { |
5749 | 286 | LogDebug(BCLog::NET, "addrfetch connection timeout, %s", node.DisconnectMsg()); Line | Count | Source | 117 | 286 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 286 | do { \ | 109 | 286 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 286 | } while (0) |
|
|
5750 | 286 | node.fDisconnect = true; |
5751 | 286 | return true; |
5752 | 286 | } |
5753 | | |
5754 | 33.0M | MaybeSendPing(node, peer, current_time); |
5755 | | |
5756 | | // MaybeSendPing may have marked peer for disconnection |
5757 | 33.0M | if (node.fDisconnect) return true37.7k ; |
5758 | | |
5759 | 33.0M | MaybeSendAddr(node, peer, current_time); |
5760 | | |
5761 | 33.0M | MaybeSendSendHeaders(node, peer); |
5762 | | |
5763 | 33.0M | { |
5764 | 33.0M | LOCK(cs_main); Line | Count | Source | 268 | 33.0M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 33.0M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 33.0M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 33.0M | #define PASTE(x, y) x ## y |
|
|
|
|
5765 | | |
5766 | 33.0M | CNodeState &state = *State(node.GetId()); |
5767 | | |
5768 | | // Start block sync |
5769 | 33.0M | if (m_chainman.m_best_header == nullptr) { |
5770 | 0 | m_chainman.m_best_header = m_chainman.ActiveChain().Tip(); |
5771 | 0 | } |
5772 | | |
5773 | | // Determine whether we might try initial headers sync or parallel |
5774 | | // block download from this peer -- this mostly affects behavior while |
5775 | | // in IBD (once out of IBD, we sync from all peers). |
5776 | 33.0M | bool sync_blocks_and_headers_from_peer = false; |
5777 | 33.0M | if (state.fPreferredDownload) { |
5778 | 20.3M | sync_blocks_and_headers_from_peer = true; |
5779 | 20.3M | } else if (12.6M CanServeBlocks(peer)12.6M && !node.IsAddrFetchConn()85.1k ) { |
5780 | | // Typically this is an inbound peer. If we don't have any outbound |
5781 | | // peers, or if we aren't downloading any blocks from such peers, |
5782 | | // then allow block downloads from this peer, too. |
5783 | | // We prefer downloading blocks from outbound peers to avoid |
5784 | | // putting undue load on (say) some home user who is just making |
5785 | | // outbound connections to the network, but if our only source of |
5786 | | // the latest blocks is from an inbound peer, we have to be sure to |
5787 | | // eventually download it (and not just wait indefinitely for an |
5788 | | // outbound peer to have it). |
5789 | 75.7k | if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()24.2k ) { |
5790 | 71.7k | sync_blocks_and_headers_from_peer = true; |
5791 | 71.7k | } |
5792 | 75.7k | } |
5793 | | |
5794 | 33.0M | if (!state.fSyncStarted && CanServeBlocks(peer)12.9M && !m_chainman.m_blockman.LoadingBlocks()330k ) { |
5795 | | // Only actively request headers from a single peer, unless we're close to today. |
5796 | 330k | if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer146k ) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h187k ) { |
5797 | 181k | const CBlockIndex* pindexStart = m_chainman.m_best_header; |
5798 | | /* If possible, start at the block preceding the currently |
5799 | | best known header. This ensures that we always get a |
5800 | | non-empty list of headers back as long as the peer |
5801 | | is up-to-date. With a non-empty response, we can initialise |
5802 | | the peer's known best block. This wouldn't be possible |
5803 | | if we requested starting at m_chainman.m_best_header and |
5804 | | got back an empty response. */ |
5805 | 181k | if (pindexStart->pprev) |
5806 | 181k | pindexStart = pindexStart->pprev; |
5807 | 181k | if (MaybeSendGetHeaders(node, GetLocator(pindexStart), peer)) { |
5808 | 166k | LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d", pindexStart->nHeight, node.GetId()); Line | Count | Source | 117 | 166k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 166k | do { \ | 109 | 166k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 166k | } while (0) |
|
|
5809 | | |
5810 | 166k | state.fSyncStarted = true; |
5811 | 166k | peer.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE + |
5812 | 166k | ( |
5813 | | // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling |
5814 | | // to maintain precision |
5815 | 166k | std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} * |
5816 | 166k | Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing |
5817 | 166k | ); |
5818 | 166k | nSyncStarted++; |
5819 | 166k | } |
5820 | 181k | } |
5821 | 330k | } |
5822 | | |
5823 | | // |
5824 | | // Try sending block announcements via headers |
5825 | | // |
5826 | 33.0M | { |
5827 | | // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our |
5828 | | // list of block hashes we're relaying, and our peer wants |
5829 | | // headers announcements, then find the first header |
5830 | | // not yet known to our peer but would connect, and send. |
5831 | | // If no header would connect, or if we have too many |
5832 | | // blocks, or if the peer doesn't want headers, just |
5833 | | // add all to the inv queue. |
5834 | 33.0M | LOCK(peer.m_block_inv_mutex); Line | Count | Source | 268 | 33.0M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 33.0M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 33.0M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 33.0M | #define PASTE(x, y) x ## y |
|
|
|
|
5835 | 33.0M | std::vector<CBlock> vHeaders; |
5836 | 33.0M | bool fRevertToInv = ((!peer.m_prefers_headers && |
5837 | 33.0M | (!state.m_requested_hb_cmpctblocks || peer.m_blocks_for_headers_relay.size() > 113.3M )) || |
5838 | 33.0M | peer.m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE13.2M ); |
5839 | 33.0M | const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery |
5840 | 33.0M | ProcessBlockAvailability(node.GetId()); // ensure pindexBestKnownBlock is up-to-date |
5841 | | |
5842 | 33.0M | if (!fRevertToInv) { |
5843 | 13.2M | bool fFoundStartingHeader = false; |
5844 | | // Try to find first header that our peer doesn't have, and |
5845 | | // then send all headers past that one. If we come across any |
5846 | | // headers that aren't on m_chainman.ActiveChain(), give up. |
5847 | 13.2M | for (const uint256& hash : peer.m_blocks_for_headers_relay) { |
5848 | 319k | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash); |
5849 | 319k | assert(pindex); |
5850 | 319k | if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) { |
5851 | | // Bail out if we reorged away from this block |
5852 | 0 | fRevertToInv = true; |
5853 | 0 | break; |
5854 | 0 | } |
5855 | 319k | if (pBestIndex != nullptr && pindex->pprev != pBestIndex0 ) { |
5856 | | // This means that the list of blocks to announce don't |
5857 | | // connect to each other. |
5858 | | // This shouldn't really be possible to hit during |
5859 | | // regular operation (because reorgs should take us to |
5860 | | // a chain that has some block not on the prior chain, |
5861 | | // which should be caught by the prior check), but one |
5862 | | // way this could happen is by using invalidateblock / |
5863 | | // reconsiderblock repeatedly on the tip, causing it to |
5864 | | // be added multiple times to m_blocks_for_headers_relay. |
5865 | | // Robustly deal with this rare situation by reverting |
5866 | | // to an inv. |
5867 | 0 | fRevertToInv = true; |
5868 | 0 | break; |
5869 | 0 | } |
5870 | 319k | pBestIndex = pindex; |
5871 | 319k | if (fFoundStartingHeader) { |
5872 | | // add this to the headers message |
5873 | 0 | vHeaders.emplace_back(pindex->GetBlockHeader()); |
5874 | 319k | } else if (PeerHasHeader(&state, pindex)) { |
5875 | 276k | continue; // keep looking for the first new block |
5876 | 276k | } else if (42.6k pindex->pprev == nullptr42.6k || PeerHasHeader(&state, pindex->pprev)42.6k ) { |
5877 | | // Peer doesn't have this header but they do have the prior one. |
5878 | | // Start sending headers. |
5879 | 5.51k | fFoundStartingHeader = true; |
5880 | 5.51k | vHeaders.emplace_back(pindex->GetBlockHeader()); |
5881 | 37.1k | } else { |
5882 | | // Peer doesn't have this header or the prior one -- nothing will |
5883 | | // connect, so bail out. |
5884 | 37.1k | fRevertToInv = true; |
5885 | 37.1k | break; |
5886 | 37.1k | } |
5887 | 319k | } |
5888 | 13.2M | } |
5889 | 33.0M | if (!fRevertToInv && !vHeaders.empty()13.2M ) { |
5890 | 5.51k | if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) { |
5891 | | // We only send up to 1 block as header-and-ids, as otherwise |
5892 | | // probably means we're doing an initial-ish-sync or they're slow |
5893 | 5.51k | LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, Line | Count | Source | 117 | 5.51k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 5.51k | do { \ | 109 | 5.51k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 5.51k | } while (0) |
|
|
5894 | 5.51k | vHeaders.front().GetHash().ToString(), node.GetId()); |
5895 | | |
5896 | 5.51k | std::optional<CSerializedNetMsg> cached_cmpctblock_msg; |
5897 | 5.51k | { |
5898 | 5.51k | LOCK(m_most_recent_block_mutex); Line | Count | Source | 268 | 5.51k | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 5.51k | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 5.51k | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 5.51k | #define PASTE(x, y) x ## y |
|
|
|
|
5899 | 5.51k | if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) { |
5900 | 691 | cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block); |
5901 | 691 | } |
5902 | 5.51k | } |
5903 | 5.51k | if (cached_cmpctblock_msg.has_value()) { |
5904 | 691 | PushMessage(node, std::move(cached_cmpctblock_msg.value())); |
5905 | 4.81k | } else { |
5906 | 4.81k | CBlock block; |
5907 | 4.81k | const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)}; |
5908 | 4.81k | assert(ret); |
5909 | 4.81k | CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()}; |
5910 | 4.81k | MakeAndPushMessage(node, NetMsgType::CMPCTBLOCK, cmpctblock); |
5911 | 4.81k | } |
5912 | 5.51k | state.pindexBestHeaderSent = pBestIndex; |
5913 | 5.51k | } else if (0 peer.m_prefers_headers0 ) { |
5914 | 0 | if (vHeaders.size() > 1) { |
5915 | 0 | LogDebug(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5916 | 0 | vHeaders.size(), |
5917 | 0 | vHeaders.front().GetHash().ToString(), |
5918 | 0 | vHeaders.back().GetHash().ToString(), node.GetId()); |
5919 | 0 | } else { |
5920 | 0 | LogDebug(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__, Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5921 | 0 | vHeaders.front().GetHash().ToString(), node.GetId()); |
5922 | 0 | } |
5923 | 0 | MakeAndPushMessage(node, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders)); |
5924 | 0 | state.pindexBestHeaderSent = pBestIndex; |
5925 | 0 | } else |
5926 | 0 | fRevertToInv = true; |
5927 | 5.51k | } |
5928 | 33.0M | if (fRevertToInv) { |
5929 | | // If falling back to using an inv, just try to inv the tip. |
5930 | | // The last entry in m_blocks_for_headers_relay was our tip at some point |
5931 | | // in the past. |
5932 | 19.7M | if (!peer.m_blocks_for_headers_relay.empty()) { |
5933 | 755k | const uint256& hashToAnnounce = peer.m_blocks_for_headers_relay.back(); |
5934 | 755k | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce); |
5935 | 755k | assert(pindex); |
5936 | | |
5937 | | // Warn if we're announcing a block that is not on the main chain. |
5938 | | // This should be very rare and could be optimized out. |
5939 | | // Just log for now. |
5940 | 755k | if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) { |
5941 | 0 | LogDebug(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n", Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
5942 | 0 | hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString()); |
5943 | 0 | } |
5944 | | |
5945 | | // If the peer's chain has this block, don't inv it back. |
5946 | 755k | if (!PeerHasHeader(&state, pindex)) { |
5947 | 531k | peer.m_blocks_for_inv_relay.push_back(hashToAnnounce); |
5948 | 531k | LogDebug(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, Line | Count | Source | 117 | 531k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 531k | do { \ | 109 | 531k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 531k | } while (0) |
|
|
5949 | 531k | node.GetId(), hashToAnnounce.ToString()); |
5950 | 531k | } |
5951 | 755k | } |
5952 | 19.7M | } |
5953 | 33.0M | peer.m_blocks_for_headers_relay.clear(); |
5954 | 33.0M | } |
5955 | | |
5956 | | // |
5957 | | // Message: inventory |
5958 | | // |
5959 | 0 | std::vector<CInv> vInv; |
5960 | 33.0M | { |
5961 | 33.0M | LOCK(peer.m_block_inv_mutex); Line | Count | Source | 268 | 33.0M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 33.0M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 33.0M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 33.0M | #define PASTE(x, y) x ## y |
|
|
|
|
5962 | 33.0M | vInv.reserve(std::max<size_t>(peer.m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET)); |
5963 | | |
5964 | | // Add blocks |
5965 | 33.0M | for (const uint256& hash : peer.m_blocks_for_inv_relay) { |
5966 | 531k | vInv.emplace_back(MSG_BLOCK, hash); |
5967 | 531k | if (vInv.size() == MAX_INV_SZ) { |
5968 | 0 | MakeAndPushMessage(node, NetMsgType::INV, vInv); |
5969 | 0 | vInv.clear(); |
5970 | 0 | } |
5971 | 531k | } |
5972 | 33.0M | peer.m_blocks_for_inv_relay.clear(); |
5973 | 33.0M | } |
5974 | | |
5975 | 33.0M | if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { |
5976 | 32.8M | LOCK(tx_relay->m_tx_inventory_mutex); Line | Count | Source | 268 | 32.8M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 32.8M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 32.8M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 32.8M | #define PASTE(x, y) x ## y |
|
|
|
|
5977 | | // Check whether periodic sends should happen |
5978 | 32.8M | bool fSendTrickle = node.HasPermission(NetPermissionFlags::NoBan); |
5979 | 32.8M | if (tx_relay->m_next_inv_send_time < current_time) { |
5980 | 526k | fSendTrickle = true; |
5981 | 526k | if (node.IsInboundConn()) { |
5982 | 94.2k | tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL, node.m_network_key); |
5983 | 432k | } else { |
5984 | 432k | tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL); |
5985 | 432k | } |
5986 | 526k | } |
5987 | | |
5988 | | // Time to send but the peer has requested we not relay transactions. |
5989 | 32.8M | if (fSendTrickle) { |
5990 | 12.3M | LOCK(tx_relay->m_bloom_filter_mutex); Line | Count | Source | 268 | 12.3M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 12.3M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 12.3M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 12.3M | #define PASTE(x, y) x ## y |
|
|
|
|
5991 | 12.3M | if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear()176k ; |
5992 | 12.3M | } |
5993 | | |
5994 | | // Respond to BIP35 mempool requests |
5995 | 32.8M | if (fSendTrickle && tx_relay->m_send_mempool12.3M ) { |
5996 | 0 | auto vtxinfo = m_mempool.infoAll(); |
5997 | 0 | tx_relay->m_send_mempool = false; |
5998 | 0 | const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; |
5999 | |
|
6000 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); Line | Count | Source | 268 | 0 | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 0 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 0 | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 0 | #define PASTE(x, y) x ## y |
|
|
|
|
6001 | |
|
6002 | 0 | for (const auto& txinfo : vtxinfo) { |
6003 | 0 | const Txid& txid{txinfo.tx->GetHash()}; |
6004 | 0 | const Wtxid& wtxid{txinfo.tx->GetWitnessHash()}; |
6005 | 0 | const auto inv = peer.m_wtxid_relay ? |
6006 | 0 | CInv{MSG_WTX, wtxid.ToUint256()} : |
6007 | 0 | CInv{MSG_TX, txid.ToUint256()}; |
6008 | 0 | tx_relay->m_tx_inventory_to_send.erase(wtxid); |
6009 | | |
6010 | | // Don't send transactions that peers will not put into their mempool |
6011 | 0 | if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { |
6012 | 0 | continue; |
6013 | 0 | } |
6014 | 0 | if (tx_relay->m_bloom_filter) { |
6015 | 0 | if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; |
6016 | 0 | } |
6017 | 0 | tx_relay->m_tx_inventory_known_filter.insert(inv.hash); |
6018 | 0 | vInv.push_back(inv); |
6019 | 0 | if (vInv.size() == MAX_INV_SZ) { |
6020 | 0 | MakeAndPushMessage(node, NetMsgType::INV, vInv); |
6021 | 0 | vInv.clear(); |
6022 | 0 | } |
6023 | 0 | } |
6024 | 0 | } |
6025 | | |
6026 | | // Determine transactions to relay |
6027 | 32.8M | if (fSendTrickle) { |
6028 | | // Produce a vector with all candidates for sending |
6029 | 12.3M | std::vector<std::set<Wtxid>::iterator> vInvTx; |
6030 | 12.3M | vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size()); |
6031 | 13.0M | for (std::set<Wtxid>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++747k ) { |
6032 | 747k | vInvTx.push_back(it); |
6033 | 747k | } |
6034 | 12.3M | const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; |
6035 | | // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. |
6036 | | // A heap is used so that not all items need sorting if only a few are being sent. |
6037 | 12.3M | CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool); |
6038 | 12.3M | std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); |
6039 | | // No reason to drain out at many times the network's capacity, |
6040 | | // especially since we have many peers and some will draw much shorter delays. |
6041 | 12.3M | unsigned int nRelayedTransactions = 0; |
6042 | 12.3M | LOCK(tx_relay->m_bloom_filter_mutex); Line | Count | Source | 268 | 12.3M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 12.3M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 12.3M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 12.3M | #define PASTE(x, y) x ## y |
|
|
|
|
6043 | 12.3M | size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5}; |
6044 | 12.3M | broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max); |
6045 | 13.0M | while (!vInvTx.empty() && nRelayedTransactions < broadcast_max747k ) { |
6046 | | // Fetch the top element from the heap |
6047 | 747k | std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); |
6048 | 747k | std::set<Wtxid>::iterator it = vInvTx.back(); |
6049 | 747k | vInvTx.pop_back(); |
6050 | 747k | auto wtxid = *it; |
6051 | | // Remove it from the to-be-sent set |
6052 | 747k | tx_relay->m_tx_inventory_to_send.erase(it); |
6053 | | // Not in the mempool anymore? don't bother sending it. |
6054 | 747k | auto txinfo = m_mempool.info(wtxid); |
6055 | 747k | if (!txinfo.tx) { |
6056 | 67.1k | continue; |
6057 | 67.1k | } |
6058 | | // `TxRelay::m_tx_inventory_known_filter` contains either txids or wtxids |
6059 | | // depending on whether our peer supports wtxid-relay. Therefore, first |
6060 | | // construct the inv and then use its hash for the filter check. |
6061 | 679k | const auto inv = peer.m_wtxid_relay ? |
6062 | 0 | CInv{MSG_WTX, wtxid.ToUint256()} : |
6063 | 679k | CInv{MSG_TX, txinfo.tx->GetHash().ToUint256()}; |
6064 | | // Check if not in the filter already |
6065 | 679k | if (tx_relay->m_tx_inventory_known_filter.contains(inv.hash)) { |
6066 | 5.97k | continue; |
6067 | 5.97k | } |
6068 | | // Peer told you to not send transactions at that feerate? Don't bother sending it. |
6069 | 673k | if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { |
6070 | 0 | continue; |
6071 | 0 | } |
6072 | 673k | if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)0 ) continue0 ; |
6073 | | // Send |
6074 | 673k | vInv.push_back(inv); |
6075 | 673k | nRelayedTransactions++; |
6076 | 673k | if (vInv.size() == MAX_INV_SZ) { |
6077 | 0 | MakeAndPushMessage(node, NetMsgType::INV, vInv); |
6078 | 0 | vInv.clear(); |
6079 | 0 | } |
6080 | 673k | tx_relay->m_tx_inventory_known_filter.insert(inv.hash); |
6081 | 673k | } |
6082 | | |
6083 | | // Ensure we'll respond to GETDATA requests for anything we've just announced |
6084 | 12.3M | LOCK(m_mempool.cs); Line | Count | Source | 268 | 12.3M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 12.3M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 12.3M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 12.3M | #define PASTE(x, y) x ## y |
|
|
|
|
6085 | 12.3M | tx_relay->m_last_inv_sequence = m_mempool.GetSequence(); |
6086 | 12.3M | } |
6087 | 32.8M | } |
6088 | 33.0M | if (!vInv.empty()) |
6089 | 674k | MakeAndPushMessage(node, NetMsgType::INV, vInv); |
6090 | | |
6091 | | // Detect whether we're stalling |
6092 | 33.0M | auto stalling_timeout = m_block_stalling_timeout.load(); |
6093 | 33.0M | if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout0 ) { |
6094 | | // Stalling only triggers when the block download window cannot move. During normal steady state, |
6095 | | // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection |
6096 | | // should only happen during initial block download. |
6097 | 0 | LogInfo("Peer is stalling block download, %s", node.DisconnectMsg());Line | Count | Source | 97 | 0 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
6098 | 0 | node.fDisconnect = true; |
6099 | | // Increase timeout for the next peer so that we don't disconnect multiple peers if our own |
6100 | | // bandwidth is insufficient. |
6101 | 0 | const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX); |
6102 | 0 | if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { |
6103 | 0 | LogDebug(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout)); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
6104 | 0 | } |
6105 | 0 | return true; |
6106 | 0 | } |
6107 | | // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N) |
6108 | | // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout. |
6109 | | // We compensate for other peers to prevent killing off peers due to our own downstream link |
6110 | | // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes |
6111 | | // to unreasonably increase our timeout. |
6112 | 33.0M | if (state.vBlocksInFlight.size() > 0) { |
6113 | 4.72M | QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); |
6114 | 4.72M | int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1; |
6115 | 4.72M | if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { |
6116 | 1.28k | LogInfo("Timeout downloading block %s, %s", queuedBlock.pindex->GetBlockHash().ToString(), node.DisconnectMsg());Line | Count | Source | 97 | 1.28k | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 1.28k | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
6117 | 1.28k | node.fDisconnect = true; |
6118 | 1.28k | return true; |
6119 | 1.28k | } |
6120 | 4.72M | } |
6121 | | // Check for headers sync timeouts |
6122 | 33.0M | if (state.fSyncStarted && peer.m_headers_sync_timeout < std::chrono::microseconds::max()20.2M ) { |
6123 | | // Detect whether this is a stalling initial-headers-sync peer |
6124 | 779k | if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) { |
6125 | 640k | if (current_time > peer.m_headers_sync_timeout && nSyncStarted == 112.5k && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)7.07k ) { |
6126 | | // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer, |
6127 | | // and we have others we could be using instead. |
6128 | | // Note: If all our peers are inbound, then we won't |
6129 | | // disconnect our sync peer for stalling; we have bigger |
6130 | | // problems if we can't get any outbound peers. |
6131 | 491 | if (!node.HasPermission(NetPermissionFlags::NoBan)) { |
6132 | 40 | LogInfo("Timeout downloading headers, %s", node.DisconnectMsg());Line | Count | Source | 97 | 40 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 40 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
6133 | 40 | node.fDisconnect = true; |
6134 | 40 | return true; |
6135 | 451 | } else { |
6136 | 451 | LogInfo("Timeout downloading headers from noban peer, not %s", node.DisconnectMsg());Line | Count | Source | 97 | 451 | #define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__) Line | Count | Source | 91 | 451 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
|
|
6137 | | // Reset the headers sync state so that we have a |
6138 | | // chance to try downloading from a different peer. |
6139 | | // Note: this will also result in at least one more |
6140 | | // getheaders message to be sent to |
6141 | | // this peer (eventually). |
6142 | 451 | state.fSyncStarted = false; |
6143 | 451 | nSyncStarted--; |
6144 | 451 | peer.m_headers_sync_timeout = 0us; |
6145 | 451 | } |
6146 | 491 | } |
6147 | 640k | } else { |
6148 | | // After we've caught up once, reset the timeout so we can't trigger |
6149 | | // disconnect later. |
6150 | 139k | peer.m_headers_sync_timeout = std::chrono::microseconds::max(); |
6151 | 139k | } |
6152 | 779k | } |
6153 | | |
6154 | | // Check that outbound peers have reasonable chains |
6155 | | // GetTime() is used by this anti-DoS logic so we can test this using mocktime |
6156 | 33.0M | ConsiderEviction(node, peer, GetTime<std::chrono::seconds>()); |
6157 | | |
6158 | | // |
6159 | | // Message: getdata (blocks) |
6160 | | // |
6161 | 33.0M | std::vector<CInv> vGetData; |
6162 | 33.0M | if (CanServeBlocks(peer) && (20.4M (20.4M sync_blocks_and_headers_from_peer20.4M && !IsLimitedPeer(peer)20.4M ) || !m_chainman.IsInitialBlockDownload()7.35M ) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER20.1M ) { |
6163 | 19.7M | std::vector<const CBlockIndex*> vToDownload; |
6164 | 19.7M | NodeId staller = -1; |
6165 | 19.7M | auto get_inflight_budget = [&state]() { |
6166 | 19.7M | return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size())); |
6167 | 19.7M | }; |
6168 | | |
6169 | | // If there are multiple chainstates, download blocks for the |
6170 | | // current chainstate first, to prioritize getting to network tip |
6171 | | // before downloading historical blocks. |
6172 | 19.7M | FindNextBlocksToDownload(peer, get_inflight_budget(), vToDownload, staller); |
6173 | 19.7M | auto historical_blocks{m_chainman.GetHistoricalBlockRange()}; |
6174 | 19.7M | if (historical_blocks && !IsLimitedPeer(peer)0 ) { |
6175 | | // If the first needed historical block is not an ancestor of the last, |
6176 | | // we need to start requesting blocks from their last common ancestor. |
6177 | 0 | const CBlockIndex* from_tip = LastCommonAncestor(historical_blocks->first, historical_blocks->second); |
6178 | 0 | TryDownloadingHistoricalBlocks( |
6179 | 0 | peer, |
6180 | 0 | get_inflight_budget(), |
6181 | 0 | vToDownload, from_tip, historical_blocks->second); |
6182 | 0 | } |
6183 | 19.7M | for (const CBlockIndex *pindex : vToDownload) { |
6184 | 26.5k | uint32_t nFetchFlags = GetFetchFlags(peer); |
6185 | 26.5k | vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()); |
6186 | 26.5k | BlockRequested(node.GetId(), *pindex); |
6187 | 26.5k | LogDebug(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), Line | Count | Source | 117 | 26.5k | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 26.5k | do { \ | 109 | 26.5k | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 26.5k | } while (0) |
|
|
6188 | 26.5k | pindex->nHeight, node.GetId()); |
6189 | 26.5k | } |
6190 | 19.7M | if (state.vBlocksInFlight.empty() && staller != -116.2M ) { |
6191 | 0 | if (State(staller)->m_stalling_since == 0us) { |
6192 | 0 | State(staller)->m_stalling_since = current_time; |
6193 | 0 | LogDebug(BCLog::NET, "Stall started peer=%d\n", staller); Line | Count | Source | 117 | 0 | #define LogDebug(category, ...) detail_LogIfCategoryAndLevelEnabled(category, BCLog::Level::Debug, __VA_ARGS__) Line | Count | Source | 108 | 0 | do { \ | 109 | 0 | if (util::log::ShouldLog((category), (level))) { \ | 110 | 0 | bool rate_limit{level >= BCLog::Level::Info}; \ | 111 | 0 | Assume(!rate_limit); /*Only called with the levels below*/ \ Line | Count | Source | 125 | 0 | #define Assume(val) inline_assertion_check<false>(val, std::source_location::current(), #val) |
| 112 | 0 | LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \ Line | Count | Source | 91 | 0 | #define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(SourceLocation{__func__}, category, level, should_ratelimit, __VA_ARGS__) |
| 113 | 0 | } \ | 114 | 0 | } while (0) |
|
|
6194 | 0 | } |
6195 | 0 | } |
6196 | 19.7M | } |
6197 | | |
6198 | | // |
6199 | | // Message: getdata (transactions) |
6200 | | // |
6201 | 33.0M | { |
6202 | 33.0M | LOCK(m_tx_download_mutex); Line | Count | Source | 268 | 33.0M | #define LOCK(cs) UniqueLock UNIQUE_NAME(criticalblock)(MaybeCheckNotHeld(cs), #cs, __FILE__, __LINE__) Line | Count | Source | 11 | 33.0M | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) Line | Count | Source | 9 | 33.0M | #define PASTE2(x, y) PASTE(x, y) Line | Count | Source | 8 | 33.0M | #define PASTE(x, y) x ## y |
|
|
|
|
6203 | 33.0M | for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(node.GetId(), current_time)) { |
6204 | 690k | vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX0 : (MSG_TX | GetFetchFlags(peer)), gtxid.ToUint256()); |
6205 | 690k | if (vGetData.size() >= MAX_GETDATA_SZ) { |
6206 | 0 | MakeAndPushMessage(node, NetMsgType::GETDATA, vGetData); |
6207 | 0 | vGetData.clear(); |
6208 | 0 | } |
6209 | 690k | } |
6210 | 33.0M | } |
6211 | | |
6212 | 33.0M | if (!vGetData.empty()) |
6213 | 427k | MakeAndPushMessage(node, NetMsgType::GETDATA, vGetData); |
6214 | 33.0M | } // release cs_main |
6215 | 0 | MaybeSendFeefilter(node, peer, current_time); |
6216 | 33.0M | return true; |
6217 | 33.0M | } |