Fix security issue #1362. (#1377)
@@ -709,12 +709,15 @@ proc handleIncomingPeer*(peer: Peer): Future[bool] {.async.} =
     debug "Peer (incoming) lost", peer
     nbc_peers.set int64(len(network.peerPool))

-  let res = await network.peerPool.addIncomingPeer(peer)
+  let res = network.peerPool.addIncomingPeerNoWait(peer)
   if res:
     peer.updateScore(NewPeerScore)
     debug "Peer (incoming) has been added to PeerPool", peer
     peer.getFuture().addCallback(onPeerClosed)
     result = true
+  else:
+    debug "Too many incoming connections in PeerPool, disconnecting", peer
+    result = false

   nbc_peers.set int64(len(network.peerPool))
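For context, here is a minimal sketch of the non-blocking admission used above. It is not the actual PeerPool implementation; the field names and limits are illustrative assumptions. The point is that addIncomingPeerNoWait() rejects a peer immediately once the incoming quota or the total pool size is exhausted, instead of awaiting a free slot the way the old awaited addIncomingPeer() call did.

type
  DemoPool = object
    maxPeers: int          # total pool capacity
    maxIncomingPeers: int  # cap on inbound connections only
    incoming: int
    outgoing: int

proc len(pool: DemoPool): int =
  pool.incoming + pool.outgoing

proc addIncomingPeerNoWait(pool: var DemoPool): bool =
  ## Never blocks: returns false as soon as the incoming quota or the
  ## total pool size is exhausted, so the caller can disconnect the peer.
  if pool.incoming >= pool.maxIncomingPeers or pool.len() >= pool.maxPeers:
    result = false
  else:
    inc pool.incoming
    result = true

when isMainModule:
  var pool = DemoPool(maxPeers: 160, maxIncomingPeers: 80)
  var accepted = 0
  for _ in 0 ..< 100:
    if pool.addIncomingPeerNoWait():
      inc accepted
  doAssert accepted == 80  # the remaining 20 inbound attempts were rejected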
@@ -776,22 +779,24 @@ proc connectWorker(network: Eth2Node) {.async.} =
       remotePeerInfo = await network.connQueue.popFirst()
       peerPoolHasRemotePeer = network.peerPool.hasPeer(remotePeerInfo.peerId)
       seenTableHasRemotePeer = network.isSeen(remotePeerInfo)
-      remotePeerAlreadyConnected = network.connTable.hasKey(remotePeerInfo.peerId)
+      remotePeerConnecting = network.connTable.hasKey(remotePeerInfo.peerId)

-    if not(peerPoolHasRemotePeer) and not(seenTableHasRemotePeer) and not(remotePeerAlreadyConnected):
+    if not(peerPoolHasRemotePeer) and
+       not(seenTableHasRemotePeer) and not(remotePeerConnecting):
       network.connTable[remotePeerInfo.peerId] = remotePeerInfo
       try:
         # We trying to connect to peers which are not in PeerPool, SeenTable and
         # ConnTable.
         var fut = network.dialPeer(remotePeerInfo)
-        # We discarding here just because we going to check future state, to avoid
-        # condition where connection happens and timeout reached.
+        # We discarding here just because we going to check future state,
+        # to avoid condition where connection happens and timeout reached.
         discard await withTimeout(fut, network.connectTimeout)
         # We handling only timeout and errors, because successfull connections
         # will be stored in PeerPool.
         if fut.finished():
           if fut.failed() and not(fut.cancelled()):
-            debug "Unable to establish connection with peer", peer = remotePeerInfo.id,
+            debug "Unable to establish connection with peer",
+              peer = remotePeerInfo.id,
               errMsg = fut.readError().msg
             inc nbc_failed_dials
             network.addSeen(remotePeerInfo, SeenTableTimeDeadPeer)
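One detail in this hunk deserves a note: the boolean returned by withTimeout is discarded on purpose, and the dial future's own state is inspected afterwards, so a connection that completes right around the deadline is not misclassified as a timeout. A self-contained sketch of the same pattern using chronos follows; fakeDial is a stand-in for network.dialPeer and the durations are arbitrary.

import chronos

proc fakeDial(): Future[void] {.async.} =
  # Stand-in for network.dialPeer(); pretend the dial takes a little while.
  await sleepAsync(50.milliseconds)

proc demo() {.async.} =
  let fut = fakeDial()
  # Discard the timeout result and look at the future itself, so a dial
  # that finishes right at the deadline is still counted as finished.
  discard await withTimeout(fut, 2.seconds)
  if fut.finished():
    if fut.failed() and not(fut.cancelled()):
      echo "dial failed: ", fut.readError().msg
    else:
      echo "dial succeeded"
  else:
    echo "dial still pending after timeout"

waitFor demo()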
@@ -803,18 +808,18 @@ proc connectWorker(network: Eth2Node) {.async.} =
         network.connTable.del(remotePeerInfo.peerId)
     else:
       trace "Peer is already connected, connecting or already seen",
-        peer = remotePeerInfo.id, peer_pool_has_peer = $peerPoolHasRemotePeer, seen_table_has_peer = $seenTableHasRemotePeer,
-        connecting_peer = $remotePeerAlreadyConnected, seen_table_size = len(network.seenTable)
-
-    # Prevent (a purely theoretical) high CPU usage when losing connectivity.
-    await sleepAsync(1.seconds)
+        peer = remotePeerInfo.id,
+        peer_pool_has_peer = $peerPoolHasRemotePeer,
+        seen_table_has_peer = $seenTableHasRemotePeer,
+        connecting_peer = $remotePeerConnecting,
+        seen_table_size = len(network.seenTable)

 proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
   debug "Starting discovery loop"

   let enrField = ("eth2", SSZ.encode(node.forkId))
   while true:
-    let currentPeerCount = node.peerPool.len
+    let currentPeerCount = node.peerPool.lenAvailable({PeerType.Outgoing})
     if currentPeerCount < node.wantedPeers:
       try:
         let discoveredPeers =
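The one-line change in runDiscoveryLoop is central to the fix: counting the whole pool lets inbound connections satisfy wantedPeers and switch discovery off, while counting only outgoing peers keeps the node dialing peers it chose itself. A rough illustration follows; countByType is a hypothetical stand-in for peerPool.lenAvailable, whose exact available/acquired bookkeeping is not modelled here, and the peer counts are made up.

type
  PeerType = enum
    Incoming, Outgoing

proc countByType(peers: openArray[PeerType], kinds: set[PeerType]): int =
  # Hypothetical helper standing in for peerPool.lenAvailable({...}).
  for p in peers:
    if p in kinds:
      inc result

when isMainModule:
  # Illustrative pool state: mostly inbound connections.
  let peers = @[Incoming, Incoming, Incoming, Incoming, Outgoing]
  let wantedPeers = 4
  # Old check: the total pool size already satisfies wantedPeers,
  # so the discovery loop would stop looking for new peers.
  doAssert peers.len >= wantedPeers
  # New check: only outgoing peers are counted, so discovery keeps dialing.
  doAssert countByType(peers, {PeerType.Outgoing}) < wantedPeers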
@@ -853,8 +858,13 @@ proc init*(T: type Eth2Node, conf: BeaconNodeConf, enrForkId: ENRForkID,
             privKey: keys.PrivateKey, rng: ref BrHmacDrbgContext): T =
   new result
   result.switch = switch
-  result.wantedPeers = conf.maxPeers
-  result.peerPool = newPeerPool[Peer, PeerID](maxPeers = conf.maxPeers)
+  result.wantedPeers = (conf.maxPeers - conf.maxIncomingPeers)
+  # We are limiting number of incoming connections (peers) to some value,
+  # otherwise it is possible to fill our PeerPool with only incoming connections
+  # This is undesired behavior, because in such way attackers could easily
+  # falsify our vision of network and beacon chain.
+  result.peerPool = newPeerPool[Peer, PeerID](
+    maxPeers = conf.maxPeers, maxIncomingPeers = conf.maxIncomingPeers)
Review comment (mratsim): AFAIK this is one of the mitigations recommended in #1362. It should be explicitly linked to the issue, maybe with a "SEC" marker (like TODO), so that six months from now, after a refactoring, this mitigation does not quietly disappear.

Reply: @mratsim this is something we were missing in our implementation; it will not be refactored away, and we do not refactor without thinking it through.
   result.connectTimeout = 10.seconds
   result.seenThreshold = 10.minutes
   result.seenTable = initTable[PeerID, SeenItem]()
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Review comment: need to leave it at 79 or remove the cultural reference.
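To make the interplay of the two limits in the init change concrete (the numbers below are illustrative, not the project defaults): outgoing dialing now targets maxPeers - maxIncomingPeers peers, while the pool itself caps inbound connections at maxIncomingPeers, so inbound connections alone can never fill the pool. Following the reviewer's suggestion above, the mitigation could also carry a searchable tag pointing back at the issue, sketched here as a hypothetical SEC comment.

type
  DemoConf = object
    maxPeers: int
    maxIncomingPeers: int

when isMainModule:
  let conf = DemoConf(maxPeers: 160, maxIncomingPeers: 80)  # illustrative values
  # SEC(#1362): reserve part of the pool for peers we dial ourselves so
  # inbound connections cannot monopolise our view of the network.
  let wantedPeers = conf.maxPeers - conf.maxIncomingPeers
  doAssert wantedPeers == 80
  doAssert conf.maxIncomingPeers < conf.maxPeers  # at least one outgoing slot remains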