Browse Source

lnworker: if two instances of the same wallet are trying to connect

simultaneously, give priority to the existing connection
master
ThomasV 2 years ago
parent
commit
0c48fd495f
  1. 11
      electrum/lnpeer.py
  2. 7
      electrum/lnworker.py

11
electrum/lnpeer.py

@@ -1259,11 +1259,12 @@ class Peer(Logger):
else:
# all good
fut.set_result((we_must_resend_revoke_and_ack, their_next_local_ctn))
# Block processing of further incoming messages until we finished our part of chan-reest.
# This is needed for the replaying of our local unacked updates to be sane (if the peer
# also replays some messages we must not react to them until we finished replaying our own).
# (it would be sufficient to only block messages related to this channel, but this is easier)
await self._chan_reest_finished[chan.channel_id].wait()
# Block processing of further incoming messages until we finished our part of chan-reest.
# This is needed for the replaying of our local unacked updates to be sane (if the peer
# also replays some messages we must not react to them until we finished replaying our own).
# (it would be sufficient to only block messages related to this channel, but this is easier)
await self._chan_reest_finished[chan.channel_id].wait()
# Note: if the above event is never set, we won't detect if the connection was closed by remote...
def _send_channel_reestablish(self, chan: Channel):
assert self.is_initialized()

7
electrum/lnworker.py

@@ -351,12 +351,13 @@ class LNWorker(Logger, EventListener, NetworkRetryManager[LNPeerAddr]):
return peer
async def _add_peer_from_transport(self, *, node_id: bytes, transport: LNTransportBase) -> Peer:
peer = Peer(self, node_id, transport)
with self.lock:
existing_peer = self._peers.get(node_id)
if existing_peer:
existing_peer.close_and_cleanup()
assert node_id not in self._peers
# two instances of the same wallet are attempting to connect simultaneously.
# give priority to existing connection
return
peer = Peer(self, node_id, transport)
self._peers[node_id] = peer
await self.taskgroup.spawn(peer.main_loop())
return peer

Loading…
Cancel
Save