diff --git a/electrum/lnpeer.py b/electrum/lnpeer.py
index 23819da5f..58bd75f0e 100644
--- a/electrum/lnpeer.py
+++ b/electrum/lnpeer.py
@@ -1259,11 +1259,12 @@ class Peer(Logger):
         else:
             # all good
             fut.set_result((we_must_resend_revoke_and_ack, their_next_local_ctn))
-            # Block processing of further incoming messages until we finished our part of chan-reest.
-            # This is needed for the replaying of our local unacked updates to be sane (if the peer
-            # also replays some messages we must not react to them until we finished replaying our own).
-            # (it would be sufficient to only block messages related to this channel, but this is easier)
-            await self._chan_reest_finished[chan.channel_id].wait()
+        # Block processing of further incoming messages until we finished our part of chan-reest.
+        # This is needed for the replaying of our local unacked updates to be sane (if the peer
+        # also replays some messages we must not react to them until we finished replaying our own).
+        # (it would be sufficient to only block messages related to this channel, but this is easier)
+        await self._chan_reest_finished[chan.channel_id].wait()
+        # Note: if the above event is never set, we won't detect if the connection was closed by remote...
 
     def _send_channel_reestablish(self, chan: Channel):
         assert self.is_initialized()
diff --git a/electrum/lnworker.py b/electrum/lnworker.py
index 1b58a8bd2..089d45668 100644
--- a/electrum/lnworker.py
+++ b/electrum/lnworker.py
@@ -351,12 +351,13 @@ class LNWorker(Logger, EventListener, NetworkRetryManager[LNPeerAddr]):
         return peer
 
     async def _add_peer_from_transport(self, *, node_id: bytes, transport: LNTransportBase) -> Peer:
-        peer = Peer(self, node_id, transport)
         with self.lock:
             existing_peer = self._peers.get(node_id)
             if existing_peer:
-                existing_peer.close_and_cleanup()
-            assert node_id not in self._peers
+                # two instances of the same wallet are attempting to connect simultaneously.
+                # give priority to existing connection
+                return
+            peer = Peer(self, node_id, transport)
             self._peers[node_id] = peer
         await self.taskgroup.spawn(peer.main_loop())
         return peer
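
For context on the lnpeer.py hunk, here is a minimal sketch (not Electrum code; class and method names are illustrative) of the per-channel `asyncio.Event` gating pattern that `_chan_reest_finished` implements: the message handler blocks on the event so that anything the peer replays is not processed until our own unacked updates have been replayed, and the reestablish coroutine sets the event to unblock it.

```python
import asyncio
from collections import defaultdict


class PeerSketch:
    """Illustrative only; not the Electrum Peer class."""

    def __init__(self) -> None:
        # one Event per channel; set once our side of channel_reestablish is done
        self._chan_reest_finished = defaultdict(asyncio.Event)

    async def on_channel_reestablish(self, channel_id: bytes) -> None:
        # ... verify commitment numbers, resolve the waiting future, etc. ...
        # Block the message loop here so that messages the peer replays are not
        # handled until we have replayed our own unacked updates.
        await self._chan_reest_finished[channel_id].wait()
        # As the new "Note:" in the diff points out, if this event is never set,
        # the coroutine blocks here and no longer notices a remote disconnect.

    async def reestablish_channel(self, channel_id: bytes) -> None:
        # ... replay our local unacked updates to the peer ...
        self._chan_reest_finished[channel_id].set()  # unblock the message loop
```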
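
Similarly, a rough sketch (again illustrative, not the actual LNWorker API) of the registration logic the lnworker.py hunk moves to: the `Peer` object is only constructed once we know, under the lock, that no peer is already registered for that node id; if one is, the new transport is dropped and the existing connection wins.

```python
import threading
from typing import Any, Callable, Dict, Optional


class WorkerSketch:
    """Illustrative only; not the Electrum LNWorker class."""

    def __init__(self) -> None:
        self.lock = threading.RLock()
        self._peers: Dict[bytes, Any] = {}

    def add_peer(self, node_id: bytes, make_peer: Callable[[], Any]) -> Optional[Any]:
        with self.lock:
            if node_id in self._peers:
                # two connections for the same node id raced (e.g. two instances
                # of the same wallet connecting): keep the existing peer
                return None
            peer = make_peer()  # construct only after the check, still under the lock
            self._peers[node_id] = peer
        return peer
```

In the diff itself, the bare `return` means `_add_peer_from_transport` can now yield `None` in the duplicate case even though its annotation still reads `-> Peer`, so callers presumably need to tolerate a `None` result; the sketch mirrors that with an `Optional` return type.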