Remove support for splicing without quiescence #2922

Merged · 1 commit · Oct 8, 2024
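Summary of the diff below: the ChannelParams.useQuiescence helper and the code path that let a splice proceed without quiescence are removed, so initiating a splice now always starts with an stfu exchange, and an incoming splice_init is rejected with tx_abort when quiescence has not been negotiated. The test defaults gain Features.Quiescence and Features.SplicePrototype as optional features, replacing the dedicated Splicing and Quiescence test tags.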
@@ -133,9 +133,6 @@ case class ChannelParams(channelId: ByteVector32,
else Right(remoteScriptPubKey)
}

/** If both peers support quiescence, we have to exchange stfu when splicing. */
def useQuiescence: Boolean = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.Quiescence)

}

object ChannelParams {
@@ -822,7 +819,7 @@ case class Commitments(params: ChannelParams,
def localIsQuiescent: Boolean = changes.localChanges.all.isEmpty
def remoteIsQuiescent: Boolean = changes.remoteChanges.all.isEmpty
// HTLCs and pending changes are the same for all active commitments, so we don't need to loop through all of them.
def isQuiescent: Boolean = (params.useQuiescence || active.head.hasNoPendingHtlcs) && localIsQuiescent && remoteIsQuiescent
def isQuiescent: Boolean = localIsQuiescent && remoteIsQuiescent
def hasNoPendingHtlcsOrFeeUpdate: Boolean = active.head.hasNoPendingHtlcsOrFeeUpdate(changes)
def hasPendingOrProposedHtlcs: Boolean = active.head.hasPendingOrProposedHtlcs(changes)
def timedOutOutgoingHtlcs(currentHeight: BlockHeight): Set[UpdateAddHtlc] = active.head.timedOutOutgoingHtlcs(currentHeight)
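For reference, here is a minimal self-contained sketch (hypothetical simplified types, not the eclair API) of how the quiescence predicate changes in the hunk above: previously, peers that had not negotiated the quiescence feature could still splice as long as the channel had no pending HTLCs at all; after this change, a channel is quiescent exactly when neither side has pending updates in flight.

```scala
// Hypothetical simplified model, for comparison only.
final case class PendingChanges(local: List[String], remote: List[String]) {
  def localIsQuiescent: Boolean = local.isEmpty
  def remoteIsQuiescent: Boolean = remote.isEmpty
}

object QuiescenceCheck {
  // old behaviour: without the quiescence feature, fall back to requiring "no pending HTLCs"
  def isQuiescentOld(useQuiescence: Boolean, hasNoPendingHtlcs: Boolean, c: PendingChanges): Boolean =
    (useQuiescence || hasNoPendingHtlcs) && c.localIsQuiescent && c.remoteIsQuiescent

  // new behaviour: quiescence is always negotiated, so only the pending-changes check remains
  def isQuiescent(c: PendingChanges): Boolean =
    c.localIsQuiescent && c.remoteIsQuiescent
}
```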
@@ -854,21 +854,13 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
case Event(cmd: CMD_SPLICE, d: DATA_NORMAL) =>
if (d.commitments.params.remoteParams.initFeatures.hasFeature(Features.SplicePrototype)) {
d.spliceStatus match {
case SpliceStatus.NoSplice if d.commitments.params.useQuiescence =>
case SpliceStatus.NoSplice =>
startSingleTimer(QuiescenceTimeout.toString, QuiescenceTimeout(peer), nodeParams.channelConf.quiescenceTimeout)
if (d.commitments.localIsQuiescent) {
stay() using d.copy(spliceStatus = SpliceStatus.InitiatorQuiescent(cmd)) sending Stfu(d.channelId, initiator = true)
} else {
stay() using d.copy(spliceStatus = SpliceStatus.QuiescenceRequested(cmd))
}
case SpliceStatus.NoSplice if !d.commitments.params.useQuiescence =>
initiateSplice(cmd, d) match {
case Left(f) =>
cmd.replyTo ! RES_FAILURE(cmd, f)
stay()
case Right(spliceInit) =>
stay() using d.copy(spliceStatus = SpliceStatus.SpliceRequested(cmd, spliceInit)) sending spliceInit
}
case _ =>
log.warning("cannot initiate splice, another one is already in progress")
cmd.replyTo ! RES_FAILURE(cmd, InvalidSpliceAlreadyInProgress(d.channelId))
@@ -886,62 +878,53 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
stay()

case Event(msg: Stfu, d: DATA_NORMAL) =>
if (d.commitments.params.useQuiescence) {
if (d.commitments.remoteIsQuiescent) {
d.spliceStatus match {
case SpliceStatus.NoSplice =>
startSingleTimer(QuiescenceTimeout.toString, QuiescenceTimeout(peer), nodeParams.channelConf.quiescenceTimeout)
if (d.commitments.localIsQuiescent) {
stay() using d.copy(spliceStatus = SpliceStatus.NonInitiatorQuiescent) sending Stfu(d.channelId, initiator = false)
} else {
stay() using d.copy(spliceStatus = SpliceStatus.ReceivedStfu(msg))
}
case SpliceStatus.QuiescenceRequested(cmd) =>
// We could keep track of our splice attempt and merge it with the remote splice instead of cancelling it.
// But this is an edge case that should rarely occur, so it's probably not worth the additional complexity.
log.warning("our peer initiated quiescence before us, cancelling our splice attempt")
cmd.replyTo ! RES_FAILURE(cmd, ConcurrentRemoteSplice(d.channelId))
if (d.commitments.remoteIsQuiescent) {
d.spliceStatus match {
case SpliceStatus.NoSplice =>
startSingleTimer(QuiescenceTimeout.toString, QuiescenceTimeout(peer), nodeParams.channelConf.quiescenceTimeout)
if (d.commitments.localIsQuiescent) {
stay() using d.copy(spliceStatus = SpliceStatus.NonInitiatorQuiescent) sending Stfu(d.channelId, initiator = false)
} else {
stay() using d.copy(spliceStatus = SpliceStatus.ReceivedStfu(msg))
case SpliceStatus.InitiatorQuiescent(cmd) =>
// if both sides send stfu at the same time, the quiescence initiator is the channel opener
if (!msg.initiator || d.commitments.params.localParams.isChannelOpener) {
initiateSplice(cmd, d) match {
case Left(f) =>
cmd.replyTo ! RES_FAILURE(cmd, f)
context.system.scheduler.scheduleOnce(2 second, peer, Peer.Disconnect(remoteNodeId))
stay() using d.copy(spliceStatus = SpliceStatus.NoSplice) sending Warning(d.channelId, f.getMessage)
case Right(spliceInit) =>
stay() using d.copy(spliceStatus = SpliceStatus.SpliceRequested(cmd, spliceInit)) sending spliceInit
}
} else {
log.warning("concurrent stfu received and our peer is the channel initiator, cancelling our splice attempt")
cmd.replyTo ! RES_FAILURE(cmd, ConcurrentRemoteSplice(d.channelId))
stay() using d.copy(spliceStatus = SpliceStatus.NonInitiatorQuiescent)
}
case SpliceStatus.QuiescenceRequested(cmd) =>
// We could keep track of our splice attempt and merge it with the remote splice instead of cancelling it.
// But this is an edge case that should rarely occur, so it's probably not worth the additional complexity.
log.warning("our peer initiated quiescence before us, cancelling our splice attempt")
cmd.replyTo ! RES_FAILURE(cmd, ConcurrentRemoteSplice(d.channelId))
stay() using d.copy(spliceStatus = SpliceStatus.ReceivedStfu(msg))
case SpliceStatus.InitiatorQuiescent(cmd) =>
// if both sides send stfu at the same time, the quiescence initiator is the channel opener
if (!msg.initiator || d.commitments.params.localParams.isChannelOpener) {
initiateSplice(cmd, d) match {
case Left(f) =>
cmd.replyTo ! RES_FAILURE(cmd, f)
context.system.scheduler.scheduleOnce(2 second, peer, Peer.Disconnect(remoteNodeId))
stay() using d.copy(spliceStatus = SpliceStatus.NoSplice) sending Warning(d.channelId, f.getMessage)
case Right(spliceInit) =>
stay() using d.copy(spliceStatus = SpliceStatus.SpliceRequested(cmd, spliceInit)) sending spliceInit
}
case _ =>
log.warning("ignoring duplicate stfu")
stay()
}
} else {
log.warning("our peer sent stfu but is not quiescent")
// NB: we use a small delay to ensure we've sent our warning before disconnecting.
context.system.scheduler.scheduleOnce(2 second, peer, Peer.Disconnect(remoteNodeId))
stay() using d.copy(spliceStatus = SpliceStatus.NoSplice) sending Warning(d.channelId, InvalidSpliceNotQuiescent(d.channelId).getMessage)
} else {
log.warning("concurrent stfu received and our peer is the channel initiator, cancelling our splice attempt")
cmd.replyTo ! RES_FAILURE(cmd, ConcurrentRemoteSplice(d.channelId))
stay() using d.copy(spliceStatus = SpliceStatus.NonInitiatorQuiescent)
}
case _ =>
log.warning("ignoring duplicate stfu")
stay()
}
} else {
log.warning("ignoring stfu because both peers do not advertise quiescence")
stay()
log.warning("our peer sent stfu but is not quiescent")
// NB: we use a small delay to ensure we've sent our warning before disconnecting.
context.system.scheduler.scheduleOnce(2 second, peer, Peer.Disconnect(remoteNodeId))
stay() using d.copy(spliceStatus = SpliceStatus.NoSplice) sending Warning(d.channelId, InvalidSpliceNotQuiescent(d.channelId).getMessage)
}

case Event(_: QuiescenceTimeout, d: DATA_NORMAL) => handleQuiescenceTimeout(d)

case Event(_: SpliceInit, d: DATA_NORMAL) if d.spliceStatus == SpliceStatus.NoSplice && d.commitments.params.useQuiescence =>
log.info("rejecting splice attempt: quiescence not negotiated")
stay() using d.copy(spliceStatus = SpliceStatus.SpliceAborted) sending TxAbort(d.channelId, InvalidSpliceNotQuiescent(d.channelId).getMessage)

case Event(msg: SpliceInit, d: DATA_NORMAL) =>
d.spliceStatus match {
case SpliceStatus.NoSplice | SpliceStatus.NonInitiatorQuiescent =>
case SpliceStatus.NonInitiatorQuiescent =>
if (!d.commitments.isQuiescent) {
log.info("rejecting splice request: channel not quiescent")
stay() using d.copy(spliceStatus = SpliceStatus.SpliceAborted) sending TxAbort(d.channelId, InvalidSpliceNotQuiescent(d.channelId).getMessage)
@@ -993,6 +976,9 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
stay() using d.copy(spliceStatus = SpliceStatus.SpliceInProgress(cmd_opt = None, sessionId, txBuilder, remoteCommitSig = None)) sending spliceAck
}
}
case SpliceStatus.NoSplice =>
log.info("rejecting splice attempt: quiescence not negotiated")
stay() using d.copy(spliceStatus = SpliceStatus.SpliceAborted) sending TxAbort(d.channelId, InvalidSpliceNotQuiescent(d.channelId).getMessage)
case SpliceStatus.SpliceAborted =>
log.info("rejecting splice attempt: our previous tx_abort was not acked")
stay() sending Warning(d.channelId, InvalidSpliceTxAbortNotAcked(d.channelId).getMessage)
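The comment in the stfu handler above notes that when both sides send stfu at the same time, the quiescence initiator is the channel opener. A standalone sketch of that tie-break (hypothetical helper, not part of eclair) may make the rule easier to see:

```scala
// If the remote stfu does not claim the initiator role, or if we opened the channel,
// we proceed as the splice initiator; otherwise we yield and become the non-initiator.
object StfuTieBreak {
  def weInitiate(remoteClaimsInitiator: Boolean, weOpenedTheChannel: Boolean): Boolean =
    !remoteClaimsInitiator || weOpenedTheChannel
}

// Example: both peers sent stfu with initiator = true; only the channel opener goes ahead.
// StfuTieBreak.weInitiate(remoteClaimsInitiator = true, weOpenedTheChannel = true)  == true
// StfuTieBreak.weInitiate(remoteClaimsInitiator = true, weOpenedTheChannel = false) == false
```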
@@ -106,6 +106,8 @@ object TestConstants {
Features.PaymentMetadata -> FeatureSupport.Optional,
Features.RouteBlinding -> FeatureSupport.Optional,
Features.StaticRemoteKey -> FeatureSupport.Mandatory,
Features.Quiescence -> FeatureSupport.Optional,
Features.SplicePrototype -> FeatureSupport.Optional,
),
unknown = Set(UnknownFeature(TestFeature.optional))
),
@@ -282,6 +284,8 @@ object TestConstants {
Features.RouteBlinding -> FeatureSupport.Optional,
Features.StaticRemoteKey -> FeatureSupport.Mandatory,
Features.AnchorOutputsZeroFeeHtlcTx -> FeatureSupport.Optional,
Features.Quiescence -> FeatureSupport.Optional,
Features.SplicePrototype -> FeatureSupport.Optional,
),
pluginParams = Nil,
overrideInitFeatures = Map.empty,
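These defaults matter because the removed ChannelParams.useQuiescence helper checked Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.Quiescence): with both test nodes now advertising Quiescence and SplicePrototype as optional, that check would always succeed, which is why the dedicated test tags are dropped in the next file. A toy illustration (hypothetical types, not the eclair Features API), assuming the usual rule that a feature is usable once both peers advertise it:

```scala
// Toy model of init-feature negotiation for the two test nodes configured above.
object FeatureNegotiationSketch extends App {
  sealed trait Support
  case object Optional extends Support
  case object Mandatory extends Support

  def canUseFeature(local: Map[String, Support], remote: Map[String, Support], feature: String): Boolean =
    local.contains(feature) && remote.contains(feature)

  val alice = Map("option_quiescence" -> Optional, "splice_prototype" -> Optional)
  val bob = Map("option_quiescence" -> Optional, "splice_prototype" -> Optional)
  assert(canUseFeature(alice, bob, "option_quiescence")) // holds with the defaults above
}
```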
@@ -53,8 +53,6 @@ object ChannelStateTestsTags {
val DualFunding = "dual_funding"
/** If set, a liquidity ad will be used when opening a channel. */
val LiquidityAds = "liquidity_ads"
/** If set, peers will support splicing. */
val Splicing = "splicing"
/** If set, channels will use option_static_remotekey. */
val StaticRemoteKey = "static_remotekey"
/** If set, channels will use option_anchor_outputs. */
@@ -93,8 +91,6 @@ object ChannelStateTestsTags {
val RejectRbfAttempts = "reject_rbf_attempts"
/** If set, the non-initiator will require a 1-block delay between RBF attempts. */
val DelayRbfAttempts = "delay_rbf_attempts"
/** If set, peers will support the quiesce protocol. */
val Quiescence = "quiescence"
/** If set, channels will adapt their max HTLC amount to the available balance */
val AdaptMaxHtlcAmount = "adapt-max-htlc-amount"
}
@@ -165,7 +161,7 @@ trait ChannelStateTestsBase extends Assertions with Eventually {
.modify(_.channelConf.balanceThresholds).setToIf(tags.contains(ChannelStateTestsTags.AdaptMaxHtlcAmount))(Seq(Channel.BalanceThreshold(1_000 sat, 0 sat), Channel.BalanceThreshold(5_000 sat, 1_000 sat), Channel.BalanceThreshold(10_000 sat, 5_000 sat)))
val wallet = wallet_opt match {
case Some(wallet) => wallet
case None => if (tags.contains(ChannelStateTestsTags.DualFunding) || tags.contains(ChannelStateTestsTags.Splicing)) new SingleKeyOnChainWallet() else new DummyOnChainWallet()
case None => if (tags.contains(ChannelStateTestsTags.DualFunding)) new SingleKeyOnChainWallet() else new DummyOnChainWallet()
}
val alice: TestFSMRef[ChannelState, ChannelData, Channel] = {
implicit val system: ActorSystem = systemA
@@ -192,8 +188,6 @@ trait ChannelStateTestsBase extends Assertions with Eventually {
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ZeroConf))(_.updated(Features.ZeroConf, FeatureSupport.Optional))
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ScidAlias))(_.updated(Features.ScidAlias, FeatureSupport.Optional))
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DualFunding))(_.updated(Features.DualFunding, FeatureSupport.Optional))
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.Splicing))(_.updated(Features.SplicePrototype, FeatureSupport.Optional))
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.Quiescence))(_.updated(Features.Quiescence, FeatureSupport.Optional))
.initFeatures()
val bobInitFeatures = Bob.nodeParams.features
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DisableWumbo))(_.removed(Features.Wumbo))
@@ -206,8 +200,6 @@ trait ChannelStateTestsBase extends Assertions with Eventually {
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ZeroConf))(_.updated(Features.ZeroConf, FeatureSupport.Optional))
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ScidAlias))(_.updated(Features.ScidAlias, FeatureSupport.Optional))
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DualFunding))(_.updated(Features.DualFunding, FeatureSupport.Optional))
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.Splicing))(_.updated(Features.SplicePrototype, FeatureSupport.Optional))
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.Quiescence))(_.updated(Features.Quiescence, FeatureSupport.Optional))
.initFeatures()

val channelType = ChannelTypes.defaultFromFeatures(aliceInitFeatures, bobInitFeatures, announceChannel = channelFlags.announceChannel)
@@ -45,7 +45,7 @@ class NormalQuiescentStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteL
implicit val log: akka.event.LoggingAdapter = akka.event.NoLogging

override def withFixture(test: OneArgTest): Outcome = {
val tags = test.tags + ChannelStateTestsTags.DualFunding + ChannelStateTestsTags.Splicing + ChannelStateTestsTags.Quiescence
val tags = test.tags + ChannelStateTestsTags.DualFunding
val setup = init(tags = tags)
import setup._
reachNormal(setup, tags)