diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7e6b3e667392..100964121e77 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
+- JSON API: the optional arguments to `close` have changed: it now takes a single `unilateraltimeout` and defaults to a unilateral close after 48 hours.
- build: now requires `python3-mako` to be installed, i.e. `sudo apt-get install python3-mako`
- plugins: if the config directory has a `plugins` subdirectory, those are loaded.
- plugins: a new notification type `invoice_payment` (sent when an invoice is paid) has been added
diff --git a/channeld/full_channel.c b/channeld/full_channel.c
index 4ec343c54dda..190dd844859f 100644
--- a/channeld/full_channel.c
+++ b/channeld/full_channel.c
@@ -367,6 +367,7 @@ static enum channel_add_err add_htlc(struct channel *channel,
enum side sender = htlc_state_owner(state), recipient = !sender;
const struct htlc **committed, **adding, **removing;
const struct channel_view *view;
+ u32 min_concurrent_htlcs;
htlc = tal(tmpctx, struct htlc);
@@ -443,8 +444,16 @@ static enum channel_add_err add_htlc(struct channel *channel,
* HTLCs to its local commitment transaction...
* - SHOULD fail the channel.
*/
+	/* We also must not add more HTLCs than either the sender or the
+	 * recipient configured. This mitigates attacks in which a peer can
+	 * force the funder of the channel to pay unnecessary onchain fees
+	 * during a fee spike with large commitment transactions.
+	 */
+ min_concurrent_htlcs = channel->config[recipient].max_accepted_htlcs;
+ if (min_concurrent_htlcs > channel->config[sender].max_accepted_htlcs)
+ min_concurrent_htlcs = channel->config[sender].max_accepted_htlcs;
if (tal_count(committed) - tal_count(removing) + tal_count(adding)
- > channel->config[recipient].max_accepted_htlcs) {
+ > min_concurrent_htlcs) {
return CHANNEL_ERR_TOO_MANY_HTLCS;
}
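
Review note: a minimal Python model of the check above, for reviewers. The dict keys mirror the C config fields; the function name and call shape are illustrative scaffolding, not part of the patch.

```python
def too_many_htlcs(committed, adding, removing, sender_cfg, recipient_cfg):
    """Model of the add_htlc() check above: cap concurrent HTLCs at the
    smaller of the two sides' max_accepted_htlcs, so a peer cannot bloat
    the funder's commitment transaction (and thus its onchain fees)
    beyond what either side agreed to."""
    min_concurrent_htlcs = min(sender_cfg["max_accepted_htlcs"],
                               recipient_cfg["max_accepted_htlcs"])
    return len(committed) - len(removing) + len(adding) > min_concurrent_htlcs

# With the new mainnet default of 30 on both sides, a 31st concurrent
# HTLC is refused (CHANNEL_ERR_TOO_MANY_HTLCS):
assert too_many_htlcs(committed=[None] * 30, adding=[None], removing=[],
                      sender_cfg={"max_accepted_htlcs": 30},
                      recipient_cfg={"max_accepted_htlcs": 30})
```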
diff --git a/contrib/pylightning/lightning/lightning.py b/contrib/pylightning/lightning/lightning.py
index 941802f0611f..8eb99b22e275 100644
--- a/contrib/pylightning/lightning/lightning.py
+++ b/contrib/pylightning/lightning/lightning.py
@@ -3,6 +3,7 @@
import logging
from math import floor, log10
import socket
+import warnings
__version__ = "0.0.7.3"
@@ -309,16 +310,43 @@ def check(self, command_to_check, **kwargs):
payload.update({k: v for k, v in kwargs.items()})
return self.call("check", payload)
- def close(self, peer_id, force=None, timeout=None):
+ def _deprecated_close(self, peer_id, force=None, timeout=None):
+ warnings.warn("close now takes unilateraltimeout arg: expect removal"
+ " in early 2020",
+ DeprecationWarning)
+ payload = {
+ "id": peer_id,
+ "force": force,
+ "timeout": timeout
+ }
+ return self.call("close", payload)
+
+ def close(self, peer_id, *args, **kwargs):
"""
Close the channel with peer {id}, forcing a unilateral
- close if {force} is True, and timing out with {timeout}
- seconds.
+ close after {unilateraltimeout} seconds if non-zero.
+
+ Deprecated usage has {force} and {timeout} args.
"""
+ unilateraltimeout = None
+
+ if 'force' in kwargs or 'timeout' in kwargs:
+ return self._deprecated_close(peer_id, *args, **kwargs)
+
+        # A single positional argument is ambiguous: a bool means the
+        # old-style 'force' flag.
+ if len(args) == 1:
+ if isinstance(args[0], bool):
+ return self._deprecated_close(peer_id, *args, **kwargs)
+ unilateraltimeout = args[0]
+ elif len(args) > 1:
+ return self._deprecated_close(peer_id, *args, **kwargs)
+
+ if 'unilateraltimeout' in kwargs:
+ unilateraltimeout = kwargs['unilateraltimeout']
+
payload = {
"id": peer_id,
- "force": force,
- "timeout": timeout
+ "unilateraltimeout": unilateraltimeout
}
return self.call("close", payload)
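
Review note: the resulting caller-facing behaviour, assuming `rpc` is a connected `LightningRpc`; the socket path and `peer_id` below are placeholders.

```python
from lightning import LightningRpc

rpc = LightningRpc("/tmp/lightning-rpc")   # placeholder socket path
peer_id = "02..."                          # placeholder 66-hex-digit node id

rpc.close(peer_id)                         # new default: unilateral after 48h
rpc.close(peer_id, 0)                      # wait forever for a mutual close
rpc.close(peer_id, unilateraltimeout=60)   # unilateral after one minute

# Old-style calls still work but now emit a DeprecationWarning:
rpc.close(peer_id, True, 30)               # force=True, timeout=30
```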
diff --git a/doc/lightning-close.7 b/doc/lightning-close.7
index ce0d489b29b5..6e1e5663ea32 100644
--- a/doc/lightning-close.7
+++ b/doc/lightning-close.7
@@ -2,12 +2,12 @@
.\" Title: lightning-close
.\" Author: [see the "AUTHOR" section]
.\" Generator: DocBook XSL Stylesheets v1.79.1
-.\" Date: 04/30/2018
+.\" Date: 08/08/2019
.\" Manual: \ \&
.\" Source: \ \&
.\" Language: English
.\"
-.TH "LIGHTNING\-CLOSE" "7" "04/30/2018" "\ \&" "\ \&"
+.TH "LIGHTNING\-CLOSE" "7" "08/08/2019" "\ \&" "\ \&"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,29 +31,32 @@
lightning-close \- Command for closing channels with direct peers
.SH "SYNOPSIS"
.sp
-\fBclose\fR \fIid\fR [\fIforce\fR] [\fItimeout\fR]
+\fBclose\fR \fIid\fR [\fIunilateraltimeout\fR]
.SH "DESCRIPTION"
.sp
-The \fBclose\fR RPC command attempts to close the channel cooperatively with the peer\&. If the given \fIid\fR is a peer ID (66 hex digits as a string), then it applies to the active channel of the direct peer corresponding to the given peer ID\&. If the given \fIid\fR is a channel ID (64 hex digits as a string, or the short channel ID \fIblockheight:txindex:outindex\fR form), then it applies to that channel\&.
+The \fBclose\fR RPC command attempts to close the channel cooperatively with the peer, or unilaterally after \fIunilateraltimeout\fR\&.
.sp
-The \fBclose\fR command will time out and return with an error when the number of seconds specified in \fItimeout\fR is reached\&. If unspecified, it times out in 30 seconds\&.
+If the given \fIid\fR is a peer ID (66 hex digits as a string), then it applies to the active channel of the direct peer corresponding to the given peer ID\&. If the given \fIid\fR is a channel ID (64 hex digits as a string, or the short channel ID \fIblockheight:txindex:outindex\fR form), then it applies to that channel\&.
.sp
-The \fIforce\fR argument, if the JSON value \fItrue\fR, will cause the channel to be unilaterally closed when the timeout is reached\&. If so, timeout will not cause an error, but instead cause the channel to be failed and put onchain unilaterally\&. Unilateral closes will lead to your funds getting locked according to the \fIto_self_delay\fR parameter of the peer\&.
+If \fIunilateraltimeout\fR is not zero, the \fBclose\fR command will unilaterally close the channel when that number of seconds is reached\&. If \fIunilateraltimeout\fR is zero, then the \fBclose\fR command will wait indefinitely until the peer is online and can negotiate a mutual close\&. The default is 2 days (172800 seconds)\&.
.sp
-Normally the peer needs to be live and connected in order to negotiate a mutual close\&. Forcing a unilateral close can be used if you suspect you can no longer contact the peer\&.
+The peer needs to be live and connected in order to negotiate a mutual close\&. If the peer has not reconnected within the default 48 hours, that is usually a reasonable sign that you can no longer contact the peer\&.
+.SH "NOTES"
+.sp
+Prior to 0\&.7\&.2, \fBclose\fR took two parameters: \fIforce\fR and \fItimeout\fR\&. \fItimeout\fR was the number of seconds before \fIforce\fR took effect (default 30), and \fIforce\fR determined whether the result was a unilateral close or an RPC error (the default)\&. Even after the timeout, the channel would be closed if the peer reconnected\&.
.SH "RETURN VALUE"
.sp
On success, an object with fields \fItx\fR and \fItxid\fR containing the closing transaction are returned\&. It will also have a field \fItype\fR which is either the JSON string \fImutual\fR or the JSON string \fIunilateral\fR\&. A \fImutual\fR close means that we could negotiate a close with the peer, while a \fIunilateral\fR close means that the \fIforce\fR flag was set and we had to close the channel without waiting for the counterparty\&.
.sp
-A unilateral close may still occur with \fIforce\fR set to \fIfalse\fR if the peer did not behave correctly during the close negotiation\&.
+A unilateral close may still occur at any time if the peer did not behave correctly during the close negotiation\&.
.sp
Unilateral closes will return your funds after a delay\&. The delay will vary based on the peer \fIto_self_delay\fR setting, not your own setting\&.
-.sp
-On failure, if \fBclose\fR failed due to timing out with \fIforce\fR argument \fIfalse\fR, the channel will still eventually close once we have contacted the peer\&.
.SH "AUTHOR"
.sp
ZmnSCPxj is mainly responsible\&.
.SH "SEE ALSO"
+.sp
+lightning\-disconnect(7), lightning\-fundchannel(7)
.SH "RESOURCES"
.sp
Main web site: https://github\&.com/ElementsProject/lightning
diff --git a/doc/lightning-close.7.txt b/doc/lightning-close.7.txt
index cb84447c8b9a..d3d678ad94df 100644
--- a/doc/lightning-close.7.txt
+++ b/doc/lightning-close.7.txt
@@ -8,13 +8,14 @@ lightning-close - Command for closing channels with direct peers
SYNOPSIS
--------
-*close* 'id' ['force'] ['timeout']
+*close* 'id' ['unilateraltimeout']
DESCRIPTION
-----------
The *close* RPC command attempts to close the channel cooperatively
-with the peer.
+with the peer, or unilaterally after 'unilateraltimeout'.
+
If the given 'id' is a peer ID (66 hex digits as a string), then
it applies to the active channel of the direct peer corresponding to
the given peer ID.
@@ -22,22 +23,26 @@ If the given 'id' is a channel ID (64 hex digits as a string, or
the short channel ID 'blockheight:txindex:outindex' form), then it
applies to that channel.
-The *close* command will time out and return with an error when the
-number of seconds specified in 'timeout' is reached.
-If unspecified, it times out in 30 seconds.
-
-The 'force' argument, if the JSON value 'true', will cause the
-channel to be unilaterally closed when the timeout is reached.
-If so, timeout will not cause an error, but instead cause the
-channel to be failed and put onchain unilaterally.
-Unilateral closes will lead to your funds getting locked according
-to the 'to_self_delay' parameter of the peer.
+If 'unilateraltimeout' is not zero, the *close* command will
+unilaterally close the channel when that number of seconds is reached.
+If 'unilateraltimeout' is zero, then the *close* command will wait
+indefinitely until the peer is online and can negotiate a mutual
+close. The default is 2 days (172800 seconds).
-Normally the peer needs to be live and connected in order to negotiate
-a mutual close.
-Forcing a unilateral close can be used if you suspect you can no longer
+The peer needs to be live and connected in order to negotiate
+a mutual close. If the peer has not reconnected within the default
+48 hours, that is usually a reasonable sign that you can no longer
contact the peer.
+NOTES
+-----
+
+Prior to 0.7.2, *close* took two parameters: 'force' and 'timeout'.
+'timeout' was the number of seconds before 'force' took effect
+(default 30), and 'force' determined whether the result was a
+unilateral close or an RPC error (the default). Even after
+the timeout, the channel would be closed if the peer reconnected.
+
RETURN VALUE
------------
@@ -50,24 +55,20 @@ peer, while a 'unilateral' close means that the 'force' flag was
set and we had to close the channel without waiting for the
counterparty.
-A unilateral close may still occur with 'force' set to 'false' if
+A unilateral close may still occur at any time if
the peer did not behave correctly during the close negotiation.
Unilateral closes will return your funds after a delay.
The delay will vary based on the peer 'to_self_delay' setting, not
your own setting.
-On failure, if *close* failed due to timing out with 'force'
-argument 'false', the channel will still eventually close once
-we have contacted the peer.
-
AUTHOR
------
ZmnSCPxj is mainly responsible.
SEE ALSO
--------
-
+lightning-disconnect(7), lightning-fundchannel(7)
RESOURCES
---------
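
Review note: at the JSON-RPC layer, the documented change looks like this. A sketch only: the envelope is the standard JSON-RPC 2.0 form lightningd speaks, and the `id` value is a placeholder.

```python
import json

# New style: mutual close, falling back to unilateral after 60 seconds.
new_style = {"jsonrpc": "2.0", "id": 1, "method": "close",
             "params": {"id": "02...", "unilateraltimeout": 60}}

# Roughly equivalent pre-0.7.2 request, still accepted while
# deprecated APIs are enabled.
old_style = {"jsonrpc": "2.0", "id": 2, "method": "close",
             "params": {"id": "02...", "force": True, "timeout": 60}}

print(json.dumps(new_style))
```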
diff --git a/doc/lightningd-config.5 b/doc/lightningd-config.5
index da57fa74b187..42560a0e54f0 100644
--- a/doc/lightningd-config.5
+++ b/doc/lightningd-config.5
@@ -2,12 +2,12 @@
.\" Title: lightningd-config
.\" Author: [see the "AUTHOR" section]
.\" Generator: DocBook XSL Stylesheets v1.79.1
-.\" Date: 08/04/2019
+.\" Date: 08/09/2019
.\" Manual: \ \&
.\" Source: \ \&
.\" Language: English
.\"
-.TH "LIGHTNINGD\-CONFIG" "5" "08/04/2019" "\ \&" "\ \&"
+.TH "LIGHTNINGD\-CONFIG" "5" "08/09/2019" "\ \&" "\ \&"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -257,6 +257,11 @@ Limits on what onchain fee range we\(cqll allow when a node opens a channel with
can (should!) be greater than 100\&.
.RE
.PP
+\fBmax\-concurrent\-htlcs\fR=\fIINTEGER\fR
+.RS 4
+Number of HTLCs one channel can handle concurrently in each direction\&. Must be between 1 and 483 (default 30)\&.
+.RE
+.PP
\fBcltv\-delta\fR=\fIBLOCKS\fR
.RS 4
The number of blocks between incoming payments and outgoing payments: this needs to be enough to make sure that if we have to, we can close the outgoing payment before the incoming, or redeem the incoming once the outgoing is redeemed\&.
diff --git a/doc/lightningd-config.5.txt b/doc/lightningd-config.5.txt
index 83ff02b468ac..91558184b251 100644
--- a/doc/lightningd-config.5.txt
+++ b/doc/lightningd-config.5.txt
@@ -205,6 +205,10 @@ Lightning channel and HTLC options
they're outside this range, we abort their opening attempt. Note
that *commit-fee-max* can (should!) be greater than 100.
+*max-concurrent-htlcs*='INTEGER'::
+  Number of HTLCs one channel can handle concurrently in each direction.
+  Must be between 1 and 483 (default 30).
+
*cltv-delta*='BLOCKS'::
The number of blocks between incoming payments and outgoing payments:
this needs to be enough to make sure that if we have to, we can close
diff --git a/gossipd/gossip_store.c b/gossipd/gossip_store.c
index 752823425e0c..f2ef436aaa64 100644
--- a/gossipd/gossip_store.c
+++ b/gossipd/gossip_store.c
@@ -544,8 +544,13 @@ const u8 *gossip_store_get(const tal_t *ctx,
offset, gs->len, strerror(errno));
}
- /* FIXME: We should skip over these deleted entries! */
- msglen = be32_to_cpu(hdr.len) & ~GOSSIP_STORE_LEN_DELETED_BIT;
+ if (be32_to_cpu(hdr.len) & GOSSIP_STORE_LEN_DELETED_BIT)
+ status_failed(STATUS_FAIL_INTERNAL_ERROR,
+			      "gossip_store: get deleted entry offset %"PRIu64
+ "/%"PRIu64"",
+ offset, gs->len);
+
+ msglen = be32_to_cpu(hdr.len);
checksum = be32_to_cpu(hdr.crc);
msg = tal_arr(ctx, u8, msglen);
if (pread(gs->fd, msg, msglen, offset + sizeof(hdr)) != msglen)
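
Review note: a minimal Python model of the stricter read path. It assumes the two-field big-endian record header (length word, then crc32) implied by the C code above, and that the deleted flag is the top bit of the length word — both assumptions about the on-disk layout, so treat as a sketch.

```python
import struct

GOSSIP_STORE_LEN_DELETED_BIT = 0x80000000  # assumed: top bit of length word

def gossip_store_get(f, offset):
    """Read one record at offset; deleted entries are now a hard error.

    Previously the bit was silently masked off and the entry returned
    anyway (the removed FIXME above)."""
    f.seek(offset)
    raw_len, crc = struct.unpack(">II", f.read(8))
    if raw_len & GOSSIP_STORE_LEN_DELETED_BIT:
        raise RuntimeError("gossip_store: get deleted entry offset %d" % offset)
    return f.read(raw_len), crc
```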
diff --git a/lightningd/lightningd.h b/lightningd/lightningd.h
index d681007397a2..148b98f658cd 100644
--- a/lightningd/lightningd.h
+++ b/lightningd/lightningd.h
@@ -42,6 +42,9 @@ struct config {
u32 fee_base;
u32 fee_per_satoshi;
+ /* htlcs per channel */
+ u32 max_concurrent_htlcs;
+
/* How long between changing commit and sending COMMIT message. */
u32 commit_time_ms;
diff --git a/lightningd/opening_control.c b/lightningd/opening_control.c
index 11f7ee73d0e4..17efc3950eac 100644
--- a/lightningd/opening_control.c
+++ b/lightningd/opening_control.c
@@ -99,7 +99,7 @@ static void uncommitted_channel_disconnect(struct uncommitted_channel *uc,
u8 *msg = towire_connectctl_peer_disconnected(tmpctx, &uc->peer->id);
log_info(uc->log, "%s", desc);
subd_send_msg(uc->peer->ld->connectd, msg);
- if (uc->fc)
+ if (uc->fc && uc->fc->cmd)
was_pending(command_fail(uc->fc->cmd, LIGHTNINGD, "%s", desc));
notify_disconnect(uc->peer->ld, &uc->peer->id);
}
@@ -835,13 +835,7 @@ static void channel_config(struct lightningd *ld,
*/
ours->to_self_delay = ld->config.locktime_blocks;
- /* BOLT #2:
- *
- * The receiving node MUST fail the channel if:
- *...
- * - `max_accepted_htlcs` is greater than 483.
- */
- ours->max_accepted_htlcs = 483;
+ ours->max_accepted_htlcs = ld->config.max_concurrent_htlcs;
/* This is filled in by lightning_openingd, for consistency. */
ours->channel_reserve = AMOUNT_SAT(UINT64_MAX);
diff --git a/lightningd/options.c b/lightningd/options.c
index 317ea18e0eaf..1647969e4d3c 100644
--- a/lightningd/options.c
+++ b/lightningd/options.c
@@ -457,6 +457,9 @@ static const struct config testnet_config = {
/* We offer to pay 5 times 2-block fee */
.commitment_fee_percent = 500,
+ /* Testnet blockspace is free. */
+ .max_concurrent_htlcs = 483,
+
/* Be aggressive on testnet. */
.cltv_expiry_delta = 6,
.cltv_final = 10,
@@ -513,6 +516,9 @@ static const struct config mainnet_config = {
/* We offer to pay 5 times 2-block fee */
.commitment_fee_percent = 500,
+	/* While up to 483 HTLCs are possible, we default to 30 (as eclair does) to save blockspace. */
+ .max_concurrent_htlcs = 30,
+
/* BOLT #2:
*
* 1. the `cltv_expiry_delta` for channels, `3R+2G+2S`: if in doubt, a
@@ -569,7 +575,15 @@ static void check_config(struct lightningd *ld)
fatal("Commitment fee invalid min-max %u-%u",
ld->config.commitment_fee_min_percent,
ld->config.commitment_fee_max_percent);
-
+ /* BOLT #2:
+ *
+ * The receiving node MUST fail the channel if:
+ *...
+ * - `max_accepted_htlcs` is greater than 483.
+ */
+ if (ld->config.max_concurrent_htlcs < 1 || ld->config.max_concurrent_htlcs > 483)
+		fatal("--max-concurrent-htlcs value must be between 1 and 483, not %u",
+ ld->config.max_concurrent_htlcs);
if (ld->config.anchor_confirms == 0)
fatal("anchor-confirms must be greater than zero");
@@ -928,6 +942,9 @@ static void register_opts(struct lightningd *ld)
opt_register_arg("--fee-per-satoshi", opt_set_u32, opt_show_u32,
&ld->config.fee_per_satoshi,
"Microsatoshi fee for every satoshi in HTLC");
+ opt_register_arg("--max-concurrent-htlcs", opt_set_u32, opt_show_u32,
+ &ld->config.max_concurrent_htlcs,
+			 "Number of HTLCs one channel can handle concurrently in each direction. Must be between 1 and 483");
opt_register_arg("--min-capacity-sat", opt_set_u64, opt_show_u64,
&ld->config.min_capacity_sat,
"Minimum capacity in satoshis for accepting channels");
diff --git a/lightningd/peer_control.c b/lightningd/peer_control.c
index c33988945fe7..70ce43d10ca1 100644
--- a/lightningd/peer_control.c
+++ b/lightningd/peer_control.c
@@ -320,7 +320,7 @@ static void
register_close_command(struct lightningd *ld,
struct command *cmd,
struct channel *channel,
- unsigned int timeout,
+ unsigned int *timeout,
bool force)
{
struct close_command *cc;
@@ -335,8 +335,11 @@ register_close_command(struct lightningd *ld,
tal_add_destructor2(channel,
&destroy_close_command_on_channel_destroy,
cc);
- new_reltimer(ld->timers, cc, time_from_sec(timeout),
- &close_command_timeout, cc);
+ log_debug(ld->log, "close_command: force = %u, timeout = %i",
+ force, timeout ? *timeout : -1);
+ if (timeout)
+ new_reltimer(ld->timers, cc, time_from_sec(*timeout),
+ &close_command_timeout, cc);
}
static bool invalid_last_tx(const struct bitcoin_tx *tx)
@@ -1225,14 +1228,90 @@ static struct command_result *json_close(struct command *cmd,
struct peer *peer;
struct channel *channel COMPILER_WANTS_INIT("gcc 7.3.0 fails, 8.3 OK");
unsigned int *timeout;
- bool *force;
+ bool force = true;
+ bool do_timeout;
+
+	/* When generating help (params NULL), or when deprecated APIs are
+	 * disabled, accept only the new style. */
+	if (!params || !deprecated_apis) {
+ if (!param(cmd, buffer, params,
+ p_req("id", param_tok, &idtok),
+ p_opt_def("unilateraltimeout", param_number,
+ &timeout, 48 * 3600),
+ NULL))
+ return command_param_failed();
+ do_timeout = (*timeout != 0);
+ } else if (params->type == JSMN_ARRAY) {
+		const jsmntok_t *tok;
+		bool old_style = false;
+
+ /* Could be new or old style; get as tok. */
+ if (!param(cmd, buffer, params,
+ p_req("id", param_tok, &idtok),
+ p_opt("unilateraltimeout_or_force", param_tok, &tok),
+ p_opt("timeout", param_number, &timeout),
+ NULL))
+ return command_param_failed();
+
+ if (tok) {
+			/* Old-style force bool? */
+			if (json_to_bool(buffer, tok, &force)) {
+				old_style = true;
+ /* Old default timeout */
+ if (!timeout) {
+ timeout = tal(cmd, unsigned int);
+ *timeout = 30;
+ }
+ /* New-style timeout */
+ } else {
+ timeout = tal(cmd, unsigned int);
+ if (!json_to_number(buffer, tok, timeout)) {
+ return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
+							    "Expected unilateraltimeout to be a number");
+ }
+ }
+ }
- if (!param(cmd, buffer, params,
- p_req("id", param_tok, &idtok),
- p_opt_def("force", param_bool, &force, false),
- p_opt_def("timeout", param_number, &timeout, 30),
- NULL))
- return command_param_failed();
+		/* If they didn't specify a timeout, it's the (new) default */
+		if (!timeout) {
+			timeout = tal(cmd, unsigned int);
+			*timeout = 48 * 3600;
+		}
+		/* Old-style always had a timer; new-style skips it for 0. */
+		if (old_style)
+			do_timeout = true;
+		else
+			do_timeout = (*timeout != 0);
+ } else {
+ unsigned int *old_timeout;
+ bool *old_force;
+
+ /* Named parameters are easy to distinguish */
+ if (!param(cmd, buffer, params,
+ p_req("id", param_tok, &idtok),
+ p_opt_def("unilateraltimeout", param_number,
+ &timeout, 48 * 3600),
+ p_opt("force", param_bool, &old_force),
+ p_opt("timeout", param_number, &old_timeout),
+ NULL))
+ return command_param_failed();
+ /* Old style. */
+ if (old_timeout) {
+ *timeout = *old_timeout;
+ }
+ if (old_force) {
+ /* Use old default */
+ if (!old_timeout)
+ *timeout = 30;
+ force = *old_force;
+ }
+
+ /* New style: do_timeout unless it's 0 */
+ if (!old_timeout && !old_force)
+ do_timeout = (*timeout != 0);
+ else
+ do_timeout = true;
+ }
peer = peer_from_json(cmd->ld, buffer, idtok);
if (peer)
@@ -1283,7 +1355,8 @@ static struct command_result *json_close(struct command *cmd,
}
/* Register this command for later handling. */
- register_close_command(cmd->ld, cmd, channel, *timeout, *force);
+ register_close_command(cmd->ld, cmd, channel,
+ do_timeout ? timeout : NULL, force);
/* Wait until close drops down to chain. */
return command_still_pending(cmd);
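
Review note: the array-vs-keyword disambiguation above is dense, so here is a hedged Python restatement of the intended decision table. The C code is authoritative; the function name and shapes are illustrative, error paths are elided, and this assumes the array-branch fix above (a new-style 0 disables the timer).

```python
def resolve_close_params(params):
    """(timeout_or_None, force) as json_close() resolves them when
    deprecated APIs are enabled; None means no timer at all, i.e. wait
    indefinitely for a mutual close."""
    NEW_DEFAULT, OLD_DEFAULT = 48 * 3600, 30

    if isinstance(params, list):                  # positional params
        arg = params[1] if len(params) > 1 else None
        old_timeout = params[2] if len(params) > 2 else None
        if isinstance(arg, bool):                 # old-style force flag
            return (old_timeout if old_timeout is not None else OLD_DEFAULT,
                    arg)
        timeout = arg if arg is not None else NEW_DEFAULT
        return (timeout or None), True            # 0 means "no timer"

    # Keyword parameters are unambiguous.
    force, timeout = True, params.get("unilateraltimeout", NEW_DEFAULT)
    old_force, old_timeout = params.get("force"), params.get("timeout")
    if old_timeout is not None:
        timeout = old_timeout
    if old_force is not None:
        force = old_force
        if old_timeout is None:
            timeout = OLD_DEFAULT
    if old_force is None and old_timeout is None:
        timeout = timeout or None                 # new style: 0 disables
    return timeout, force

# New style: 0 waits forever; the default is 48 hours, then unilateral.
assert resolve_close_params(["02...", 0]) == (None, True)
assert resolve_close_params({"id": "02..."}) == (48 * 3600, True)
# Old style: force=False means an RPC error once the 30s timer fires.
assert resolve_close_params(["02...", False, 30]) == (30, False)
```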
diff --git a/tests/test_closing.py b/tests/test_closing.py
index a5a622343146..085e64a37843 100644
--- a/tests/test_closing.py
+++ b/tests/test_closing.py
@@ -38,9 +38,7 @@ def test_closing(node_factory, bitcoind):
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
- # This should return with an error, then close.
- with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
- l1.rpc.close(chan, False, 0)
+ l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
@@ -97,7 +95,7 @@ def test_closing(node_factory, bitcoind):
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
-def test_closing_while_disconnected(node_factory, bitcoind):
+def test_closing_while_disconnected(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
@@ -105,11 +103,12 @@ def test_closing_while_disconnected(node_factory, bitcoind):
l2.stop()
# The close should still be triggered afterwards.
- with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
- l1.rpc.close(chan, False, 0)
+ fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
+ fut.result(TIMEOUT)
+
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
@@ -181,8 +180,8 @@ def test_closing_torture(node_factory, executor, bitcoind):
l2.wait_channel_active(scid)
# Start closers: can take a long time under valgrind!
- c1 = executor.submit(l1.rpc.close, l2.info['id'], False, 60)
- c2 = executor.submit(l2.rpc.close, l1.info['id'], False, 60)
+ c1 = executor.submit(l1.rpc.close, l2.info['id'])
+ c2 = executor.submit(l2.rpc.close, l1.info['id'])
# Wait for close to finish
c1.result(TIMEOUT)
c2.result(TIMEOUT)
@@ -235,13 +234,8 @@ def test_closing_different_fees(node_factory, bitcoind, executor):
if p.amount != 0:
l1.pay(p, 100000000)
- # Now close all channels
- # All closes occur in parallel, and on Travis,
- # ALL those lightningd are running on a single core,
- # so increase the timeout so that this test will pass
- # when valgrind is enabled.
- # (close timeout defaults to 30 as of this writing)
- closes = [executor.submit(l1.rpc.close, p.channel, False, 90) for p in peers]
+ # Now close all channels (not unilaterally!)
+ closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
@@ -274,9 +268,7 @@ def test_closing_negotiation_reconnect(node_factory, bitcoind):
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
- # This should return with an error, then close.
- with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
- l1.rpc.close(chan, False, 0)
+ l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
diff --git a/tests/test_connection.py b/tests/test_connection.py
index ac64a3046ca1..ff9c7de0bd6d 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -586,9 +586,8 @@ def test_shutdown_reconnect(node_factory):
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 0
- # This should return with an error, then close.
- with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
- l1.rpc.close(chan, False, 0)
+ # This should wait until we're closed.
+ l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
@@ -648,9 +647,7 @@ def test_shutdown_awaiting_lockin(node_factory, bitcoind):
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
- # This should return with an error, then close.
- with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
- l1.rpc.close(chanid, False, 0)
+ l1.rpc.close(chanid)
l1.daemon.wait_for_log('CHANNELD_AWAITING_LOCKIN to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log('CHANNELD_AWAITING_LOCKIN to CHANNELD_SHUTTING_DOWN')
@@ -923,6 +920,7 @@ def test_funding_cancel_race(node_factory, bitcoind, executor):
def test_funding_external_wallet(node_factory, bitcoind):
l1 = node_factory.get_node()
l2 = node_factory.get_node()
+ l3 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert(l1.rpc.listpeers()['peers'][0]['id'] == l2.info['id'])
@@ -969,6 +967,11 @@ def test_funding_external_wallet(node_factory, bitcoind):
channel = node.rpc.listpeers()['peers'][0]['channels'][0]
assert amount * 1000 == channel['msatoshi_total']
+    # Test that we don't crash if the peer disconnects after fundchannel_start.
+ l2.connect(l3)
+ l2.rpc.fundchannel_start(l3.info["id"], amount)
+ l3.rpc.close(l2.info["id"])
+
def test_lockin_between_restart(node_factory, bitcoind):
l1 = node_factory.get_node(may_reconnect=True)
@@ -1145,8 +1148,7 @@ def test_update_fee(node_factory, bitcoind):
l2.pay(l1, 100000000)
# Now shutdown cleanly.
- with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
- l1.rpc.close(chanid, False, 0)
+ l1.rpc.close(chanid)
l1.daemon.wait_for_log(' to CLOSINGD_COMPLETE')
l2.daemon.wait_for_log(' to CLOSINGD_COMPLETE')
@@ -1345,8 +1347,7 @@ def test_peerinfo(node_factory, bitcoind):
assert l2.rpc.getpeer(l1.info['id'])['localfeatures'] == lfeatures
# Close the channel to forget the peer
- with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
- l1.rpc.close(chan, False, 0)
+ l1.rpc.close(chan)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
diff --git a/tests/test_pay.py b/tests/test_pay.py
index 19ff7160ed8e..ba459818e197 100644
--- a/tests/test_pay.py
+++ b/tests/test_pay.py
@@ -1202,7 +1202,7 @@ def test_forward_local_failed_stats(node_factory, bitcoind, executor):
payment_hash = l3.rpc.invoice(amount, "first", "desc")['payment_hash']
route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']
- l2.rpc.close(c23, True, 0)
+ l2.rpc.close(c23, 1)
with pytest.raises(RpcError):
l1.rpc.sendpay(route, payment_hash)
@@ -1849,7 +1849,7 @@ def test_setchannelfee_state(node_factory, bitcoind):
# Disconnect and unilaterally close from l2 to l1
l2.rpc.disconnect(l1.info['id'], force=True)
l1.rpc.disconnect(l2.info['id'], force=True)
- result = l2.rpc.close(scid, True, 0)
+ result = l2.rpc.close(scid, 1)
assert result['type'] == 'unilateral'
# wait for l1 to see unilateral close via bitcoin network
diff --git a/wallet/test/run-wallet.c b/wallet/test/run-wallet.c
index e277f95b9db6..eaa25322e8f6 100644
--- a/wallet/test/run-wallet.c
+++ b/wallet/test/run-wallet.c
@@ -325,6 +325,9 @@ char *json_strdup(const tal_t *ctx UNNEEDED, const char *buffer UNNEEDED, const
/* Generated stub for json_stream_success */
struct json_stream *json_stream_success(struct command *cmd UNNEEDED)
{ fprintf(stderr, "json_stream_success called!\n"); abort(); }
+/* Generated stub for json_to_bool */
+bool json_to_bool(const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED, bool *b UNNEEDED)
+{ fprintf(stderr, "json_to_bool called!\n"); abort(); }
/* Generated stub for json_tok_bin_from_hex */
u8 *json_tok_bin_from_hex(const tal_t *ctx UNNEEDED, const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED)
{ fprintf(stderr, "json_tok_bin_from_hex called!\n"); abort(); }