From 84c4f06737e1c0214a3ea8883abd5bbf795cb0f7 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 30 Jan 2026 15:24:11 +1030 Subject: [PATCH 1/8] pytest: fix flake in test_sql. If l2 hasn't seen l1's node_announcement yet: ``` # Correctly handles missing object. > assert l2.rpc.sql("SELECT option_will_fund_lease_fee_base_msat," " option_will_fund_lease_fee_basis," " option_will_fund_funding_weight," " option_will_fund_channel_fee_max_base_msat," " option_will_fund_channel_fee_max_proportional_thousandths," " option_will_fund_compact_lease" " FROM nodes WHERE HEX(nodeid) = '{}';".format(l1.info['id'].upper())) == {'rows': [[None] * 6]} E AssertionError: assert {'rows': []} == {'rows': [[None, None, None, None, None, None]]} E E Differing items: E {'rows': []} != {'rows': [[None, None, None, None, None, None]]} E E Full diff: E { E - 'rows': [ E + 'rows': [], E ? ++ E - [ E - None, E - None, E - None, E - None, E - None, E - None, E - ], E - ], E } tests/test_plugin.py:4131: AssertionError ``` Signed-off-by: Rusty Russell --- tests/test_plugin.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_plugin.py b/tests/test_plugin.py index 3f862c2cd80d..2e18c2d9bace 100644 --- a/tests/test_plugin.py +++ b/tests/test_plugin.py @@ -4021,6 +4021,9 @@ def test_sql(node_factory, bitcoind): # Make sure l3 sees new channel wait_for(lambda: len(l3.rpc.listchannels(scid)['channels']) == 2) + # Make sure we have a node_announcement for l1 + wait_for(lambda: l2.rpc.listnodes(l1.info['id'])['nodes'] != []) + # This should create a forward through l2 l1.rpc.pay(l3.rpc.invoice(amount_msat=12300, label='inv1', description='description')['bolt11']) From 6aeb91ec66d2ed7ba7a7908136a98be48ce7c020 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 30 Jan 2026 15:25:11 +1030 Subject: [PATCH 2/8] pytest: mark renepay's self-pay test flaky Not sure this is worth getting to the bottom of, if we're going for xpay? 
``` 2026-01-20T05:25:55.4375717Z lightningd-1 2026-01-20T05:22:41.062Z DEBUG plugin-cln-renepay: sendpay_failure notification: {"sendpay_failure":{"code":202,"message":"Malformed error reply","data":{"created_index":2,"id":2,"payment_hash":"8447592dc3786c3181746bd7bf17fe9e84d3de3dfff847f9f2b8c1a65c2d3ff6","groupid":1,"partid":1,"destination":"038194b5f32bdf0aa59812c86c4ef7ad2f294104fa027d1ace9b469bb6f88cf37b","amount_msat":10000,"amount_sent_msat":10000,"created_at":1768886560,"status":"pending","bolt11":"lnbcrt100n1p5k7yfqsp5g072rrl6jn7wu8nlxk279zpkacz34hr22n089j4elguccfxmg0gqpp5s3r4jtwr0pkrrqt5d0tm79l7n6zd8h3alluy070jhrq6vhpd8lmqdqgw3jhxapjxqyjw5qcqp99qxpqysgq6dd74sj0vpyjy9he8pep73tt8pljhv6mc74y28rr8995yjyahgh9kc2jqwqz62h6u6jpqz0u9x6gcahdw3pe2x8r5eyppp53mlpru8sp4p4nf6","onionreply":"c5a02ce5372635959d87818f723767efe3137bbc40568fb04e1f2eedee0a6149000e400f00000000000027100000006c00f20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}}} 2026-01-20T05:25:55.4386158Z lightningd-1 2026-01-20T05:22:41.067Z **BROKEN** plugin-cln-renepay: Unable to parse sendpay_failure: {"code":202,"message":"Malformed error 
reply","data":{"created_index":2,"id":2,"payment_hash":"8447592dc3786c3181746bd7bf17fe9e84d3de3dfff847f9f2b8c1a65c2d3ff6","groupid":1,"partid":1,"destination":"038194b5f32bdf0aa59812c86c4ef7ad2f294104fa027d1ace9b469bb6f88cf37b","amount_msat":10000,"amount_sent_msat":10000,"created_at":1768886560,"status":"pending","bolt11":"lnbcrt100n1p5k7yfqsp5g072rrl6jn7wu8nlxk279zpkacz34hr22n089j4elguccfxmg0gqpp5s3r4jtwr0pkrrqt5d0tm79l7n6zd8h3alluy070jhrq6vhpd8lmqdqgw3jhxapjxqyjw5qcqp99qxpqysgq6dd74sj0vpyjy9he8pep73tt8pljhv6mc74y28rr8995yjyahgh9kc2jqwqz62h6u6jpqz0u9x6gcahdw3pe2x8r5eyppp53mlpru8sp4p4nf6","onionreply":"c5a02ce5372635959d87818f723767efe3137bbc40568fb04e1f2eedee0a6149000e400f00000000000027100000006c00f20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}} ``` ``` 2026-01-20T05:25:55.4465729Z with pytest.raises(RpcError, match=r"Unknown invoice") as excinfo: 2026-01-20T05:25:55.4466111Z > l1.rpc.call("renepay", {"invstring": inv2}) 2026-01-20T05:25:55.4466310Z 2026-01-20T05:25:55.4466402Z tests/test_renepay.py:423: ... 
2026-01-20T05:25:55.4489732Z elif "error" in resp: 2026-01-20T05:25:55.4490005Z > raise RpcError(method, payload, resp['error']) 2026-01-20T05:25:55.4492135Z E pyln.client.lightning.RpcError: RPC call failed: method: renepay, payload: {'invstring': 'lnbcrt100n1p5k7yfqsp5g072rrl6jn7wu8nlxk279zpkacz34hr22n089j4elguccfxmg0gqpp5s3r4jtwr0pkrrqt5d0tm79l7n6zd8h3alluy070jhrq6vhpd8lmqdqgw3jhxapjxqyjw5qcqp99qxpqysgq6dd74sj0vpyjy9he8pep73tt8pljhv6mc74y28rr8995yjyahgh9kc2jqwqz62h6u6jpqz0u9x6gcahdw3pe2x8r5eyppp53mlpru8sp4p4nf6'}, error: {'code': -4, 'message': 'Plugin terminated before replying to RPC call.'} ``` Signed-off-by: Rusty Russell --- tests/test_renepay.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_renepay.py b/tests/test_renepay.py index 047d4a4d6b2f..65735426b0db 100644 --- a/tests/test_renepay.py +++ b/tests/test_renepay.py @@ -407,6 +407,7 @@ def test_hardmpp(node_factory): assert invoice["amount_received_msat"] >= Millisatoshi("1800000sat") +@pytest.mark.flaky(reruns=2) def test_self_pay(node_factory): l1, l2 = node_factory.line_graph(2, wait_for_announce=True) From 48889b0983e9065dd6f11399a6c27c4639bfcb16 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 30 Jan 2026 15:26:11 +1030 Subject: [PATCH 3/8] pytest: make test_connect_ratelimit more robust We were sending SIGSTOP to the lightningds, but that doesn't always mean the connectd would stop it seems: ``` lightningd-1 2026-01-27T04:49:16.979Z DEBUG 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-connectd: Initializing important peer with 1 addresses lightningd-1 2026-01-27T04:49:16.979Z DEBUG connectd: Got 10 bad cupdates, ignoring them (expected on mainnet) lightningd-1 2026-01-27T04:49:16.979Z DEBUG 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-connectd: Connected out, starting crypto lightningd-1 2026-01-27T04:49:16.980Z DEBUG 038194b5f32bdf0aa59812c86c4ef7ad2f294104fa027d1ace9b469bb6f88cf37b-hsmd: Got WIRE_HSMD_ECDH_REQ lightningd-1 
2026-01-27T04:49:16.981Z DEBUG hsmd: Client: Received message 1 from client lightningd-1 2026-01-27T04:49:16.985Z TRACE 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-gossipd: handle_recv_gossip: WIRE_CHANNEL_ANNOUNCEMENT lightningd-1 2026-01-27T04:49:16.985Z TRACE 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-gossipd: handle_recv_gossip: WIRE_CHANNEL_UPDATE lightningd-1 2026-01-27T04:49:16.985Z TRACE 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-gossipd: handle_recv_gossip: WIRE_CHANNEL_UPDATE lightningd-1 2026-01-27T04:49:16.985Z TRACE 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-gossipd: handle_recv_gossip: WIRE_NODE_ANNOUNCEMENT lightningd-1 2026-01-27T04:49:16.985Z DEBUG 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-connectd: Connect OUT lightningd-1 2026-01-27T04:49:16.986Z DEBUG 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-connectd: peer_out WIRE_INIT lightningd-1 2026-01-27T04:49:16.986Z DEBUG 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-connectd: peer_in WIRE_INIT lightningd-1 2026-01-27T04:49:16.986Z TRACE lightningd: Calling peer_connected hook of plugin chanbackup lightningd-1 2026-01-27T04:49:16.986Z DEBUG 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-connectd: Handed peer, entering loop lightningd-1 2026-01-27T04:49:16.986Z DEBUG 03cecbfdc68544cc596223b68ce0710c9e5d2c9cb317ee07822d95079acc703d31-connectd: Initializing important peer with 1 addresses lightningd-1 2026-01-27T04:49:16.986Z DEBUG 033845802d25b4e074ccfd7cd8b339a41dc75bf9978a034800444b51d42b07799a-connectd: Initializing important peer with 1 addresses lightningd-1 2026-01-27T04:49:16.987Z DEBUG 033845802d25b4e074ccfd7cd8b339a41dc75bf9978a034800444b51d42b07799a-connectd: Too many connections, waiting... 
lightningd-1 2026-01-27T04:49:16.987Z DEBUG 02186115cb7e93e2cb4d9d9fe7a9cf5ff7a5784bfdda4f164ff041655e4bcd4fd0-connectd: Initializing important peer with 1 addresses lightningd-1 2026-01-27T04:49:16.987Z DEBUG 02186115cb7e93e2cb4d9d9fe7a9cf5ff7a5784bfdda4f164ff041655e4bcd4fd0-connectd: Too many connections, waiting... lightningd-1 2026-01-27T04:49:16.987Z DEBUG 02287bfac8b99b35477ebe9334eede1e32b189e24644eb701c079614712331cec0-connectd: Initializing important peer with 1 addresses lightningd-1 2026-01-27T04:49:16.987Z DEBUG 02287bfac8b99b35477ebe9334eede1e32b189e24644eb701c079614712331cec0-connectd: Too many connections, waiting... lightningd-1 2026-01-27T04:49:16.987Z DEBUG 03cecbfdc68544cc596223b68ce0710c9e5d2c9cb317ee07822d95079acc703d31-connectd: Connected out, starting crypto lightningd-1 2026-01-27T04:49:16.989Z DEBUG 038194b5f32bdf0aa59812c86c4ef7ad2f294104fa027d1ace9b469bb6f88cf37b-hsmd: Got WIRE_HSMD_ECDH_REQ lightningd-1 2026-01-27T04:49:16.989Z DEBUG hsmd: Client: Received message 1 from client lightningd-1 2026-01-27T04:49:16.990Z DEBUG 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-connectd: peer_in WIRE_GOSSIP_TIMESTAMP_FILTER lightningd-1 2026-01-27T04:49:16.991Z DEBUG 03cecbfdc68544cc596223b68ce0710c9e5d2c9cb317ee07822d95079acc703d31-connectd: Connect OUT lightningd-1 2026-01-27T04:49:16.991Z DEBUG 03cecbfdc68544cc596223b68ce0710c9e5d2c9cb317ee07822d95079acc703d31-connectd: peer_out WIRE_INIT lightningd-1 2026-01-27T04:49:16.991Z DEBUG 0258f3ff3e0853ccc09f6fe89823056d7c0c55c95fab97674df5e1ad97a72f6265-connectd: peer_out WIRE_PEER_STORAGE ``` Signed-off-by: Rusty Russell --- tests/test_connection.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/test_connection.py b/tests/test_connection.py index 1d3ea7d1ad48..20fa6259fbae 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -4496,9 +4496,15 @@ def test_connect_ratelimit(node_factory, bitcoind): assert not 
l1.daemon.is_in_log('Unblocking for') l1.stop() - # Suspend the others, to make sure they cannot respond too fast. + + # Suspend the others' connectd, to make sure they cannot respond too fast. + connectd_pids = [] for n in nodes: - os.kill(n.daemon.proc.pid, signal.SIGSTOP) + log = n.daemon.is_in_log(' connectd: pid .*, msgfd') + m = re.search(r'connectd: pid (\d*),', log) + pid = int(m.groups()[0]) + connectd_pids.append(pid) + os.kill(pid, signal.SIGSTOP) try: l1.start() @@ -4509,13 +4515,13 @@ def test_connect_ratelimit(node_factory, bitcoind): * (len(nodes) - 1)) except Exception as err: # Resume, so pytest doesn't hang! - for n in nodes: - os.kill(n.daemon.proc.pid, signal.SIGCONT) + for p in connectd_pids: + os.kill(p, signal.SIGCONT) raise err # Resume them - for n in nodes: - os.kill(n.daemon.proc.pid, signal.SIGCONT) + for p in connectd_pids: + os.kill(p, signal.SIGCONT) # And now they're all connected wait_for(lambda: [p['connected'] for p in l1.rpc.listpeers()['peers']] == [True] * len(nodes)) From 18c65e55072ea0c58bcdb09882b99fc650e242c7 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 3 Feb 2026 11:14:04 +1030 Subject: [PATCH 4/8] connectd: unify IO logging calls. Normally, connectd forwards messages and then the subds do logging, but it logs manually for msgs which are handled internally. Clarify this logic in one place for all callers. Signed-off-by: Rusty Russell --- connectd/multiplex.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/connectd/multiplex.c b/connectd/multiplex.c index 1cbc98f1e90e..203962c3ad43 100644 --- a/connectd/multiplex.c +++ b/connectd/multiplex.c @@ -732,6 +732,14 @@ static void handle_pong_in(struct peer *peer, const u8 *msg) abort(); } +/* Various cases where we don't send the msg to a gossipd, we want to + * do IO logging! 
*/ +static void log_peer_io(const struct peer *peer, const u8 *msg) +{ + status_peer_io(LOG_IO_IN, &peer->id, msg); + io_wake(peer->peer_outq); +} + /* Forward to gossipd */ static void handle_gossip_in(struct peer *peer, const u8 *msg) { @@ -744,7 +752,7 @@ static void handle_gossip_in(struct peer *peer, const u8 *msg) gmsg = towire_gossipd_recv_gossip(NULL, &peer->id, msg); /* gossipd doesn't log IO, so we log it here. */ - status_peer_io(LOG_IO_IN, &peer->id, msg); + log_peer_io(peer, msg); daemon_conn_send(peer->daemon->gossipd, take(gmsg)); } @@ -857,19 +865,19 @@ static bool handle_message_locally(struct peer *peer, const u8 *msg) gossip_rcvd_filter_add(peer->gs.grf, msg); if (type == WIRE_GOSSIP_TIMESTAMP_FILTER) { - status_peer_io(LOG_IO_IN, &peer->id, msg); + log_peer_io(peer, msg); handle_gossip_timestamp_filter_in(peer, msg); return true; } else if (type == WIRE_PING) { - status_peer_io(LOG_IO_IN, &peer->id, msg); + log_peer_io(peer, msg); handle_ping_in(peer, msg); return true; } else if (type == WIRE_PONG) { - status_peer_io(LOG_IO_IN, &peer->id, msg); + log_peer_io(peer, msg); handle_pong_in(peer, msg); return true; } else if (type == WIRE_ONION_MESSAGE) { - status_peer_io(LOG_IO_IN, &peer->id, msg); + log_peer_io(peer, msg); handle_onion_message(peer->daemon, peer, msg); return true; } else if (type == WIRE_QUERY_CHANNEL_RANGE) { @@ -1267,7 +1275,6 @@ static struct io_plan *read_body_from_peer_done(struct io_conn *peer_conn, /* If we swallow this, just try again. */ if (handle_message_locally(peer, decrypted)) { /* Make sure to update peer->peer_in_lastmsg so we blame correct msg! */ - io_wake(peer->peer_outq); goto out; } From 7e3d12aab14657977500e362aa0602a3ddaea851 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 3 Feb 2026 11:14:25 +1030 Subject: [PATCH 5/8] pytest: allow pushed after onchain_fee in test_bookkeeping_missed_chans_pushed It can happen, and it's perfectly reasonable. 
If this happens in other places, we might need to allow arbitrary reordering? ``` 2026-01-29T05:55:58.5474967Z exp_events = [{'tag': 'channel_open', 'credit_msat': open_amt * 1000, 'debit_msat': 0}, 2026-01-29T05:55:58.5475765Z {'tag': 'pushed', 'credit_msat': 0, 'debit_msat': push_amt}, 2026-01-29T05:55:58.5476454Z {'tag': 'onchain_fee', 'credit_msat': 4927000, 'debit_msat': 0}, 2026-01-29T05:55:58.5477168Z {'tag': 'invoice', 'credit_msat': 0, 'debit_msat': invoice_msat}] 2026-01-29T05:55:58.5477797Z > check_events(l1, channel_id, exp_events) 2026-01-29T05:55:58.5478120Z 2026-01-29T05:55:58.5478282Z tests/test_bookkeeper.py:402: 2026-01-29T05:55:58.5478777Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2026-01-29T05:55:58.5479162Z 2026-01-29T05:55:58.5479396Z node = 2026-01-29T05:55:58.5480158Z channel_id = 'a4e913b2d143efc3d90cfa66a56aeed3eb9e1533b350c8e84124bdec37bcf74a' 2026-01-29T05:55:58.5481929Z exp_events = [{'credit_msat': 10000000000, 'debit_msat': 0, 'tag': 'channel_open'}, {'credit_msat': 0, 'debit_msat': 1000000000, 'tag': 'pushed'}, {'credit_msat': 4927000, 'debit_msat': 0, 'tag': 'onchain_fee'}, {'credit_msat': 0, 'debit_msat': 11000000, 'tag': 'invoice'}] 2026-01-29T05:55:58.5483442Z 2026-01-29T05:55:58.5483671Z def check_events(node, channel_id, exp_events): 2026-01-29T05:55:58.5484551Z chan_events = [ev for ev in node.rpc.bkpr_listaccountevents()['events'] if ev['account'] == channel_id] 2026-01-29T05:55:58.5485684Z stripped = [{k: d[k] for k in ('tag', 'credit_msat', 'debit_msat') if k in d} for d in chan_events] 2026-01-29T05:55:58.5486455Z > assert stripped == exp_events 2026-01-29T05:55:58.5489277Z E AssertionError: assert [{'tag': 'channel_open', 'credit_msat': 10000000000, 'debit_msat': 0}, {'tag': 'onchain_fee', 'credit_msat': 4927000, 'debit_msat': 0}, {'tag': 'pushed', 'credit_msat': 0, 'debit_msat': 1000000000}, {'tag': 'invoice', 'credit_msat': 0, 'debit_msat': 11000000}] == [{'tag': 'channel_open', 
'credit_msat': 10000000000, 'debit_msat': 0}, {'tag': 'pushed', 'credit_msat': 0, 'debit_msat': 1000000000}, {'tag': 'onchain_fee', 'credit_msat': 4927000, 'debit_msat': 0}, {'tag': 'invoice', 'credit_msat': 0, 'debit_msat': 11000000}] 2026-01-29T05:55:58.5492021Z E 2026-01-29T05:55:58.5492767Z E At index 1 diff: {'tag': 'onchain_fee', 'credit_msat': 4927000, 'debit_msat': 0} != {'tag': 'pushed', 'credit_msat': 0, 'debit_msat': 1000000000} 2026-01-29T05:55:58.5493812Z E 2026-01-29T05:55:58.5494078Z E Full diff: 2026-01-29T05:55:58.5494373Z E [ 2026-01-29T05:55:58.5494863Z E { 2026-01-29T05:55:58.5495166Z E 'credit_msat': 10000000000, 2026-01-29T05:55:58.5495565Z E 'debit_msat': 0, 2026-01-29T05:55:58.5495946Z E 'tag': 'channel_open', 2026-01-29T05:55:58.5496330Z E - }, 2026-01-29T05:55:58.5496613Z E - { 2026-01-29T05:55:58.5496906Z E - 'credit_msat': 0, 2026-01-29T05:55:58.5497285Z E - 'debit_msat': 1000000000, 2026-01-29T05:55:58.5497900Z E - 'tag': 'pushed', 2026-01-29T05:55:58.5498264Z E }, 2026-01-29T05:55:58.5498531Z E { 2026-01-29T05:55:58.5498818Z E 'credit_msat': 4927000, 2026-01-29T05:55:58.5499200Z E 'debit_msat': 0, 2026-01-29T05:55:58.5499563Z E 'tag': 'onchain_fee', 2026-01-29T05:55:58.5499925Z E }, 2026-01-29T05:55:58.5500190Z E { 2026-01-29T05:55:58.5500477Z E 'credit_msat': 0, 2026-01-29T05:55:58.5500863Z E + 'debit_msat': 1000000000, 2026-01-29T05:55:58.5501255Z E + 'tag': 'pushed', 2026-01-29T05:55:58.5501592Z E + }, 2026-01-29T05:55:58.5501853Z E + { 2026-01-29T05:55:58.5502141Z E + 'credit_msat': 0, 2026-01-29T05:55:58.5502511Z E 'debit_msat': 11000000, 2026-01-29T05:55:58.5502889Z E 'tag': 'invoice', 2026-01-29T05:55:58.5503424Z E }, 2026-01-29T05:55:58.5503698Z E ] 2026-01-29T05:55:58.5503861Z 2026-01-29T05:55:58.5504027Z tests/test_bookkeeper.py:29: AssertionError ``` Signed-off-by: Rusty Russell --- tests/test_bookkeeper.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/test_bookkeeper.py 
b/tests/test_bookkeeper.py index 5160261e40f1..d2a2e71457bb 100644 --- a/tests/test_bookkeeper.py +++ b/tests/test_bookkeeper.py @@ -23,10 +23,10 @@ def find_first_tag(evs, tag): return ev[0] -def check_events(node, channel_id, exp_events): +def check_events(node, channel_id, exp_events, alt_events=None): chan_events = [ev for ev in node.rpc.bkpr_listaccountevents()['events'] if ev['account'] == channel_id] stripped = [{k: d[k] for k in ('tag', 'credit_msat', 'debit_msat') if k in d} for d in chan_events] - assert stripped == exp_events + assert stripped == exp_events or stripped == alt_events @unittest.skipIf(TEST_NETWORK != 'regtest', "fixme: broadcast fails, dusty") @@ -399,7 +399,12 @@ def test_bookkeeping_missed_chans_pushed(node_factory, bitcoind): {'tag': 'pushed', 'credit_msat': 0, 'debit_msat': push_amt}, {'tag': 'onchain_fee', 'credit_msat': 4927000, 'debit_msat': 0}, {'tag': 'invoice', 'credit_msat': 0, 'debit_msat': invoice_msat}] - check_events(l1, channel_id, exp_events) + # We sometimes see onchain_fee first: + alt_events = [{'tag': 'channel_open', 'credit_msat': open_amt * 1000, 'debit_msat': 0}, + {'tag': 'onchain_fee', 'credit_msat': 4927000, 'debit_msat': 0}, + {'tag': 'pushed', 'credit_msat': 0, 'debit_msat': push_amt}, + {'tag': 'invoice', 'credit_msat': 0, 'debit_msat': invoice_msat}] + check_events(l1, channel_id, exp_events, alt_events) # l2 events exp_events = [{'tag': 'channel_open', 'credit_msat': 0, 'debit_msat': 0}, From 1e4b70133aac602f06c1a2c8c9cb3c157c5bfea7 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 3 Feb 2026 11:14:25 +1030 Subject: [PATCH 6/8] lightningd: fix spurious memleak in peer_connected_serialize. Of course stream will be freed soon, too, but if run at the exact right time, memleak will get upset: use tmpctx. 
``` lightningd: MEMLEAK: 0x5616f720bc48 lightningd: label=common/wireaddr.c:255:char[] lightningd: alloc: lightningd: /home/runner/work/lightning/lightning/ccan/ccan/tal/tal.c:488 (tal_alloc_) lightningd: /home/runner/work/lightning/lightning/ccan/ccan/tal/tal.c:517 (tal_alloc_arr_) lightningd: /home/runner/work/lightning/lightning/ccan/ccan/tal/str/str.c:81 (tal_vfmt_) lightningd: /home/runner/work/lightning/lightning/ccan/ccan/tal/str/str.c:37 (tal_fmt_) lightningd: /home/runner/work/lightning/lightning/common/wireaddr.c:255 (fmt_wireaddr_without_port) lightningd: /home/runner/work/lightning/lightning/common/wireaddr.c:276 (fmt_wireaddr) lightningd: /home/runner/work/lightning/lightning/common/wireaddr.c:232 (fmt_wireaddr_internal) lightningd: /home/runner/work/lightning/lightning/lightningd/peer_control.c:1327 (peer_connected_serialize) lightningd: /home/runner/work/lightning/lightning/lightningd/plugin_hook.c:359 (plugin_hook_call_next) lightningd: /home/runner/work/lightning/lightning/lightningd/plugin_hook.c:395 (plugin_hook_call_) lightningd: /home/runner/work/lightning/lightning/lightningd/peer_control.c:1753 (plugin_hook_call_peer_connected) lightningd: /home/runner/work/lightning/lightning/lightningd/peer_control.c:1885 (handle_peer_connected) lightningd: /home/runner/work/lightning/lightning/lightningd/connect_control.c:563 (connectd_msg) ``` Signed-off-by: Rusty Russell --- lightningd/peer_control.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/lightningd/peer_control.c b/lightningd/peer_control.c index 2751769c2421..2ec8bea3689f 100644 --- a/lightningd/peer_control.c +++ b/lightningd/peer_control.c @@ -1322,13 +1322,11 @@ peer_connected_serialize(struct peer_connected_hook_payload *payload, json_object_start(stream, "peer"); json_add_node_id(stream, "id", &payload->peer_id); json_add_string(stream, "direction", payload->incoming ? 
"in" : "out"); - json_add_string( - stream, "addr", - fmt_wireaddr_internal(stream, &payload->addr)); + json_add_string(stream, "addr", + fmt_wireaddr_internal(tmpctx, &payload->addr)); if (payload->remote_addr) - json_add_string( - stream, "remote_addr", - fmt_wireaddr(stream, payload->remote_addr)); + json_add_string(stream, "remote_addr", + fmt_wireaddr(tmpctx, payload->remote_addr)); /* Since this is start of hook, peer is always in table! */ json_add_hex_talarr(stream, "features", peer_by_id(payload->ld, &payload->peer_id) From d83212f89e3ec03da4430a5836b20da269b1a268 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 3 Feb 2026 11:33:58 +1030 Subject: [PATCH 7/8] pytest: fix flake in test_even_sendcustommsg We need to make sure the message is fully processed before removing the plugin. Signed-off-by: Rusty Russell --- connectd/connectd.c | 4 ++++ connectd/connectd_wire.csv | 4 ++++ connectd/multiplex.c | 16 ++++++++++++++++ connectd/multiplex.h | 3 +++ lightningd/connect_control.c | 8 ++++++++ tests/test_misc.py | 2 ++ 6 files changed, 37 insertions(+) diff --git a/connectd/connectd.c b/connectd/connectd.c index 93fea082c98e..44f81b6c2518 100644 --- a/connectd/connectd.c +++ b/connectd/connectd.c @@ -2401,6 +2401,10 @@ static struct io_plan *recv_req(struct io_conn *conn, add_scid_map(daemon, msg); goto out; + case WIRE_CONNECTD_CUSTOMMSG_IN_COMPLETE: + custommsg_completed(daemon, msg); + goto out; + case WIRE_CONNECTD_DEV_MEMLEAK: if (daemon->developer) { dev_connect_memleak(daemon, msg); diff --git a/connectd/connectd_wire.csv b/connectd/connectd_wire.csv index 4b4d54577265..7b14915915ab 100644 --- a/connectd/connectd_wire.csv +++ b/connectd/connectd_wire.csv @@ -194,6 +194,10 @@ msgdata,connectd_custommsg_in,id,node_id, msgdata,connectd_custommsg_in,msg_len,u16, msgdata,connectd_custommsg_in,msg,u8,msg_len +# We got that custommsg, thankyou, it was delightful. 
+msgtype,connectd_custommsg_in_complete,2111 +msgdata,connectd_custommsg_in_complete,id,node_id, + # A custom message that the lightningd tells us to send to the peer. msgtype,connectd_custommsg_out,2011 msgdata,connectd_custommsg_out,id,node_id, diff --git a/connectd/multiplex.c b/connectd/multiplex.c index 203962c3ad43..a59b7ad33476 100644 --- a/connectd/multiplex.c +++ b/connectd/multiplex.c @@ -856,6 +856,22 @@ static bool handle_custommsg(struct daemon *daemon, return true; } +void custommsg_completed(struct daemon *daemon, const u8 *msg) +{ + struct node_id id; + const struct peer *peer; + + if (!fromwire_connectd_custommsg_in_complete(msg, &id)) + master_badmsg(WIRE_CONNECTD_CUSTOMMSG_IN_COMPLETE, msg); + + /* If it's still around, log it. */ + peer = peer_htable_get(daemon->peers, &id); + if (peer) { + status_peer_debug(&peer->id, "custommsg processing finished"); + log_peer_io(peer, msg); + } +} + /* We handle pings and gossip messages. */ static bool handle_message_locally(struct peer *peer, const u8 *msg) { diff --git a/connectd/multiplex.h b/connectd/multiplex.h index 1a20143a6764..f42f7cb76ea2 100644 --- a/connectd/multiplex.h +++ b/connectd/multiplex.h @@ -28,6 +28,9 @@ void send_manual_ping(struct daemon *daemon, const u8 *msg); /* When lightningd says to send a custom message (from a plugin) */ void send_custommsg(struct daemon *daemon, const u8 *msg); +/* lightningd has finished with the custommsg */ +void custommsg_completed(struct daemon *daemon, const u8 *msg); + /* When lightningd says what custom messages we can recv */ void set_custommsgs(struct daemon *daemon, const u8 *msg); diff --git a/lightningd/connect_control.c b/lightningd/connect_control.c index ae0bde7a378e..09da14aa8948 100644 --- a/lightningd/connect_control.c +++ b/lightningd/connect_control.c @@ -351,6 +351,7 @@ void connect_succeeded(struct lightningd *ld, const struct peer *peer, } struct custommsg_payload { + struct lightningd *ld; struct node_id peer_id; u8 *msg; }; @@ 
-376,6 +377,11 @@ static bool custommsg_cb(struct custommsg_payload *payload, static void custommsg_final(struct custommsg_payload *payload STEALS) { + /* Note: on shutdown, ld->connectd can be NULL! */ + if (payload->ld->connectd) { + subd_send_msg(payload->ld->connectd, + take(towire_connectd_custommsg_in_complete(NULL, &payload->peer_id))); + } tal_steal(tmpctx, payload); } @@ -397,6 +403,7 @@ static void handle_custommsg_in(struct lightningd *ld, const u8 *msg) { struct custommsg_payload *p = tal(NULL, struct custommsg_payload); + p->ld = ld; if (!fromwire_connectd_custommsg_in(p, msg, &p->peer_id, &p->msg)) { log_broken(ld->log, "Malformed custommsg: %s", tal_hex(tmpctx, msg)); @@ -550,6 +557,7 @@ static unsigned connectd_msg(struct subd *connectd, const u8 *msg, const int *fd case WIRE_CONNECTD_DEV_EXHAUST_FDS: case WIRE_CONNECTD_DEV_SET_MAX_SCIDS_ENCODE_SIZE: case WIRE_CONNECTD_SCID_MAP: + case WIRE_CONNECTD_CUSTOMMSG_IN_COMPLETE: /* This is a reply, so never gets through to here. */ case WIRE_CONNECTD_INIT_REPLY: case WIRE_CONNECTD_ACTIVATE_REPLY: diff --git a/tests/test_misc.py b/tests/test_misc.py index 20705782ded3..5baebbcdce42 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -4719,6 +4719,8 @@ def test_even_sendcustommsg(node_factory): l1.rpc.sendcustommsg(l2.info['id'], msg) l2.daemon.wait_for_log(r'\[IN\] {}'.format(msg)) l2.daemon.wait_for_log(r'allow_even_msgs.*Got message 43690') + # Make sure it *processes* before we remove plugin. 
+ l2.daemon.wait_for_log(f"{l1.info['id']}-connectd: custommsg processing finished") # And nobody gets upset assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'] From 7e87e5ef8b294d908a38884a5c6a10a8514d87cd Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 3 Feb 2026 13:23:55 +1030 Subject: [PATCH 8/8] pytest: fix flake in test_channel_lease_unilat_closes ``` 2026-01-30T05:55:13.6654636Z # Note that l3 has the whole lease delay (minus blocks already mined) 2026-01-30T05:55:13.6655396Z _, _, l3blocks = l3.wait_for_onchaind_tx('OUR_DELAYED_RETURN_TO_WALLET', 2026-01-30T05:55:13.6656086Z 'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US') 2026-01-30T05:55:13.6656618Z > assert l3blocks == 4032 - 6 - 2 - 1 2026-01-30T05:55:13.6657033Z E assert 4025 == (((4032 - 6) - 2) - 1) ``` Turns out that 43420433829 (tests: de-flake test that was failing on cltv expiry) added a line to mine two more blocks, but the hardcoded 110 was not changed to 112, so we weren't actually waiting correctly. Remove hardcoded numbers in favor of calculation, and do the same in test_channel_lease_post_expiry (which was correct, for now). 
Signed-off-by: Rusty Russell --- tests/test_closing.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test_closing.py b/tests/test_closing.py index e64295dc8065..0cc1c2b68a85 100644 --- a/tests/test_closing.py +++ b/tests/test_closing.py @@ -871,7 +871,8 @@ def test_channel_lease_post_expiry(node_factory, bitcoind, chainparams): bitcoind.generate_block(6) sync_blockheight(bitcoind, [l1, l2]) # make sure we're at the right place for the csv lock - l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 115') + height = bitcoind.rpc.getblockchaininfo()['blocks'] + l2.daemon.wait_for_log(f'Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now {height}') # We need to give l1-l2 time to update their blockheights for i in range(0, 4000, 1000): @@ -980,7 +981,8 @@ def test_channel_lease_unilat_closes(node_factory, bitcoind): bitcoind.generate_block(2) sync_blockheight(bitcoind, [l1, l2, l3]) # make sure we're at the right place for the csv lock - l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 110') + height = bitcoind.rpc.getblockchaininfo()['blocks'] + l2.daemon.wait_for_log(f'Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now {height}') l2.stop() # unilateral close channels l1<->l2 & l3<->l2